Diffstat (limited to 'drivers/mtd')
 drivers/mtd/devices/Kconfig          |   7
 drivers/mtd/devices/Makefile         |   1
 drivers/mtd/devices/m25p80.c         |  16
 drivers/mtd/devices/mtd_dataflash.c  |  30
 drivers/mtd/devices/pmc551.c         |   2
 drivers/mtd/devices/ps3vram.c        | 768
 drivers/mtd/maps/dc21285.c           |   7
 drivers/mtd/maps/integrator-flash.c  |   2
 drivers/mtd/maps/ixp2000.c           |   6
 drivers/mtd/maps/ixp4xx.c            |   4
 drivers/mtd/maps/omap_nor.c          |   2
 drivers/mtd/maps/physmap.c           |   6
 drivers/mtd/maps/physmap_of.c        |   4
 drivers/mtd/mtdconcat.c              |   2
 drivers/mtd/nand/Kconfig             |   2
 drivers/mtd/nand/fsl_upm.c           |   2
 drivers/mtd/nand/plat_nand.c         |   2
 drivers/mtd/nand/pxa3xx_nand.c       |   4
 drivers/mtd/nand/s3c2410.c           |   8
 drivers/mtd/nand/tmio_nand.c         |   2
 drivers/mtd/onenand/generic.c        |   2
 drivers/mtd/onenand/omap2.c          |  12
 drivers/mtd/ubi/build.c              |   7
 drivers/mtd/ubi/cdev.c               |   3
 drivers/mtd/ubi/debug.h              |  10
 drivers/mtd/ubi/eba.c                |  53
 drivers/mtd/ubi/io.c                 |  30
 drivers/mtd/ubi/kapi.c               |   2
 drivers/mtd/ubi/scan.c               |   2
 drivers/mtd/ubi/ubi-media.h          |   4
 drivers/mtd/ubi/ubi.h                |  45
 drivers/mtd/ubi/vmt.c                |   4
 drivers/mtd/ubi/vtbl.c               |   2
 drivers/mtd/ubi/wl.c                 | 493
34 files changed, 1146 insertions, 400 deletions
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 6fde0a2e3567..bc33200535fc 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -120,6 +120,13 @@ config MTD_PHRAM
 	  doesn't have access to, memory beyond the mem=xxx limit, nvram,
 	  memory on the video card, etc...
 
+config MTD_PS3VRAM
+	tristate "PS3 video RAM"
+	depends on FB_PS3
+	help
+	  This driver allows you to use excess PS3 video RAM as volatile
+	  storage or system swap.
+
 config MTD_LART
 	tristate "28F160xx flash driver for LART"
 	depends on SA1100_LART
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 0993d5cf3923..e51521df4e40 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_MTD_LART) += lart.o
 obj-$(CONFIG_MTD_BLOCK2MTD)	+= block2mtd.o
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
+obj-$(CONFIG_MTD_PS3VRAM)	+= ps3vram.o
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 6659b2275c0c..5733f0643843 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -170,7 +170,7 @@ static int wait_till_ready(struct m25p *flash)
 static int erase_chip(struct m25p *flash)
 {
 	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
-			flash->spi->dev.bus_id, __func__,
+			dev_name(&flash->spi->dev), __func__,
 			flash->mtd.size / 1024);
 
 	/* Wait until finished previous write command. */
@@ -197,7 +197,7 @@ static int erase_chip(struct m25p *flash)
 static int erase_sector(struct m25p *flash, u32 offset)
 {
 	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
-			flash->spi->dev.bus_id, __func__,
+			dev_name(&flash->spi->dev), __func__,
 			flash->mtd.erasesize / 1024, offset);
 
 	/* Wait until finished previous write command. */
@@ -234,7 +234,7 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
 	u32 addr,len;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %d\n",
-			flash->spi->dev.bus_id, __func__, "at",
+			dev_name(&flash->spi->dev), __func__, "at",
 			(u32)instr->addr, instr->len);
 
 	/* sanity checks */
@@ -295,7 +295,7 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
 	struct spi_message m;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
-			flash->spi->dev.bus_id, __func__, "from",
+			dev_name(&flash->spi->dev), __func__, "from",
 			(u32)from, len);
 
 	/* sanity checks */
@@ -367,7 +367,7 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
 	struct spi_message m;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
-			flash->spi->dev.bus_id, __func__, "to",
+			dev_name(&flash->spi->dev), __func__, "to",
 			(u32)to, len);
 
 	if (retlen)
@@ -563,7 +563,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
 	tmp = spi_write_then_read(spi, &code, 1, id, 5);
 	if (tmp < 0) {
 		DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
-			spi->dev.bus_id, tmp);
+			dev_name(&spi->dev), tmp);
 		return NULL;
 	}
 	jedec = id[0];
@@ -617,7 +617,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
 	/* unrecognized chip? */
 	if (i == ARRAY_SIZE(m25p_data)) {
 		DEBUG(MTD_DEBUG_LEVEL0, "%s: unrecognized id %s\n",
-			spi->dev.bus_id, data->type);
+			dev_name(&spi->dev), data->type);
 		info = NULL;
 
 	/* recognized; is that chip really what's there? */
@@ -658,7 +658,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
 	if (data && data->name)
 		flash->mtd.name = data->name;
 	else
-		flash->mtd.name = spi->dev.bus_id;
+		flash->mtd.name = dev_name(&spi->dev);
 
 	flash->mtd.type = MTD_NORFLASH;
 	flash->mtd.writesize = 1;
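
[Review note: the recurring change throughout this series is mechanical: struct device is losing its fixed-size bus_id[] array, so every direct read of dev.bus_id becomes a dev_name() call, and writes become dev_set_name() (see the ubi/build.c hunk below). A minimal sketch of the before/after shape, using a hypothetical function that is not part of the patch:]

	#include <linux/device.h>
	#include <linux/spi/spi.h>

	static void report_name(struct spi_device *spi)
	{
		/* old, removed by this series:
		 *	pr_debug("%s\n", spi->dev.bus_id);
		 * new: the name lives in dynamically allocated storage and
		 * must be read through the accessor.
		 */
		pr_debug("%s\n", dev_name(&spi->dev));
	}
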
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 6dd9aff8bb2d..65126cd668ff 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -128,7 +128,7 @@ static int dataflash_waitready(struct spi_device *spi)
 	status = dataflash_status(spi);
 	if (status < 0) {
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
-				spi->dev.bus_id, status);
+				dev_name(&spi->dev), status);
 		status = 0;
 	}
 
@@ -154,7 +154,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
 	uint8_t *command;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
-			spi->dev.bus_id,
+			dev_name(&spi->dev),
 			instr->addr, instr->len);
 
 	/* Sanity checks */
@@ -197,7 +197,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
 
 		if (status < 0) {
 			printk(KERN_ERR "%s: erase %x, err %d\n",
-				spi->dev.bus_id, pageaddr, status);
+				dev_name(&spi->dev), pageaddr, status);
 			/* REVISIT: can retry instr->retries times; or
 			 * giveup and instr->fail_addr = instr->addr;
 			 */
@@ -239,7 +239,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
 	int status;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
-		priv->spi->dev.bus_id, (unsigned)from, (unsigned)(from + len));
+		dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
 
 	*retlen = 0;
 
@@ -288,7 +288,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
 		status = 0;
 	} else
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
-			priv->spi->dev.bus_id,
+			dev_name(&priv->spi->dev),
 			(unsigned)from, (unsigned)(from + len),
 			status);
 	return status;
@@ -315,7 +315,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 	uint8_t *command;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
-		spi->dev.bus_id, (unsigned)to, (unsigned)(to + len));
+		dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
 
 	*retlen = 0;
 
@@ -374,7 +374,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 			status = spi_sync(spi, &msg);
 			if (status < 0)
 				DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
-					spi->dev.bus_id, addr, status);
+					dev_name(&spi->dev), addr, status);
 
 			(void) dataflash_waitready(priv->spi);
 		}
@@ -396,7 +396,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 		spi_transfer_del(x + 1);
 		if (status < 0)
 			DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
-				spi->dev.bus_id, addr, writelen, status);
+				dev_name(&spi->dev), addr, writelen, status);
 
 		(void) dataflash_waitready(priv->spi);
 
@@ -416,14 +416,14 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
 			status = spi_sync(spi, &msg);
 			if (status < 0)
 				DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
-					spi->dev.bus_id, addr, status);
+					dev_name(&spi->dev), addr, status);
 
 			status = dataflash_waitready(priv->spi);
 
 			/* Check result of the compare operation */
 			if (status & (1 << 6)) {
 				printk(KERN_ERR "%s: compare page %u, err %d\n",
-					spi->dev.bus_id, pageaddr, status);
+					dev_name(&spi->dev), pageaddr, status);
 				remaining = 0;
 				status = -EIO;
 				break;
@@ -779,7 +779,7 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
 	tmp = spi_write_then_read(spi, &code, 1, id, 3);
 	if (tmp < 0) {
 		DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
-			spi->dev.bus_id, tmp);
+			dev_name(&spi->dev), tmp);
 		return ERR_PTR(tmp);
 	}
 	if (id[0] != 0x1f)
@@ -869,7 +869,7 @@ static int __devinit dataflash_probe(struct spi_device *spi)
 	status = dataflash_status(spi);
 	if (status <= 0 || status == 0xff) {
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
-				spi->dev.bus_id, status);
+				dev_name(&spi->dev), status);
 		if (status == 0 || status == 0xff)
 			status = -ENODEV;
 		return status;
@@ -905,13 +905,13 @@ static int __devinit dataflash_probe(struct spi_device *spi)
 	/* obsolete AT45DB1282 not (yet?) supported */
 	default:
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
-				spi->dev.bus_id, status & 0x3c);
+				dev_name(&spi->dev), status & 0x3c);
 		status = -ENODEV;
 	}
 
 	if (status < 0)
 		DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
-				spi->dev.bus_id, status);
+				dev_name(&spi->dev), status);
 
 	return status;
 }
@@ -921,7 +921,7 @@ static int __devexit dataflash_remove(struct spi_device *spi)
 	struct dataflash	*flash = dev_get_drvdata(&spi->dev);
 	int			status;
 
-	DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", spi->dev.bus_id);
+	DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
 
 	if (mtd_has_partitions() && flash->partitioned)
 		status = del_mtd_partitions(&flash->mtd);
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index d38bca64bb15..d2fd550f7e09 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -34,7 +34,7 @@
  *       aperture size, not the dram size, and the V370PDC supplies no
  *       other method for memory size discovery.  This problem is
  *       mostly only relevant when compiled as a module, as the
- *       unloading of the module with an aperture size smaller then
+ *       unloading of the module with an aperture size smaller than
  *       the ram will cause the driver to detect the onboard memory
  *       size to be equal to the aperture size when the module is
  *       reloaded.  Soooo, to help, the module supports an msize
diff --git a/drivers/mtd/devices/ps3vram.c b/drivers/mtd/devices/ps3vram.c
new file mode 100644
index 000000000000..d21e9beb7ed2
--- /dev/null
+++ b/drivers/mtd/devices/ps3vram.c
@@ -0,0 +1,768 @@
+/**
+ * ps3vram - Use extra PS3 video ram as MTD block device.
+ *
+ * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com>
+ * Added RSX DMA support: Vivien Chappelier <vivien.chappelier@free.fr>
+ */
+
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+
+#include <asm/lv1call.h>
+#include <asm/ps3.h>
+
+#define DEVICE_NAME		"ps3vram"
+
+#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */
+#define XDR_IOIF 0x0c000000
+
+#define FIFO_BASE XDR_IOIF
+#define FIFO_SIZE (64 * 1024)
+
+#define DMA_PAGE_SIZE (4 * 1024)
+
+#define CACHE_PAGE_SIZE (256 * 1024)
+#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE)
+
+#define CACHE_OFFSET CACHE_PAGE_SIZE
+#define FIFO_OFFSET 0
+
+#define CTRL_PUT 0x10
+#define CTRL_GET 0x11
+#define CTRL_TOP 0x15
+
+#define UPLOAD_SUBCH	1
+#define DOWNLOAD_SUBCH	2
+
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN	0x0000030c
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY	0x00000104
+
+#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601
+
+struct mtd_info ps3vram_mtd;
+
+#define CACHE_PAGE_PRESENT 1
+#define CACHE_PAGE_DIRTY   2
+
+struct ps3vram_tag {
+	unsigned int address;
+	unsigned int flags;
+};
+
+struct ps3vram_cache {
+	unsigned int page_count;
+	unsigned int page_size;
+	struct ps3vram_tag *tags;
+};
+
+struct ps3vram_priv {
+	u64 memory_handle;
+	u64 context_handle;
+	u32 *ctrl;
+	u32 *reports;
+	u8 __iomem *ddr_base;
+	u8 *xdr_buf;
+
+	u32 *fifo_base;
+	u32 *fifo_ptr;
+
+	struct device *dev;
+	struct ps3vram_cache cache;
+
+	/* Used to serialize cache/DMA operations */
+	struct mutex lock;
+};
+
+#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */
+#define DMA_NOTIFIER_OFFSET_BASE 0x1000     /* first DMA notifier offset */
+#define DMA_NOTIFIER_SIZE        0x40
+#define NOTIFIER 7	/* notifier used for completion report */
+
+/* A trailing '-' means to subtract off ps3fb_videomemory.size */
+char *size = "256M-";
+module_param(size, charp, 0);
+MODULE_PARM_DESC(size, "memory size");
+
+static u32 *ps3vram_get_notifier(u32 *reports, int notifier)
+{
+	return (void *) reports +
+		DMA_NOTIFIER_OFFSET_BASE +
+		DMA_NOTIFIER_SIZE * notifier;
+}
+
+static void ps3vram_notifier_reset(struct mtd_info *mtd)
+{
+	int i;
+
+	struct ps3vram_priv *priv = mtd->priv;
+	u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
+	for (i = 0; i < 4; i++)
+		notify[i] = 0xffffffff;
+}
+
+static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER);
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+	do {
+		if (!notify[3])
+			return 0;
+		msleep(1);
+	} while (time_before(jiffies, timeout));
+
+	return -ETIMEDOUT;
+}
+
+static void ps3vram_init_ring(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
+	priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET;
+}
+
+static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+	do {
+		if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET])
+			return 0;
+		msleep(1);
+	} while (time_before(jiffies, timeout));
+
+	dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__,
+		__LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET],
+		priv->ctrl[CTRL_TOP]);
+
+	return -ETIMEDOUT;
+}
+
+static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data)
+{
+	*(priv->fifo_ptr)++ = data;
+}
+
+static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan,
+			       u32 tag, u32 size)
+{
+	ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag);
+}
+
+static void ps3vram_rewind_ring(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	u64 status;
+
+	ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET));
+
+	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET;
+
+	/* asking the HV for a blit will kick the fifo */
+	status = lv1_gpu_context_attribute(priv->context_handle,
+					   L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+					   0, 0, 0, 0);
+	if (status)
+		dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
+			__func__, __LINE__);
+
+	priv->fifo_ptr = priv->fifo_base;
+}
+
+static void ps3vram_fire_ring(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	u64 status;
+
+	mutex_lock(&ps3_gpu_mutex);
+
+	priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET +
+		(priv->fifo_ptr - priv->fifo_base) * sizeof(u32);
+
+	/* asking the HV for a blit will kick the fifo */
+	status = lv1_gpu_context_attribute(priv->context_handle,
+					   L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT,
+					   0, 0, 0, 0);
+	if (status)
+		dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n",
+			__func__, __LINE__);
+
+	if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) >
+	    FIFO_SIZE - 1024) {
+		dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__,
+			__LINE__);
+		ps3vram_wait_ring(mtd, 200);
+		ps3vram_rewind_ring(mtd);
+	}
+
+	mutex_unlock(&ps3_gpu_mutex);
+}
+
+static void ps3vram_bind(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1);
+	ps3vram_out_ring(priv, 0x31337303);
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3);
+	ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
+	ps3vram_out_ring(priv, 0xfeed0001);	/* DMA system RAM instance */
+	ps3vram_out_ring(priv, 0xfeed0000);	/* DMA video RAM instance */
+
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1);
+	ps3vram_out_ring(priv, 0x3137c0de);
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3);
+	ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER);
+	ps3vram_out_ring(priv, 0xfeed0000);	/* DMA video RAM instance */
+	ps3vram_out_ring(priv, 0xfeed0001);	/* DMA system RAM instance */
+
+	ps3vram_fire_ring(mtd);
+}
+
+static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset,
+			  unsigned int dst_offset, int len, int count)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+	ps3vram_out_ring(priv, XDR_IOIF + src_offset);
+	ps3vram_out_ring(priv, dst_offset);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, count);
+	ps3vram_out_ring(priv, (1 << 8) | 1);
+	ps3vram_out_ring(priv, 0);
+
+	ps3vram_notifier_reset(mtd);
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_fire_ring(mtd);
+	if (ps3vram_notifier_wait(mtd, 200) < 0) {
+		dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset,
+			    unsigned int dst_offset, int len, int count)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+	ps3vram_out_ring(priv, src_offset);
+	ps3vram_out_ring(priv, XDR_IOIF + dst_offset);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, len);
+	ps3vram_out_ring(priv, count);
+	ps3vram_out_ring(priv, (1 << 8) | 1);
+	ps3vram_out_ring(priv, 0);
+
+	ps3vram_notifier_reset(mtd);
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH,
+			   NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1);
+	ps3vram_out_ring(priv, 0);
+	ps3vram_fire_ring(mtd);
+	if (ps3vram_notifier_wait(mtd, 200) < 0) {
+		dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__,
+			__LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+static void ps3vram_cache_evict(struct mtd_info *mtd, int entry)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+
+	if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) {
+		dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__,
+			__LINE__, entry, cache->tags[entry].address);
+		if (ps3vram_upload(mtd,
+				   CACHE_OFFSET + entry * cache->page_size,
+				   cache->tags[entry].address,
+				   DMA_PAGE_SIZE,
+				   cache->page_size / DMA_PAGE_SIZE) < 0) {
+			dev_dbg(priv->dev, "%s:%d: failed to upload from "
+				"0x%x to 0x%x size 0x%x\n", __func__, __LINE__,
+				entry * cache->page_size,
+				cache->tags[entry].address, cache->page_size);
+		}
+		cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
+	}
+}
+
+static void ps3vram_cache_load(struct mtd_info *mtd, int entry,
+			       unsigned int address)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+
+	dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__,
+		entry, address);
+	if (ps3vram_download(mtd,
+			     address,
+			     CACHE_OFFSET + entry * cache->page_size,
+			     DMA_PAGE_SIZE,
+			     cache->page_size / DMA_PAGE_SIZE) < 0) {
+		dev_err(priv->dev, "%s:%d: failed to download from "
+			"0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address,
+			entry * cache->page_size, cache->page_size);
+	}
+
+	cache->tags[entry].address = address;
+	cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
+}
+
+
+static void ps3vram_cache_flush(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+	int i;
+
+	dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__);
+	for (i = 0; i < cache->page_count; i++) {
+		ps3vram_cache_evict(mtd, i);
+		cache->tags[i].flags = 0;
+	}
+}
+
+static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	struct ps3vram_cache *cache = &priv->cache;
+	unsigned int base;
+	unsigned int offset;
+	int i;
+	static int counter;
+
+	offset = (unsigned int) (address & (cache->page_size - 1));
+	base = (unsigned int) (address - offset);
+
+	/* fully associative check */
+	for (i = 0; i < cache->page_count; i++) {
+		if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
+		    cache->tags[i].address == base) {
+			dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n",
+				__func__, __LINE__, i, cache->tags[i].address);
+			return i;
+		}
+	}
+
+	/* choose a random entry */
+	i = (jiffies + (counter++)) % cache->page_count;
+	dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i);
+
+	ps3vram_cache_evict(mtd, i);
+	ps3vram_cache_load(mtd, i, base);
+
+	return i;
+}
+
+static int ps3vram_cache_init(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	priv->cache.page_count = CACHE_PAGE_COUNT;
+	priv->cache.page_size = CACHE_PAGE_SIZE;
+	priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
+				   CACHE_PAGE_COUNT, GFP_KERNEL);
+	if (priv->cache.tags == NULL) {
+		dev_err(priv->dev, "%s:%d: could not allocate cache tags\n",
+			__func__, __LINE__);
+		return -ENOMEM;
+	}
+
+	dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n",
+		 CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
+
+	return 0;
+}
+
+static void ps3vram_cache_cleanup(struct mtd_info *mtd)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	ps3vram_cache_flush(mtd);
+	kfree(priv->cache.tags);
+}
+
+static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+
+	if (instr->addr + instr->len > mtd->size)
+		return -EINVAL;
+
+	mutex_lock(&priv->lock);
+
+	ps3vram_cache_flush(mtd);
+
+	/* Set bytes to 0xFF */
+	memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len);
+
+	mutex_unlock(&priv->lock);
+
+	instr->state = MTD_ERASE_DONE;
+	mtd_erase_callback(instr);
+
+	return 0;
+}
+
+static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len,
+			size_t *retlen, u_char *buf)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	unsigned int cached, count;
+
+	dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__,
+		(unsigned int)from, len);
+
+	if (from >= mtd->size)
+		return -EINVAL;
+
+	if (len > mtd->size - from)
+		len = mtd->size - from;
+
+	/* Copy from vram to buf */
+	count = len;
+	while (count) {
+		unsigned int offset, avail;
+		unsigned int entry;
+
+		offset = (unsigned int) (from & (priv->cache.page_size - 1));
+		avail  = priv->cache.page_size - offset;
+
+		mutex_lock(&priv->lock);
+
+		entry = ps3vram_cache_match(mtd, from);
+		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
+
+		dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x "
+			"avail=%08x count=%08x\n", __func__, __LINE__,
+			(unsigned int)from, cached, offset, avail, count);
+
+		if (avail > count)
+			avail = count;
+		memcpy(buf, priv->xdr_buf + cached, avail);
+
+		mutex_unlock(&priv->lock);
+
+		buf += avail;
+		count -= avail;
+		from += avail;
+	}
+
+	*retlen = len;
+	return 0;
+}
+
+static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len,
+			 size_t *retlen, const u_char *buf)
+{
+	struct ps3vram_priv *priv = mtd->priv;
+	unsigned int cached, count;
+
+	if (to >= mtd->size)
+		return -EINVAL;
+
+	if (len > mtd->size - to)
+		len = mtd->size - to;
+
+	/* Copy from buf to vram */
+	count = len;
+	while (count) {
+		unsigned int offset, avail;
+		unsigned int entry;
+
+		offset = (unsigned int) (to & (priv->cache.page_size - 1));
+		avail  = priv->cache.page_size - offset;
+
+		mutex_lock(&priv->lock);
+
+		entry = ps3vram_cache_match(mtd, to);
+		cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
+
+		dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x "
+			"avail=%08x count=%08x\n", __func__, __LINE__,
+			(unsigned int)to, cached, offset, avail, count);
+
+		if (avail > count)
+			avail = count;
+		memcpy(priv->xdr_buf + cached, buf, avail);
+
+		priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
+
+		mutex_unlock(&priv->lock);
+
+		buf += avail;
+		count -= avail;
+		to += avail;
+	}
+
+	*retlen = len;
+	return 0;
+}
+
+static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev)
+{
+	struct ps3vram_priv *priv;
+	int status;
+	u64 ddr_lpar;
+	u64 ctrl_lpar;
+	u64 info_lpar;
+	u64 reports_lpar;
+	u64 ddr_size;
+	u64 reports_size;
+	int ret = -ENOMEM;
+	char *rest;
+
+	ret = -EIO;
+	ps3vram_mtd.priv = kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL);
+	if (!ps3vram_mtd.priv)
+		goto out;
+	priv = ps3vram_mtd.priv;
+
+	mutex_init(&priv->lock);
+	priv->dev = &dev->core;
+
+	/* Allocate XDR buffer (1MiB aligned) */
+	priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL,
+						 get_order(XDR_BUF_SIZE));
+	if (priv->xdr_buf == NULL) {
+		dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_priv;
+	}
+
+	/* Put FIFO at beginning of XDR buffer */
+	priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET);
+	priv->fifo_ptr = priv->fifo_base;
+
+	/* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */
+	if (ps3_open_hv_device(dev)) {
+		dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
+			__func__, __LINE__);
+		ret = -EAGAIN;
+		goto out_close_gpu;
+	}
+
+	/* Request memory */
+	status = -1;
+	ddr_size = memparse(size, &rest);
+	if (*rest == '-')
+		ddr_size -= ps3fb_videomemory.size;
+	ddr_size = ALIGN(ddr_size, 1024*1024);
+	if (ddr_size <= 0) {
+		dev_err(&dev->core, "%s:%d: specified size is too small\n",
+			__func__, __LINE__);
+		ret = -EINVAL;
+		goto out_close_gpu;
+	}
+
+	while (ddr_size > 0) {
+		status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0,
+						 &priv->memory_handle,
+						 &ddr_lpar);
+		if (!status)
+			break;
+		ddr_size -= 1024*1024;
+	}
+	if (status || ddr_size <= 0) {
+		dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_xdr_buf;
+	}
+
+	/* Request context */
+	status = lv1_gpu_context_allocate(priv->memory_handle,
+					  0,
+					  &priv->context_handle,
+					  &ctrl_lpar,
+					  &info_lpar,
+					  &reports_lpar,
+					  &reports_size);
+	if (status) {
+		dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_memory;
+	}
+
+	/* Map XDR buffer to RSX */
+	status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF,
+				       ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)),
+				       XDR_BUF_SIZE, 0);
+	if (status) {
+		dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n",
+			__func__, __LINE__);
+		ret = -ENOMEM;
+		goto out_free_context;
+	}
+
+	priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE);
+
+	if (!priv->ddr_base) {
+		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		ret = -ENOMEM;
+		goto out_free_context;
+	}
+
+	priv->ctrl = ioremap(ctrl_lpar, 64 * 1024);
+	if (!priv->ctrl) {
+		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		ret = -ENOMEM;
+		goto out_unmap_vram;
+	}
+
+	priv->reports = ioremap(reports_lpar, reports_size);
+	if (!priv->reports) {
+		dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__,
+			__LINE__);
+		ret = -ENOMEM;
+		goto out_unmap_ctrl;
+	}
+
+	mutex_lock(&ps3_gpu_mutex);
+	ps3vram_init_ring(&ps3vram_mtd);
+	mutex_unlock(&ps3_gpu_mutex);
+
+	ps3vram_mtd.name = "ps3vram";
+	ps3vram_mtd.size = ddr_size;
+	ps3vram_mtd.flags = MTD_CAP_RAM;
+	ps3vram_mtd.erase = ps3vram_erase;
+	ps3vram_mtd.point = NULL;
+	ps3vram_mtd.unpoint = NULL;
+	ps3vram_mtd.read = ps3vram_read;
+	ps3vram_mtd.write = ps3vram_write;
+	ps3vram_mtd.owner = THIS_MODULE;
+	ps3vram_mtd.type = MTD_RAM;
+	ps3vram_mtd.erasesize = CACHE_PAGE_SIZE;
+	ps3vram_mtd.writesize = 1;
+
+	ps3vram_bind(&ps3vram_mtd);
+
+	mutex_lock(&ps3_gpu_mutex);
+	ret = ps3vram_wait_ring(&ps3vram_mtd, 100);
+	mutex_unlock(&ps3_gpu_mutex);
+	if (ret < 0) {
+		dev_err(&dev->core, "%s:%d: failed to initialize channels\n",
+			__func__, __LINE__);
+		ret = -ETIMEDOUT;
+		goto out_unmap_reports;
+	}
+
+	ps3vram_cache_init(&ps3vram_mtd);
+
+	if (add_mtd_device(&ps3vram_mtd)) {
+		dev_err(&dev->core, "%s:%d: add_mtd_device failed\n",
+			__func__, __LINE__);
+		ret = -EAGAIN;
+		goto out_cache_cleanup;
+	}
+
+	dev_info(&dev->core, "reserved %u MiB of gpu memory\n",
+		 (unsigned int)(ddr_size / 1024 / 1024));
+
+	return 0;
+
+out_cache_cleanup:
+	ps3vram_cache_cleanup(&ps3vram_mtd);
+out_unmap_reports:
+	iounmap(priv->reports);
+out_unmap_ctrl:
+	iounmap(priv->ctrl);
+out_unmap_vram:
+	iounmap(priv->ddr_base);
+out_free_context:
+	lv1_gpu_context_free(priv->context_handle);
+out_free_memory:
+	lv1_gpu_memory_free(priv->memory_handle);
+out_close_gpu:
+	ps3_close_hv_device(dev);
+out_free_xdr_buf:
+	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
+out_free_priv:
+	kfree(ps3vram_mtd.priv);
+	ps3vram_mtd.priv = NULL;
+out:
+	return ret;
+}
+
+static int ps3vram_shutdown(struct ps3_system_bus_device *dev)
+{
+	struct ps3vram_priv *priv;
+
+	priv = ps3vram_mtd.priv;
+
+	del_mtd_device(&ps3vram_mtd);
+	ps3vram_cache_cleanup(&ps3vram_mtd);
+	iounmap(priv->reports);
+	iounmap(priv->ctrl);
+	iounmap(priv->ddr_base);
+	lv1_gpu_context_free(priv->context_handle);
+	lv1_gpu_memory_free(priv->memory_handle);
+	ps3_close_hv_device(dev);
+	free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
+	kfree(priv);
+	return 0;
+}
+
+static struct ps3_system_bus_driver ps3vram_driver = {
+	.match_id	= PS3_MATCH_ID_GPU,
+	.match_sub_id	= PS3_MATCH_SUB_ID_GPU_RAMDISK,
+	.core.name	= DEVICE_NAME,
+	.core.owner	= THIS_MODULE,
+	.probe		= ps3vram_probe,
+	.remove		= ps3vram_shutdown,
+	.shutdown	= ps3vram_shutdown,
+};
+
+static int __init ps3vram_init(void)
+{
+	return ps3_system_bus_driver_register(&ps3vram_driver);
+}
+
+static void __exit ps3vram_exit(void)
+{
+	ps3_system_bus_driver_unregister(&ps3vram_driver);
+}
+
+module_init(ps3vram_init);
+module_exit(ps3vram_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jim Paris <jim@jtan.com>");
+MODULE_DESCRIPTION("MTD driver for PS3 video RAM");
+MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK);
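
[Review note: ps3vram_read()/ps3vram_write() above funnel every access through a small fully associative cache of 256 KiB pages held in the XDR buffer; the split of an address into page base, in-page offset, and per-iteration copy length is the heart of both loops. A user-space sketch of the same arithmetic, with the page-size constant copied from the driver (the addresses are made up for illustration):]

	#include <stdio.h>

	#define CACHE_PAGE_SIZE (256 * 1024)

	int main(void)
	{
		unsigned int from = 0x51234, len = 0x60000, count = len;

		while (count) {
			/* in-page offset and bytes available up to page end */
			unsigned int offset = from & (CACHE_PAGE_SIZE - 1);
			unsigned int avail  = CACHE_PAGE_SIZE - offset;

			if (avail > count)
				avail = count;
			/* the driver memcpy()s 'avail' bytes to or from the
			 * cached page here; we only print the split */
			printf("page base 0x%08x offset 0x%05x copy 0x%05x\n",
			       from - offset, offset, avail);
			from  += avail;
			count -= avail;
		}
		return 0;
	}
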
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 3aa018c092f8..42969fe051b2 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -32,16 +32,15 @@ static struct mtd_info *dc21285_mtd;
  */
 static void nw_en_write(void)
 {
-	extern spinlock_t gpio_lock;
 	unsigned long flags;
 
 	/*
 	 * we want to write a bit pattern XXX1 to Xilinx to enable
 	 * the write gate, which will be open for about the next 2ms.
 	 */
-	spin_lock_irqsave(&gpio_lock, flags);
-	cpld_modify(1, 1);
-	spin_unlock_irqrestore(&gpio_lock, flags);
+	spin_lock_irqsave(&nw_gpio_lock, flags);
+	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+	spin_unlock_irqrestore(&nw_gpio_lock, flags);
 
 	/*
 	 * let the ISA bus to catch on...
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c
index 7100ee3c7b01..d2ec262666c7 100644
--- a/drivers/mtd/maps/integrator-flash.c
+++ b/drivers/mtd/maps/integrator-flash.c
@@ -105,7 +105,7 @@ static int armflash_probe(struct platform_device *dev)
 	info->map.bankwidth = plat->width;
 	info->map.phys = res->start;
 	info->map.virt = base;
-	info->map.name = dev->dev.bus_id;
+	info->map.name = dev_name(&dev->dev);
 	info->map.set_vpp = armflash_set_vpp;
 
 	simple_map_init(&info->map);
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index dcdb1f17577d..d4fb9a3ab4df 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -170,7 +170,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
 		err = -ENOMEM;
 		goto Error;
 	}
-	memzero(info, sizeof(struct ixp2000_flash_info));
+	memset(info, 0, sizeof(struct ixp2000_flash_info));
 
 	platform_set_drvdata(dev, info);
 
@@ -188,7 +188,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
 	 */
 	info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup;
 
-	info->map.name = dev->dev.bus_id;
+	info->map.name = dev_name(&dev->dev);
 	info->map.read = ixp2000_flash_read8;
 	info->map.write = ixp2000_flash_write8;
 	info->map.copy_from = ixp2000_flash_copy_from;
@@ -196,7 +196,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
 
 	info->res = request_mem_region(dev->resource->start,
 			dev->resource->end - dev->resource->start + 1,
-			dev->dev.bus_id);
+			dev_name(&dev->dev));
 	if (!info->res) {
 		dev_err(&dev->dev, "Could not reserve memory region\n");
 		err = -ENOMEM;
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 9c7a5fbd4e51..7214b876feba 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -201,7 +201,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
 		err = -ENOMEM;
 		goto Error;
 	}
-	memzero(info, sizeof(struct ixp4xx_flash_info));
+	memset(info, 0, sizeof(struct ixp4xx_flash_info));
 
 	platform_set_drvdata(dev, info);
 
@@ -218,7 +218,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
 	 * handle that.
 	 */
 	info->map.bankwidth = 2;
-	info->map.name = dev->dev.bus_id;
+	info->map.name = dev_name(&dev->dev);
 	info->map.read = ixp4xx_read16,
 	info->map.write = ixp4xx_probe_write16,
 	info->map.copy_from = ixp4xx_copy_from,
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index 05f276af15da..7e50e9b1b781 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -101,7 +101,7 @@ static int __init omapflash_probe(struct platform_device *pdev)
 		err = -ENOMEM;
 		goto out_release_mem_region;
 	}
-	info->map.name = pdev->dev.bus_id;
+	info->map.name = dev_name(&pdev->dev);
 	info->map.phys = res->start;
 	info->map.size = size;
 	info->map.bankwidth = pdata->width;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index dfbf3f270cea..1db16e549e38 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -108,13 +108,13 @@ static int physmap_flash_probe(struct platform_device *dev)
 		if (!devm_request_mem_region(&dev->dev,
 			dev->resource[i].start,
 			dev->resource[i].end - dev->resource[i].start + 1,
-			dev->dev.bus_id)) {
+			dev_name(&dev->dev))) {
 			dev_err(&dev->dev, "Could not reserve memory region\n");
 			err = -ENOMEM;
 			goto err_out;
 		}
 
-		info->map[i].name = dev->dev.bus_id;
+		info->map[i].name = dev_name(&dev->dev);
 		info->map[i].phys = dev->resource[i].start;
 		info->map[i].size = dev->resource[i].end - dev->resource[i].start + 1;
 		info->map[i].bankwidth = physmap_data->width;
@@ -150,7 +150,7 @@ static int physmap_flash_probe(struct platform_device *dev)
 	 * We detected multiple devices. Concatenate them together.
 	 */
 #ifdef CONFIG_MTD_CONCAT
-	info->cmtd = mtd_concat_create(info->mtd, devices_found, dev->dev.bus_id);
+	info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev));
 	if (info->cmtd == NULL)
 		err = -ENXIO;
 #else
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 5fcfec034a94..fbf0ca939d72 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -183,7 +183,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
 
 	err = -EBUSY;
 	info->res = request_mem_region(res.start, res.end - res.start + 1,
-				       dev->dev.bus_id);
+				       dev_name(&dev->dev));
 	if (!info->res)
 		goto err_out;
 
@@ -194,7 +194,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
 		goto err_out;
 	}
 
-	info->map.name = dev->dev.bus_id;
+	info->map.name = dev_name(&dev->dev);
 	info->map.phys = res.start;
 	info->map.size = res.end - res.start + 1;
 	info->map.bankwidth = *width;
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 789842d0e6f2..1a05cf37851e 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -691,7 +691,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
  */
 struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
 				   int num_devs,	/* number of subdevices      */
-				   char *name)
+				   const char *name)
 {				/* name for the new device   */
 	int i;
 	size_t size;
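
[Review note: the signature change above is what lets physmap.c pass dev_name(&dev->dev) straight into mtd_concat_create(): dev_name() returns a const char *, so the concat API has to accept one. A minimal sketch of the type relationship, using a hypothetical wrapper rather than code from the patch:]

	#include <linux/device.h>
	#include <linux/mtd/concat.h>

	static struct mtd_info *concat_for_dev(struct mtd_info *parts[], int n,
					       struct device *dev)
	{
		/* with the old 'char *name' prototype, this call would warn
		 * about discarding the const qualifier from dev_name() */
		return mtd_concat_create(parts, n, dev_name(dev));
	}
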
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1c2e9450d663..f8ae0400c49c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -408,7 +408,7 @@ config MTD_NAND_FSL_UPM
 
 config MTD_NAND_MXC
 	tristate "MXC NAND support"
-	depends on ARCH_MX2
+	depends on ARCH_MX2 || ARCH_MX3
 	help
 	  This enables the driver for the NAND flash controller on the
 	  MXC processors.
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index a83192f80eba..7815a404a632 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -222,7 +222,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
 
 	fun->rnb_gpio = of_get_gpio(ofdev->node, 0);
 	if (fun->rnb_gpio >= 0) {
-		ret = gpio_request(fun->rnb_gpio, ofdev->dev.bus_id);
+		ret = gpio_request(fun->rnb_gpio, dev_name(&ofdev->dev));
 		if (ret) {
 			dev_err(&ofdev->dev, "can't request RNB gpio\n");
 			goto err2;
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index f674c5427b17..75f9f4874ecf 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -54,7 +54,7 @@ static int __init plat_nand_probe(struct platform_device *pdev)
 	data->chip.priv = &data;
 	data->mtd.priv = &data->chip;
 	data->mtd.owner = THIS_MODULE;
-	data->mtd.name = pdev->dev.bus_id;
+	data->mtd.name = dev_name(&pdev->dev);
 
 	data->chip.IO_ADDR_R = data->io_base;
 	data->chip.IO_ADDR_W = data->io_base;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 15f0a26730ae..fc4144495610 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -20,8 +20,8 @@
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
 #include <linux/irq.h>
-#include <asm/dma.h>
 
+#include <mach/dma.h>
 #include <mach/pxa-regs.h>
 #include <mach/pxa3xx_nand.h>
 
@@ -1080,7 +1080,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 	this = &info->nand_chip;
 	mtd->priv = info;
 
-	info->clk = clk_get(&pdev->dev, "NANDCLK");
+	info->clk = clk_get(&pdev->dev, NULL);
 	if (IS_ERR(info->clk)) {
 		dev_err(&pdev->dev, "failed to get nand clock\n");
 		ret = PTR_ERR(info->clk);
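
[Review note: passing NULL as the connection id to clk_get() above asks the clock framework for the clock registered against this device itself, so the lookup keys on the device name rather than on a driver-private string like "NANDCLK". A sketch of the call pattern with a hypothetical helper, under the assumption that the platform registers exactly one clock for the device:]

	#include <linux/clk.h>
	#include <linux/err.h>

	static int enable_default_clock(struct device *dev)
	{
		/* NULL connection id: match the clkdev entry registered
		 * for this device, whatever the platform named it */
		struct clk *clk = clk_get(dev, NULL);

		if (IS_ERR(clk))
			return PTR_ERR(clk);
		return clk_enable(clk);
	}
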
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 556139ed1fdf..8e375d5fe231 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -45,8 +45,8 @@
 
 #include <asm/io.h>
 
-#include <asm/plat-s3c/regs-nand.h>
-#include <asm/plat-s3c/nand.h>
+#include <plat/regs-nand.h>
+#include <plat/nand.h>
 
 #ifdef CONFIG_MTD_NAND_S3C2410_HWECC
 static int hardware_ecc = 1;
@@ -818,7 +818,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
 		goto exit_error;
 	}
 
-	memzero(info, sizeof(*info));
+	memset(info, 0, sizeof(*info));
 	platform_set_drvdata(pdev, info);
 
 	spin_lock_init(&info->controller.lock);
@@ -883,7 +883,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
 		goto exit_error;
 	}
 
-	memzero(info->mtds, size);
+	memset(info->mtds, 0, size);
 
 	/* initialise all possible chips */
 
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index edb1e322113d..daa6a4c3b8ce 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -433,7 +433,7 @@ static int tmio_probe(struct platform_device *dev)
 	nand_chip->chip_delay = 15;
 
 	retval = request_irq(irq, &tmio_irq,
-			     IRQF_DISABLED, dev->dev.bus_id, tmio);
+			     IRQF_DISABLED, dev_name(&dev->dev), tmio);
 	if (retval) {
 		dev_err(&dev->dev, "request_irq error %d\n", retval);
 		goto err_irq;
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index ad81ab8e95e2..5b69e7773c6c 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -63,7 +63,7 @@ static int __devinit generic_onenand_probe(struct device *dev)
 	info->onenand.mmcontrol = pdata->mmcontrol;
 	info->onenand.irq = platform_get_irq(pdev, 0);
 
-	info->mtd.name = pdev->dev.bus_id;
+	info->mtd.name = dev_name(&pdev->dev);
 	info->mtd.priv = &info->onenand;
 	info->mtd.owner = THIS_MODULE;
 
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index a7e4d985f5ef..96ecc1766fa8 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -149,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
 
 	INIT_COMPLETION(c->irq_done);
 	if (c->gpio_irq) {
-		result = omap_get_gpio_datain(c->gpio_irq);
+		result = gpio_get_value(c->gpio_irq);
 		if (result == -1) {
 			ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
 			intr = read_reg(c, ONENAND_REG_INTERRUPT);
@@ -634,9 +634,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
634 "OneNAND\n", c->gpio_irq); 634 "OneNAND\n", c->gpio_irq);
635 goto err_iounmap; 635 goto err_iounmap;
636 } 636 }
637 omap_set_gpio_direction(c->gpio_irq, 1); 637 gpio_direction_input(c->gpio_irq);
638 638
639 if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), 639 if ((r = request_irq(gpio_to_irq(c->gpio_irq),
640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING, 640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
641 pdev->dev.driver->name, c)) < 0) 641 pdev->dev.driver->name, c)) < 0)
642 goto err_release_gpio; 642 goto err_release_gpio;
@@ -668,7 +668,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
 		c->onenand.base);
 
 	c->pdev = pdev;
-	c->mtd.name = pdev->dev.bus_id;
+	c->mtd.name = dev_name(&pdev->dev);
 	c->mtd.priv = &c->onenand;
 	c->mtd.owner = THIS_MODULE;
 
@@ -723,7 +723,7 @@ err_release_dma:
 	if (c->dma_channel != -1)
 		omap_free_dma(c->dma_channel);
 	if (c->gpio_irq)
-		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+		free_irq(gpio_to_irq(c->gpio_irq), c);
 err_release_gpio:
 	if (c->gpio_irq)
 		omap_free_gpio(c->gpio_irq);
@@ -760,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
 	omap2_onenand_shutdown(pdev);
 	platform_set_drvdata(pdev, NULL);
 	if (c->gpio_irq) {
-		free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c);
+		free_irq(gpio_to_irq(c->gpio_irq), c);
 		omap_free_gpio(c->gpio_irq);
 	}
 	iounmap(c->onenand.base);
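
[Review note: the omap2.c hunks above move the driver from OMAP-private GPIO calls to the generic gpiolib API: omap_set_gpio_direction(gpio, 1) becomes gpio_direction_input(gpio), omap_get_gpio_datain() becomes gpio_get_value(), and OMAP_GPIO_IRQ() becomes gpio_to_irq(). A minimal sketch of the resulting request/IRQ pattern, with a hypothetical handler not taken from the driver:]

	#include <linux/gpio.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_isr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int demo_gpio_irq(unsigned gpio, void *dev)
	{
		int ret = gpio_request(gpio, "demo");

		if (ret)
			return ret;
		gpio_direction_input(gpio);
		/* generic mapping from GPIO number to IRQ number */
		return request_irq(gpio_to_irq(gpio), demo_isr,
				   IRQF_TRIGGER_RISING, "demo", dev);
	}
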
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index c7630a228310..7caf22cd5ad0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -280,7 +280,7 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
 	ubi->dev.release = dev_release;
 	ubi->dev.devt = ubi->cdev.dev;
 	ubi->dev.class = ubi_class;
-	sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
+	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
 	err = device_register(&ubi->dev);
 	if (err)
 		return err;
@@ -815,19 +815,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
 	if (err)
 		goto out_free;
 
+	err = -ENOMEM;
 	ubi->peb_buf1 = vmalloc(ubi->peb_size);
 	if (!ubi->peb_buf1)
 		goto out_free;
 
 	ubi->peb_buf2 = vmalloc(ubi->peb_size);
 	if (!ubi->peb_buf2)
 		goto out_free;
 
 #ifdef CONFIG_MTD_UBI_DEBUG
 	mutex_init(&ubi->dbg_buf_mutex);
 	ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
 	if (!ubi->dbg_peb_buf)
 		goto out_free;
 #endif
 
 	err = attach_by_scanning(ubi);
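
[Review note: the build.c hunk above is a real bug fix, not churn: on the vmalloc() failure paths, err still held the zero returned by the earlier call, so ubi_attach_mtd_dev() could fail yet report success. Presetting err before the first allocation makes every goto out_free carry -ENOMEM. The idiom reduced to a hypothetical helper, not the real function:]

	#include <linux/vmalloc.h>
	#include "ubi.h"	/* hypothetical: for struct ubi_device */

	static int alloc_peb_bufs(struct ubi_device *ubi)
	{
		int err = -ENOMEM;	/* preset before the allocations */

		ubi->peb_buf1 = vmalloc(ubi->peb_size);
		if (!ubi->peb_buf1)
			goto out_free;
		ubi->peb_buf2 = vmalloc(ubi->peb_size);
		if (!ubi->peb_buf2)
			goto out_free;
		return 0;

	out_free:
		vfree(ubi->peb_buf1);	/* vfree(NULL) is a no-op */
		return err;		/* -ENOMEM instead of a stale 0 */
	}
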
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index b30a0b83d7f1..98cf31ed0814 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_device *ubi,
 		 * It seems we need to remove volume with name @re->new_name,
 		 * if it exists.
 		 */
-		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE);
+		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
+					  UBI_EXCLUSIVE);
 		if (IS_ERR(desc)) {
 			err = PTR_ERR(desc);
 			if (err == -ENODEV)
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 78e914d23ece..13777e5beac9 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -27,11 +27,11 @@
 #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
 
 #define ubi_assert(expr) do { \
-	if (unlikely(!(expr))) { \
-		printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
-		       __func__, __LINE__, current->pid); \
-		ubi_dbg_dump_stack(); \
-	} \
+	if (unlikely(!(expr))) { \
+		printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
+		       __func__, __LINE__, current->pid); \
+		ubi_dbg_dump_stack(); \
+	} \
 } while (0)
 
 #define dbg_msg(fmt, ...) \
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index d8966bae0e0b..25def348e5ba 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -504,12 +504,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
504 if (!vid_hdr) 504 if (!vid_hdr)
505 return -ENOMEM; 505 return -ENOMEM;
506 506
507 mutex_lock(&ubi->buf_mutex);
508
509retry: 507retry:
510 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); 508 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
511 if (new_pnum < 0) { 509 if (new_pnum < 0) {
512 mutex_unlock(&ubi->buf_mutex);
513 ubi_free_vid_hdr(ubi, vid_hdr); 510 ubi_free_vid_hdr(ubi, vid_hdr);
514 return new_pnum; 511 return new_pnum;
515 } 512 }
@@ -529,20 +526,23 @@ retry:
529 goto write_error; 526 goto write_error;
530 527
531 data_size = offset + len; 528 data_size = offset + len;
529 mutex_lock(&ubi->buf_mutex);
532 memset(ubi->peb_buf1 + offset, 0xFF, len); 530 memset(ubi->peb_buf1 + offset, 0xFF, len);
533 531
534 /* Read everything before the area where the write failure happened */ 532 /* Read everything before the area where the write failure happened */
535 if (offset > 0) { 533 if (offset > 0) {
536 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset); 534 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
537 if (err && err != UBI_IO_BITFLIPS) 535 if (err && err != UBI_IO_BITFLIPS)
538 goto out_put; 536 goto out_unlock;
539 } 537 }
540 538
541 memcpy(ubi->peb_buf1 + offset, buf, len); 539 memcpy(ubi->peb_buf1 + offset, buf, len);
542 540
543 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size); 541 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
544 if (err) 542 if (err) {
543 mutex_unlock(&ubi->buf_mutex);
545 goto write_error; 544 goto write_error;
545 }
546 546
547 mutex_unlock(&ubi->buf_mutex); 547 mutex_unlock(&ubi->buf_mutex);
548 ubi_free_vid_hdr(ubi, vid_hdr); 548 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -553,8 +553,9 @@ retry:
553 ubi_msg("data was successfully recovered"); 553 ubi_msg("data was successfully recovered");
554 return 0; 554 return 0;
555 555
556out_put: 556out_unlock:
557 mutex_unlock(&ubi->buf_mutex); 557 mutex_unlock(&ubi->buf_mutex);
558out_put:
558 ubi_wl_put_peb(ubi, new_pnum, 1); 559 ubi_wl_put_peb(ubi, new_pnum, 1);
559 ubi_free_vid_hdr(ubi, vid_hdr); 560 ubi_free_vid_hdr(ubi, vid_hdr);
560 return err; 561 return err;
@@ -567,7 +568,6 @@ write_error:
567 ubi_warn("failed to write to PEB %d", new_pnum); 568 ubi_warn("failed to write to PEB %d", new_pnum);
568 ubi_wl_put_peb(ubi, new_pnum, 1); 569 ubi_wl_put_peb(ubi, new_pnum, 1);
569 if (++tries > UBI_IO_RETRIES) { 570 if (++tries > UBI_IO_RETRIES) {
570 mutex_unlock(&ubi->buf_mutex);
571 ubi_free_vid_hdr(ubi, vid_hdr); 571 ubi_free_vid_hdr(ubi, vid_hdr);
572 return err; 572 return err;
573 } 573 }
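
Taken together, the recover_peb() hunks narrow the scope of @ubi->buf_mutex: the lock is no longer held across PEB allocation and retries, only around the code that actually touches @ubi->peb_buf1. A condensed sketch of the resulting shape (paraphrasing the hunks above, not the complete function):

	retry:
		new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);	/* lock not held */
		if (new_pnum < 0) {
			ubi_free_vid_hdr(ubi, vid_hdr);
			return new_pnum;		/* nothing to unlock */
		}
		/* ... write the VID header ... */
		mutex_lock(&ubi->buf_mutex);	/* only around peb_buf1 use */
		memset(ubi->peb_buf1 + offset, 0xFF, len);
		/* ... read old data, memcpy(), write peb_buf1 to new_pnum ... */
		mutex_unlock(&ubi->buf_mutex);	/* dropped before any retry */
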
@@ -717,7 +717,7 @@ write_error:
717 * to the real data size, although the @buf buffer has to contain the 717 * to the real data size, although the @buf buffer has to contain the
718 * alignment. In all other cases, @len has to be aligned. 718 * alignment. In all other cases, @len has to be aligned.
719 * 719 *
720 * It is prohibited to write more then once to logical eraseblocks of static 720 * It is prohibited to write more than once to logical eraseblocks of static
721 * volumes. This function returns zero in case of success and a negative error 721 * volumes. This function returns zero in case of success and a negative error
722 * code in case of failure. 722 * code in case of failure.
723 */ 723 */
@@ -949,10 +949,14 @@ write_error:
949 * This function copies logical eraseblock from physical eraseblock @from to 949 * This function copies logical eraseblock from physical eraseblock @from to
950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
951 * function. Returns: 951 * function. Returns:
952 * o %0 in case of success; 952 * o %0 in case of success;
953 * o %1 if the operation was canceled and should be tried later (e.g., 953 * o %1 if the operation was canceled because the volume is being deleted
954 * because a bit-flip was detected at the target PEB); 954 * or because the PEB was put meanwhile;
955 * o %2 if the volume is being deleted and this LEB should not be moved. 955 * o %2 if the operation was canceled because there was a write error to the
956 * target PEB;
957 * o %-EAGAIN if the operation was canceled because a bit-flip was detected
958 * in the target PEB;
959 * o a negative error code in case of failure.
956 */ 960 */
957int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 961int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
958 struct ubi_vid_hdr *vid_hdr) 962 struct ubi_vid_hdr *vid_hdr)
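
The wear_leveling_worker() hunks later in this patch show how these codes are meant to be consumed; in condensed form:

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err == -EAGAIN)
		goto out_not_moved;	/* bit-flip on the target, just retry */
	if (err < 0)
		goto out_error;		/* genuine failure */
	if (err == 2) {
		torture = 1;		/* target write error: torture that PEB */
		goto out_not_moved;
	}
	if (err == 1) {
		/* volume deleted or PEB put meanwhile: park e1 in the
		 * protection queue and schedule e2 for erasure */
	}
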
@@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
978 /* 982 /*
979 * Note, we may race with volume deletion, which means that the volume 983 * Note, we may race with volume deletion, which means that the volume
980 * this logical eraseblock belongs to might be being deleted. Since the 984 * this logical eraseblock belongs to might be being deleted. Since the
981 * volume deletion unmaps all the volume's logical eraseblocks, it will 985 * volume deletion un-maps all the volume's logical eraseblocks, it will
982 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 986 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
983 */ 987 */
984 vol = ubi->volumes[idx]; 988 vol = ubi->volumes[idx];
@@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
986 /* No need to do further work, cancel */ 990 /* No need to do further work, cancel */
987 dbg_eba("volume %d is being removed, cancel", vol_id); 991 dbg_eba("volume %d is being removed, cancel", vol_id);
988 spin_unlock(&ubi->volumes_lock); 992 spin_unlock(&ubi->volumes_lock);
989 return 2; 993 return 1;
990 } 994 }
991 spin_unlock(&ubi->volumes_lock); 995 spin_unlock(&ubi->volumes_lock);
992 996
@@ -1023,7 +1027,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1023 1027
1024 /* 1028 /*
1025 * OK, now the LEB is locked and we can safely start moving it. Since 1029 * OK, now the LEB is locked and we can safely start moving it. Since
1026 * this function utilizes thie @ubi->peb1_buf buffer which is shared 1030 * this function utilizes the @ubi->peb_buf1 buffer which is shared
1027 * with some other functions, lock the buffer by taking the 1031 * with some other functions, lock the buffer by taking the
1028 * @ubi->buf_mutex. 1032 * @ubi->buf_mutex.
1029 */ 1033 */
@@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1068 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 1072 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
1069 1073
1070 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1074 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1071 if (err) 1075 if (err) {
1076 if (err == -EIO)
1077 err = 2;
1072 goto out_unlock_buf; 1078 goto out_unlock_buf;
1079 }
1073 1080
1074 cond_resched(); 1081 cond_resched();
1075 1082
@@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1079 if (err != UBI_IO_BITFLIPS) 1086 if (err != UBI_IO_BITFLIPS)
1080 ubi_warn("cannot read VID header back from PEB %d", to); 1087 ubi_warn("cannot read VID header back from PEB %d", to);
1081 else 1088 else
1082 err = 1; 1089 err = -EAGAIN;
1083 goto out_unlock_buf; 1090 goto out_unlock_buf;
1084 } 1091 }
1085 1092
1086 if (data_size > 0) { 1093 if (data_size > 0) {
1087 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1094 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1088 if (err) 1095 if (err) {
1096 if (err == -EIO)
1097 err = 2;
1089 goto out_unlock_buf; 1098 goto out_unlock_buf;
1099 }
1090 1100
1091 cond_resched(); 1101 cond_resched();
1092 1102
@@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1101 ubi_warn("cannot read data back from PEB %d", 1111 ubi_warn("cannot read data back from PEB %d",
1102 to); 1112 to);
1103 else 1113 else
1104 err = 1; 1114 err = -EAGAIN;
1105 goto out_unlock_buf; 1115 goto out_unlock_buf;
1106 } 1116 }
1107 1117
1108 cond_resched(); 1118 cond_resched();
1109 1119
1110 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1120 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1111 ubi_warn("read data back from PEB %d - it is different", 1121 ubi_warn("read data back from PEB %d and it is "
1112 to); 1122 "different", to);
1123 err = -EINVAL;
1113 goto out_unlock_buf; 1124 goto out_unlock_buf;
1114 } 1125 }
1115 } 1126 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 2fb64be44f1b..fe81039f2a7c 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -465,7 +465,7 @@ out:
465 * This function synchronously erases physical eraseblock @pnum. If @torture 465 * This function synchronously erases physical eraseblock @pnum. If @torture
466 * flag is not zero, the physical eraseblock is checked by means of writing 466 * flag is not zero, the physical eraseblock is checked by means of writing
467 * different patterns to it and reading them back. If the torturing is enabled, 467 * different patterns to it and reading them back. If the torturing is enabled,
468 * the physical eraseblock is erased more then once. 468 * the physical eraseblock is erased more than once.
469 * 469 *
470 * This function returns the number of erasures made in case of success, %-EIO 470 * This function returns the number of erasures made in case of success, %-EIO
471 * if the erasure failed or the torturing test failed, and other negative error 471 * if the erasure failed or the torturing test failed, and other negative error
@@ -637,8 +637,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
637 637
638 dbg_io("read EC header from PEB %d", pnum); 638 dbg_io("read EC header from PEB %d", pnum);
639 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 639 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
640 if (UBI_IO_DEBUG)
641 verbose = 1;
642 640
643 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 641 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
644 if (err) { 642 if (err) {
@@ -685,6 +683,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
685 if (verbose) 683 if (verbose)
686 ubi_warn("no EC header found at PEB %d, " 684 ubi_warn("no EC header found at PEB %d, "
687 "only 0xFF bytes", pnum); 685 "only 0xFF bytes", pnum);
686 else if (UBI_IO_DEBUG)
687 dbg_msg("no EC header found at PEB %d, "
688 "only 0xFF bytes", pnum);
688 return UBI_IO_PEB_EMPTY; 689 return UBI_IO_PEB_EMPTY;
689 } 690 }
690 691
@@ -696,7 +697,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
696 ubi_warn("bad magic number at PEB %d: %08x instead of " 697 ubi_warn("bad magic number at PEB %d: %08x instead of "
697 "%08x", pnum, magic, UBI_EC_HDR_MAGIC); 698 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
698 ubi_dbg_dump_ec_hdr(ec_hdr); 699 ubi_dbg_dump_ec_hdr(ec_hdr);
699 } 700 } else if (UBI_IO_DEBUG)
701 dbg_msg("bad magic number at PEB %d: %08x instead of "
702 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
700 return UBI_IO_BAD_EC_HDR; 703 return UBI_IO_BAD_EC_HDR;
701 } 704 }
702 705
@@ -708,7 +711,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
708 ubi_warn("bad EC header CRC at PEB %d, calculated " 711 ubi_warn("bad EC header CRC at PEB %d, calculated "
709 "%#08x, read %#08x", pnum, crc, hdr_crc); 712 "%#08x, read %#08x", pnum, crc, hdr_crc);
710 ubi_dbg_dump_ec_hdr(ec_hdr); 713 ubi_dbg_dump_ec_hdr(ec_hdr);
711 } 714 } else if (UBI_IO_DEBUG)
715 dbg_msg("bad EC header CRC at PEB %d, calculated "
716 "%#08x, read %#08x", pnum, crc, hdr_crc);
712 return UBI_IO_BAD_EC_HDR; 717 return UBI_IO_BAD_EC_HDR;
713 } 718 }
714 719
@@ -912,8 +917,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
912 917
913 dbg_io("read VID header from PEB %d", pnum); 918 dbg_io("read VID header from PEB %d", pnum);
914 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 919 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
915 if (UBI_IO_DEBUG)
916 verbose = 1;
917 920
918 p = (char *)vid_hdr - ubi->vid_hdr_shift; 921 p = (char *)vid_hdr - ubi->vid_hdr_shift;
919 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 922 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -960,6 +963,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
960 if (verbose) 963 if (verbose)
961 ubi_warn("no VID header found at PEB %d, " 964 ubi_warn("no VID header found at PEB %d, "
962 "only 0xFF bytes", pnum); 965 "only 0xFF bytes", pnum);
966 else if (UBI_IO_DEBUG)
967 dbg_msg("no VID header found at PEB %d, "
968 "only 0xFF bytes", pnum);
963 return UBI_IO_PEB_FREE; 969 return UBI_IO_PEB_FREE;
964 } 970 }
965 971
@@ -971,7 +977,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
971 ubi_warn("bad magic number at PEB %d: %08x instead of " 977 ubi_warn("bad magic number at PEB %d: %08x instead of "
972 "%08x", pnum, magic, UBI_VID_HDR_MAGIC); 978 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
973 ubi_dbg_dump_vid_hdr(vid_hdr); 979 ubi_dbg_dump_vid_hdr(vid_hdr);
974 } 980 } else if (UBI_IO_DEBUG)
981 dbg_msg("bad magic number at PEB %d: %08x instead of "
982 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
975 return UBI_IO_BAD_VID_HDR; 983 return UBI_IO_BAD_VID_HDR;
976 } 984 }
977 985
@@ -983,7 +991,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
983 ubi_warn("bad CRC at PEB %d, calculated %#08x, " 991 ubi_warn("bad CRC at PEB %d, calculated %#08x, "
984 "read %#08x", pnum, crc, hdr_crc); 992 "read %#08x", pnum, crc, hdr_crc);
985 ubi_dbg_dump_vid_hdr(vid_hdr); 993 ubi_dbg_dump_vid_hdr(vid_hdr);
986 } 994 } else if (UBI_IO_DEBUG)
995 dbg_msg("bad CRC at PEB %d, calculated %#08x, "
996 "read %#08x", pnum, crc, hdr_crc);
987 return UBI_IO_BAD_VID_HDR; 997 return UBI_IO_BAD_VID_HDR;
988 } 998 }
989 999
@@ -1024,7 +1034,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1024 1034
1025 err = paranoid_check_peb_ec_hdr(ubi, pnum); 1035 err = paranoid_check_peb_ec_hdr(ubi, pnum);
1026 if (err) 1036 if (err)
1027 return err > 0 ? -EINVAL: err; 1037 return err > 0 ? -EINVAL : err;
1028 1038
1029 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); 1039 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
1030 vid_hdr->version = UBI_VERSION; 1040 vid_hdr->version = UBI_VERSION;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5d9bcf109c13..4abbe573fa40 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap);
564 * @dtype: expected data type 564 * @dtype: expected data type
565 * 565 *
566 * This function maps an un-mapped logical eraseblock @lnum to a physical 566 * This function maps an un-mapped logical eraseblock @lnum to a physical
567 * eraseblock. This means, that after a successfull invocation of this 567 * eraseblock. This means that after a successful invocation of this
568 * function the logical eraseblock @lnum will be empty (contain only %0xFF 568 * function the logical eraseblock @lnum will be empty (contain only %0xFF
569 * bytes) and be mapped to a physical eraseblock, even if an unclean reboot 569 * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
570 * happens. 570 * happens.
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 41d47e1cf15c..ecde202a5a12 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -478,7 +478,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
478 return 0; 478 return 0;
479 } else { 479 } else {
480 /* 480 /*
481 * This logical eraseblock is older then the one found 481 * This logical eraseblock is older than the one found
482 * previously. 482 * previously.
483 */ 483 */
484 if (cmp_res & 4) 484 if (cmp_res & 4)
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 2ad940409053..8419fdccc79c 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -135,7 +135,7 @@ enum {
135 * The erase counter header takes 64 bytes and has plenty of unused space for 135 * The erase counter header takes 64 bytes and has plenty of unused space for
136 * future usage. The unused fields are zeroed. The @version field is used to 136 * future usage. The unused fields are zeroed. The @version field is used to
137 * indicate the version of UBI implementation which is supposed to be able to 137 * indicate the version of UBI implementation which is supposed to be able to
138 * work with this UBI image. If @version is greater then the current UBI 138 * work with this UBI image. If @version is greater than the current UBI
139 * version, the image is rejected. This may be useful in future if something 139 * version, the image is rejected. This may be useful in future if something
140 * is changed radically. This field is duplicated in the volume identifier 140 * is changed radically. This field is duplicated in the volume identifier
141 * header. 141 * header.
@@ -187,7 +187,7 @@ struct ubi_ec_hdr {
187 * (sequence number) is used to distinguish between older and newer versions of 187 * (sequence number) is used to distinguish between older and newer versions of
188 * logical eraseblocks. 188 * logical eraseblocks.
189 * 189 *
190 * There are 2 situations when there may be more then one physical eraseblock 190 * There are 2 situations when there may be more than one physical eraseblock
191 * corresponding to the same logical eraseblock, i.e., having the same @vol_id 191 * corresponding to the same logical eraseblock, i.e., having the same @vol_id
192 * and @lnum values in the volume identifier header. Suppose we have a logical 192 * and @lnum values in the volume identifier header. Suppose we have a logical
193 * eraseblock L and it is mapped to the physical eraseblock P. 193 * eraseblock L and it is mapped to the physical eraseblock P.
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 1c3fa18c26a7..4a8ec485c91d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -74,6 +74,13 @@
74#define UBI_IO_RETRIES 3 74#define UBI_IO_RETRIES 3
75 75
76/* 76/*
77 * Length of the protection queue. The length is effectively equivalent to the
 78 * number of (global) erase cycles for which PEBs are protected from the
 79 * wear-leveling worker.
80 */
81#define UBI_PROT_QUEUE_LEN 10
82
83/*
77 * Error codes returned by the I/O sub-system. 84 * Error codes returned by the I/O sub-system.
78 * 85 *
79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only 86 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
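
A quick worked check of the UBI_PROT_QUEUE_LEN comment above: the queue head advances one slot per erase, and a fresh PEB is queued one slot behind the head, so with a length of 10 it is reached after exactly 10 erase operations. Illustrative arithmetic only:

	int head = 0;	/* stands in for @pq_head */
	int tail = (head + UBI_PROT_QUEUE_LEN - 1) % UBI_PROT_QUEUE_LEN;
	int erases = 0;

	while (head != tail) {			/* one pass per erase */
		head = (head + 1) % UBI_PROT_QUEUE_LEN;
		erases++;
	}
	/* erases == 9 here; the entry is served on the next (10th) erase */
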
@@ -95,7 +102,8 @@ enum {
95 102
96/** 103/**
97 * struct ubi_wl_entry - wear-leveling entry. 104 * struct ubi_wl_entry - wear-leveling entry.
98 * @rb: link in the corresponding RB-tree 105 * @u.rb: link in the corresponding (free/used) RB-tree
106 * @u.list: link in the protection queue
99 * @ec: erase counter 107 * @ec: erase counter
100 * @pnum: physical eraseblock number 108 * @pnum: physical eraseblock number
101 * 109 *
@@ -104,7 +112,10 @@ enum {
104 * RB-trees. See WL sub-system for details. 112 * RB-trees. See WL sub-system for details.
105 */ 113 */
106struct ubi_wl_entry { 114struct ubi_wl_entry {
107 struct rb_node rb; 115 union {
116 struct rb_node rb;
117 struct list_head list;
118 } u;
108 int ec; 119 int ec;
109 int pnum; 120 int pnum;
110}; 121};
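
The union is safe because a wear-leveling entry lives in exactly one container at any moment: one of the @free/@used/@scrub RB-trees, or the protection queue, never both at once. The same structure with the two roles spelled out:

	struct ubi_wl_entry {
		union {
			struct rb_node rb;	/* while in @free, @used or @scrub */
			struct list_head list;	/* while in the protection queue */
		} u;
		int ec;				/* erase counter */
		int pnum;			/* physical eraseblock number */
	};

Compared with the removed ubi_wl_prot_entry, this also saves one allocation per protected PEB.
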
@@ -288,7 +299,7 @@ struct ubi_wl_entry;
288 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling 299 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
289 * 300 *
290 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end 301 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
291 * of UBI ititializetion 302 * of UBI initialization
292 * @vtbl_slots: how many slots are available in the volume table 303 * @vtbl_slots: how many slots are available in the volume table
293 * @vtbl_size: size of the volume table in bytes 304 * @vtbl_size: size of the volume table in bytes
294 * @vtbl: in-RAM volume table copy 305 * @vtbl: in-RAM volume table copy
@@ -306,18 +317,17 @@ struct ubi_wl_entry;
306 * @used: RB-tree of used physical eraseblocks 317 * @used: RB-tree of used physical eraseblocks
307 * @free: RB-tree of free physical eraseblocks 318 * @free: RB-tree of free physical eraseblocks
308 * @scrub: RB-tree of physical eraseblocks which need scrubbing 319 * @scrub: RB-tree of physical eraseblocks which need scrubbing
309 * @prot: protection trees 320 * @pq: protection queue (contains physical eraseblocks which are temporarily
310 * @prot.pnum: protection tree indexed by physical eraseblock numbers 321 * protected from the wear-leveling worker)
311 * @prot.aec: protection tree indexed by absolute erase counter value 322 * @pq_head: protection queue head
312 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 323 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
313 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 324 * @move_to, @move_to_put, @erase_pending, @wl_scheduled and @works
314 * fields 325 * fields
315 * @move_mutex: serializes eraseblock moves 326 * @move_mutex: serializes eraseblock moves
316 * @work_sem: sycnhronizes the WL worker with use tasks 327 * @work_sem: synchronizes the WL worker with user tasks
317 * @wl_scheduled: non-zero if the wear-leveling was scheduled 328 * @wl_scheduled: non-zero if the wear-leveling was scheduled
318 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 329 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
319 * physical eraseblock 330 * physical eraseblock
320 * @abs_ec: absolute erase counter
321 * @move_from: physical eraseblock from where the data is being moved 331 * @move_from: physical eraseblock from where the data is being moved
322 * @move_to: physical eraseblock where the data is being moved to 332 * @move_to: physical eraseblock where the data is being moved to
323 * @move_to_put: if the "to" PEB was put 333 * @move_to_put: if the "to" PEB was put
@@ -351,11 +361,11 @@ struct ubi_wl_entry;
351 * 361 *
352 * @peb_buf1: a buffer of PEB size used for different purposes 362 * @peb_buf1: a buffer of PEB size used for different purposes
353 * @peb_buf2: another buffer of PEB size used for different purposes 363 * @peb_buf2: another buffer of PEB size used for different purposes
354 * @buf_mutex: proptects @peb_buf1 and @peb_buf2 364 * @buf_mutex: protects @peb_buf1 and @peb_buf2
355 * @ckvol_mutex: serializes static volume checking when opening 365 * @ckvol_mutex: serializes static volume checking when opening
356 * @mult_mutex: serializes operations on multiple volumes, like re-nameing 366 * @mult_mutex: serializes operations on multiple volumes, like re-naming
357 * @dbg_peb_buf: buffer of PEB size used for debugging 367 * @dbg_peb_buf: buffer of PEB size used for debugging
358 * @dbg_buf_mutex: proptects @dbg_peb_buf 368 * @dbg_buf_mutex: protects @dbg_peb_buf
359 */ 369 */
360struct ubi_device { 370struct ubi_device {
361 struct cdev cdev; 371 struct cdev cdev;
@@ -392,16 +402,13 @@ struct ubi_device {
392 struct rb_root used; 402 struct rb_root used;
393 struct rb_root free; 403 struct rb_root free;
394 struct rb_root scrub; 404 struct rb_root scrub;
395 struct { 405 struct list_head pq[UBI_PROT_QUEUE_LEN];
396 struct rb_root pnum; 406 int pq_head;
397 struct rb_root aec;
398 } prot;
399 spinlock_t wl_lock; 407 spinlock_t wl_lock;
400 struct mutex move_mutex; 408 struct mutex move_mutex;
401 struct rw_semaphore work_sem; 409 struct rw_semaphore work_sem;
402 int wl_scheduled; 410 int wl_scheduled;
403 struct ubi_wl_entry **lookuptbl; 411 struct ubi_wl_entry **lookuptbl;
404 unsigned long long abs_ec;
405 struct ubi_wl_entry *move_from; 412 struct ubi_wl_entry *move_from;
406 struct ubi_wl_entry *move_to; 413 struct ubi_wl_entry *move_to;
407 int move_to_put; 414 int move_to_put;
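
For @pq to work, each of the %UBI_PROT_QUEUE_LEN slots must start out as an empty list with @pq_head at zero; that initialization is not part of this hunk, so the following is only a hypothetical sketch of what the attach path has to do:

	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&ubi->pq[i]);	/* every slot an empty list */
	ubi->pq_head = 0;			/* serving starts at slot 0 */
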
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 3531ca9a1e24..22e1d7398fce 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -329,7 +329,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
329 vol->dev.devt = dev; 329 vol->dev.devt = dev;
330 vol->dev.class = ubi_class; 330 vol->dev.class = ubi_class;
331 331
332 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 332 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
333 err = device_register(&vol->dev); 333 err = device_register(&vol->dev);
334 if (err) { 334 if (err) {
335 ubi_err("cannot register device"); 335 ubi_err("cannot register device");
@@ -678,7 +678,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
678 vol->dev.parent = &ubi->dev; 678 vol->dev.parent = &ubi->dev;
679 vol->dev.devt = dev; 679 vol->dev.devt = dev;
680 vol->dev.class = ubi_class; 680 vol->dev.class = ubi_class;
681 sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id); 681 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
682 err = device_register(&vol->dev); 682 err = device_register(&vol->dev);
683 if (err) 683 if (err)
684 goto out_gluebi; 684 goto out_gluebi;
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 333c8941552f..1afc61e7455d 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -577,7 +577,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
577 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { 577 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
578 /* Auto re-size flag may be set only for one volume */ 578 /* Auto re-size flag may be set only for one volume */
579 if (ubi->autoresize_vol_id != -1) { 579 if (ubi->autoresize_vol_id != -1) {
580 ubi_err("more then one auto-resize volume (%d " 580 ubi_err("more than one auto-resize volume (%d "
581 "and %d)", ubi->autoresize_vol_id, i); 581 "and %d)", ubi->autoresize_vol_id, i);
582 kfree(vol); 582 kfree(vol);
583 return -EINVAL; 583 return -EINVAL;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index dcb6dac1dc54..891534f8210d 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -22,7 +22,7 @@
22 * UBI wear-leveling sub-system. 22 * UBI wear-leveling sub-system.
23 * 23 *
24 * This sub-system is responsible for wear-leveling. It works in terms of 24 * This sub-system is responsible for wear-leveling. It works in terms of
25 * physical* eraseblocks and erase counters and knows nothing about logical 25 * physical eraseblocks and erase counters and knows nothing about logical
26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical 26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27 * eraseblocks are of two types - used and free. Used physical eraseblocks are 27 * eraseblocks are of two types - used and free. Used physical eraseblocks are
28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical 28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
@@ -55,8 +55,39 @@
55 * 55 *
56 * As it was said, for the UBI sub-system all physical eraseblocks are either 56 * As it was said, for the UBI sub-system all physical eraseblocks are either
57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while 57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
58 * used eraseblocks are kept in a set of different RB-trees: @wl->used, 58 * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
59 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. 59 * (temporarily) in the @wl->pq queue.
60 *
61 * When the WL sub-system returns a physical eraseblock, the physical
62 * eraseblock is protected from being moved for some "time". For this reason,
63 * the physical eraseblock is not directly moved from the @wl->free tree to the
64 * @wl->used tree. There is a protection queue in between where this
65 * physical eraseblock is temporarily stored (@wl->pq).
66 *
67 * All this protection stuff is needed because:
68 * o we don't want to move physical eraseblocks just after we have given them
69 * to the user; instead, we first want to let users fill them up with data;
70 *
71 * o there is a chance that the user will put the physical eraseblock very
72 * soon, so it makes sense not to move it for some time, but wait; this is
73 * especially important in case of "short term" physical eraseblocks.
74 *
75 * Physical eraseblocks stay protected only for limited time. But the "time" is
76 * measured in erase cycles in this case. This is implemented with help of the
77 * protection queue. Eraseblocks are put to the tail of this queue when they
78 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
79 * head of the queue on each erase operation (for any eraseblock). So the
 80 * length of the queue defines how many (global) erase cycles PEBs are protected.
81 *
82 * To put it differently, each physical eraseblock has 2 main states: free and
83 * used. The former state corresponds to the @wl->free tree. The latter state
 84 * is split up into several sub-states:
85 * o the WL movement is allowed (@wl->used tree);
86 * o the WL movement is temporarily prohibited (@wl->pq queue);
87 * o scrubbing is needed (@wl->scrub tree).
88 *
89 * Depending on the sub-state, wear-leveling entries of the used physical
90 * eraseblocks may be kept in one of those structures.
60 * 91 *
61 * Note, in this implementation, we keep a small in-RAM object for each physical 92 * Note, in this implementation, we keep a small in-RAM object for each physical
62 * eraseblock. This is surely not a scalable solution. But it appears to be good 93 * eraseblock. This is surely not a scalable solution. But it appears to be good
@@ -70,9 +101,6 @@
70 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we 101 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 71 * pick target PEB with an average EC if our PEB is not very "old". This leaves 102 * pick target PEB with an average EC if our PEB is not very "old". This leaves
72 * room for future re-works of the WL sub-system. 103 * room for future re-works of the WL sub-system.
73 *
74 * Note: the stuff with protection trees looks too complex and is difficult to
75 * understand. Should be fixed.
76 */ 104 */
77 105
78#include <linux/slab.h> 106#include <linux/slab.h>
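
The rewritten comment above is the heart of this patch: one rotating array of list heads replaces two RB-trees plus a separately allocated protection entry per PEB. A minimal stand-alone model of the rotation, with illustrative names (QLEN stands in for UBI_PROT_QUEUE_LEN):

	#include <stdio.h>

	#define QLEN 10

	int main(void)
	{
		int pq_head = 0, tail, erase;

		for (erase = 0; erase < 3; erase++) {
			/* enqueue: fresh PEBs go one slot behind the head */
			tail = pq_head ? pq_head - 1 : QLEN - 1;
			printf("erase %d: enqueue at %d, serve slot %d\n",
			       erase, tail, pq_head);
			/* serve: each erase empties slot pq_head, then advances */
			pq_head = (pq_head + 1) % QLEN;
		}
		return 0;
	}
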
@@ -85,14 +113,6 @@
85#define WL_RESERVED_PEBS 1 113#define WL_RESERVED_PEBS 1
86 114
87/* 115/*
88 * How many erase cycles are short term, unknown, and long term physical
89 * eraseblocks protected.
90 */
91#define ST_PROTECTION 16
92#define U_PROTECTION 10
93#define LT_PROTECTION 4
94
95/*
96 * Maximum difference between two erase counters. If this threshold is 116 * Maximum difference between two erase counters. If this threshold is
97 * exceeded, the WL sub-system starts moving data from used physical 117 * exceeded, the WL sub-system starts moving data from used physical
98 * eraseblocks with low erase counter to free physical eraseblocks with high 118 * eraseblocks with low erase counter to free physical eraseblocks with high
@@ -108,7 +128,7 @@
108 * situation when the picked physical eraseblock is constantly erased after the 128 * situation when the picked physical eraseblock is constantly erased after the
109 * data is written to it. So, we have a constant which limits the highest erase 129 * data is written to it. So, we have a constant which limits the highest erase
110 * counter of the free physical eraseblock to pick. Namely, the WL sub-system 130 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
111 * does not pick eraseblocks with erase counter greater then the lowest erase 131 * does not pick eraseblocks with erase counter greater than the lowest erase
112 * counter plus %WL_FREE_MAX_DIFF. 132 * counter plus %WL_FREE_MAX_DIFF.
113 */ 133 */
114#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) 134#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
@@ -120,64 +140,9 @@
120#define WL_MAX_FAILURES 32 140#define WL_MAX_FAILURES 32
121 141
122/** 142/**
123 * struct ubi_wl_prot_entry - PEB protection entry.
124 * @rb_pnum: link in the @wl->prot.pnum RB-tree
125 * @rb_aec: link in the @wl->prot.aec RB-tree
126 * @abs_ec: the absolute erase counter value when the protection ends
127 * @e: the wear-leveling entry of the physical eraseblock under protection
128 *
129 * When the WL sub-system returns a physical eraseblock, the physical
130 * eraseblock is protected from being moved for some "time". For this reason,
131 * the physical eraseblock is not directly moved from the @wl->free tree to the
132 * @wl->used tree. There is one more tree in between where this physical
133 * eraseblock is temporarily stored (@wl->prot).
134 *
135 * All this protection stuff is needed because:
136 * o we don't want to move physical eraseblocks just after we have given them
137 * to the user; instead, we first want to let users fill them up with data;
138 *
139 * o there is a chance that the user will put the physical eraseblock very
140 * soon, so it makes sense not to move it for some time, but wait; this is
141 * especially important in case of "short term" physical eraseblocks.
142 *
143 * Physical eraseblocks stay protected only for limited time. But the "time" is
144 * measured in erase cycles in this case. This is implemented with help of the
145 * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
146 * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
147 * the @wl->used tree.
148 *
149 * Protected physical eraseblocks are searched by physical eraseblock number
150 * (when they are put) and by the absolute erase counter (to check if it is
151 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
152 * storing the protected physical eraseblocks: @wl->prot.pnum and
153 * @wl->prot.aec. They are referred to as the "protection" trees. The
154 * first one is indexed by the physical eraseblock number. The second one is
155 * indexed by the absolute erase counter. Both trees store
156 * &struct ubi_wl_prot_entry objects.
157 *
158 * Each physical eraseblock has 2 main states: free and used. The former state
159 * corresponds to the @wl->free tree. The latter state is split up on several
160 * sub-states:
161 * o the WL movement is allowed (@wl->used tree);
162 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
163 * @wl->prot.aec trees);
164 * o scrubbing is needed (@wl->scrub tree).
165 *
166 * Depending on the sub-state, wear-leveling entries of the used physical
167 * eraseblocks may be kept in one of those trees.
168 */
169struct ubi_wl_prot_entry {
170 struct rb_node rb_pnum;
171 struct rb_node rb_aec;
172 unsigned long long abs_ec;
173 struct ubi_wl_entry *e;
174};
175
176/**
177 * struct ubi_work - UBI work description data structure. 143 * struct ubi_work - UBI work description data structure.
178 * @list: a link in the list of pending works 144 * @list: a link in the list of pending works
179 * @func: worker function 145 * @func: worker function
180 * @priv: private data of the worker function
181 * @e: physical eraseblock to erase 146 * @e: physical eraseblock to erase
182 * @torture: if the physical eraseblock has to be tortured 147 * @torture: if the physical eraseblock has to be tortured
183 * 148 *
@@ -198,9 +163,11 @@ struct ubi_work {
198static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); 163static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
199static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, 164static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
200 struct rb_root *root); 165 struct rb_root *root);
166static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
201#else 167#else
202#define paranoid_check_ec(ubi, pnum, ec) 0 168#define paranoid_check_ec(ubi, pnum, ec) 0
203#define paranoid_check_in_wl_tree(e, root) 169#define paranoid_check_in_wl_tree(e, root)
170#define paranoid_check_in_pq(ubi, e) 0
204#endif 171#endif
205 172
206/** 173/**
@@ -220,7 +187,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
220 struct ubi_wl_entry *e1; 187 struct ubi_wl_entry *e1;
221 188
222 parent = *p; 189 parent = *p;
223 e1 = rb_entry(parent, struct ubi_wl_entry, rb); 190 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
224 191
225 if (e->ec < e1->ec) 192 if (e->ec < e1->ec)
226 p = &(*p)->rb_left; 193 p = &(*p)->rb_left;
@@ -235,8 +202,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
235 } 202 }
236 } 203 }
237 204
238 rb_link_node(&e->rb, parent, p); 205 rb_link_node(&e->u.rb, parent, p);
239 rb_insert_color(&e->rb, root); 206 rb_insert_color(&e->u.rb, root);
240} 207}
241 208
242/** 209/**
@@ -331,7 +298,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
331 while (p) { 298 while (p) {
332 struct ubi_wl_entry *e1; 299 struct ubi_wl_entry *e1;
333 300
334 e1 = rb_entry(p, struct ubi_wl_entry, rb); 301 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
335 302
336 if (e->pnum == e1->pnum) { 303 if (e->pnum == e1->pnum) {
337 ubi_assert(e == e1); 304 ubi_assert(e == e1);
@@ -355,50 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
355} 322}
356 323
357/** 324/**
358 * prot_tree_add - add physical eraseblock to protection trees. 325 * prot_queue_add - add physical eraseblock to the protection queue.
359 * @ubi: UBI device description object 326 * @ubi: UBI device description object
360 * @e: the physical eraseblock to add 327 * @e: the physical eraseblock to add
361 * @pe: protection entry object to use
362 * @abs_ec: absolute erase counter value when this physical eraseblock has
363 * to be removed from the protection trees.
364 * 328 *
365 * @wl->lock has to be locked. 329 * This function adds @e to the tail of the protection queue @ubi->pq, where
330 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
331 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
332 * be locked.
366 */ 333 */
367static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, 334static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
368 struct ubi_wl_prot_entry *pe, int abs_ec)
369{ 335{
370 struct rb_node **p, *parent = NULL; 336 int pq_tail = ubi->pq_head - 1;
371 struct ubi_wl_prot_entry *pe1;
372
373 pe->e = e;
374 pe->abs_ec = ubi->abs_ec + abs_ec;
375
376 p = &ubi->prot.pnum.rb_node;
377 while (*p) {
378 parent = *p;
379 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
380
381 if (e->pnum < pe1->e->pnum)
382 p = &(*p)->rb_left;
383 else
384 p = &(*p)->rb_right;
385 }
386 rb_link_node(&pe->rb_pnum, parent, p);
387 rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
388
389 p = &ubi->prot.aec.rb_node;
390 parent = NULL;
391 while (*p) {
392 parent = *p;
393 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
394 337
395 if (pe->abs_ec < pe1->abs_ec) 338 if (pq_tail < 0)
396 p = &(*p)->rb_left; 339 pq_tail = UBI_PROT_QUEUE_LEN - 1;
397 else 340 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
398 p = &(*p)->rb_right; 341 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
399 } 342 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
400 rb_link_node(&pe->rb_aec, parent, p);
401 rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
402} 343}
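
The open-coded wrap in prot_queue_add() is a decrement modulo the queue length; written as one expression (a stylistic equivalent, not what the patch uses):

	pq_tail = (ubi->pq_head + UBI_PROT_QUEUE_LEN - 1) % UBI_PROT_QUEUE_LEN;

The branchy form in the function avoids the division and keeps the explicit range assertion.
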
403 344
404/** 345/**
@@ -414,14 +355,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
414 struct rb_node *p; 355 struct rb_node *p;
415 struct ubi_wl_entry *e; 356 struct ubi_wl_entry *e;
416 357
417 e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); 358 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
418 max += e->ec; 359 max += e->ec;
419 360
420 p = root->rb_node; 361 p = root->rb_node;
421 while (p) { 362 while (p) {
422 struct ubi_wl_entry *e1; 363 struct ubi_wl_entry *e1;
423 364
424 e1 = rb_entry(p, struct ubi_wl_entry, rb); 365 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
425 if (e1->ec >= max) 366 if (e1->ec >= max)
426 p = p->rb_left; 367 p = p->rb_left;
427 else { 368 else {
@@ -443,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
443 */ 384 */
444int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) 385int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
445{ 386{
446 int err, protect, medium_ec; 387 int err, medium_ec;
447 struct ubi_wl_entry *e, *first, *last; 388 struct ubi_wl_entry *e, *first, *last;
448 struct ubi_wl_prot_entry *pe;
449 389
450 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || 390 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
451 dtype == UBI_UNKNOWN); 391 dtype == UBI_UNKNOWN);
452 392
453 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
454 if (!pe)
455 return -ENOMEM;
456
457retry: 393retry:
458 spin_lock(&ubi->wl_lock); 394 spin_lock(&ubi->wl_lock);
459 if (!ubi->free.rb_node) { 395 if (!ubi->free.rb_node) {
@@ -461,16 +397,13 @@ retry:
461 ubi_assert(list_empty(&ubi->works)); 397 ubi_assert(list_empty(&ubi->works));
462 ubi_err("no free eraseblocks"); 398 ubi_err("no free eraseblocks");
463 spin_unlock(&ubi->wl_lock); 399 spin_unlock(&ubi->wl_lock);
464 kfree(pe);
465 return -ENOSPC; 400 return -ENOSPC;
466 } 401 }
467 spin_unlock(&ubi->wl_lock); 402 spin_unlock(&ubi->wl_lock);
468 403
469 err = produce_free_peb(ubi); 404 err = produce_free_peb(ubi);
470 if (err < 0) { 405 if (err < 0)
471 kfree(pe);
472 return err; 406 return err;
473 }
474 goto retry; 407 goto retry;
475 } 408 }
476 409
@@ -483,7 +416,6 @@ retry:
483 * %WL_FREE_MAX_DIFF. 416 * %WL_FREE_MAX_DIFF.
484 */ 417 */
485 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 418 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
486 protect = LT_PROTECTION;
487 break; 419 break;
488 case UBI_UNKNOWN: 420 case UBI_UNKNOWN:
489 /* 421 /*
@@ -492,81 +424,63 @@ retry:
492 * eraseblock with erase counter greater than or equal to the 424 * eraseblock with erase counter greater than or equal to the
493 * lowest erase counter plus %WL_FREE_MAX_DIFF. 425 * lowest erase counter plus %WL_FREE_MAX_DIFF.
494 */ 426 */
495 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 427 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
496 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); 428 u.rb);
429 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
497 430
498 if (last->ec - first->ec < WL_FREE_MAX_DIFF) 431 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
499 e = rb_entry(ubi->free.rb_node, 432 e = rb_entry(ubi->free.rb_node,
500 struct ubi_wl_entry, rb); 433 struct ubi_wl_entry, u.rb);
501 else { 434 else {
502 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 435 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
503 e = find_wl_entry(&ubi->free, medium_ec); 436 e = find_wl_entry(&ubi->free, medium_ec);
504 } 437 }
505 protect = U_PROTECTION;
506 break; 438 break;
507 case UBI_SHORTTERM: 439 case UBI_SHORTTERM:
508 /* 440 /*
509 * For short term data we pick a physical eraseblock with the 441 * For short term data we pick a physical eraseblock with the
510 * lowest erase counter as we expect it will be erased soon. 442 * lowest erase counter as we expect it will be erased soon.
511 */ 443 */
512 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 444 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
513 protect = ST_PROTECTION;
514 break; 445 break;
515 default: 446 default:
516 protect = 0;
517 e = NULL;
518 BUG(); 447 BUG();
519 } 448 }
520 449
450 paranoid_check_in_wl_tree(e, &ubi->free);
451
521 /* 452 /*
522 * Move the physical eraseblock to the protection trees where it will 453 * Move the physical eraseblock to the protection queue where it will
523 * be protected from being moved for some time. 454 * be protected from being moved for some time.
524 */ 455 */
525 paranoid_check_in_wl_tree(e, &ubi->free); 456 rb_erase(&e->u.rb, &ubi->free);
526 rb_erase(&e->rb, &ubi->free); 457 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
527 prot_tree_add(ubi, e, pe, protect); 458 prot_queue_add(ubi, e);
528
529 dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
530 spin_unlock(&ubi->wl_lock); 459 spin_unlock(&ubi->wl_lock);
531
532 return e->pnum; 460 return e->pnum;
533} 461}
534 462
535/** 463/**
536 * prot_tree_del - remove a physical eraseblock from the protection trees 464 * prot_queue_del - remove a physical eraseblock from the protection queue.
537 * @ubi: UBI device description object 465 * @ubi: UBI device description object
538 * @pnum: the physical eraseblock to remove 466 * @pnum: the physical eraseblock to remove
539 * 467 *
540 * This function returns PEB @pnum from the protection trees and returns zero 468 * This function deletes PEB @pnum from the protection queue and returns zero
541 * in case of success and %-ENODEV if the PEB was not found in the protection 469 * in case of success and %-ENODEV if the PEB was not found.
542 * trees.
543 */ 470 */
544static int prot_tree_del(struct ubi_device *ubi, int pnum) 471static int prot_queue_del(struct ubi_device *ubi, int pnum)
545{ 472{
546 struct rb_node *p; 473 struct ubi_wl_entry *e;
547 struct ubi_wl_prot_entry *pe = NULL;
548
549 p = ubi->prot.pnum.rb_node;
550 while (p) {
551
552 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
553
554 if (pnum == pe->e->pnum)
555 goto found;
556 474
557 if (pnum < pe->e->pnum) 475 e = ubi->lookuptbl[pnum];
558 p = p->rb_left; 476 if (!e)
559 else 477 return -ENODEV;
560 p = p->rb_right;
561 }
562 478
563 return -ENODEV; 479 if (paranoid_check_in_pq(ubi, e))
480 return -ENODEV;
564 481
565found: 482 list_del(&e->u.list);
566 ubi_assert(pe->e->pnum == pnum); 483 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
567 rb_erase(&pe->rb_aec, &ubi->prot.aec);
568 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
569 kfree(pe);
570 return 0; 484 return 0;
571} 485}
572 486
@@ -632,47 +546,47 @@ out_free:
632} 546}
633 547
634/** 548/**
635 * check_protection_over - check if it is time to stop protecting some PEBs. 549 * serve_prot_queue - check if it is time to stop protecting PEBs.
636 * @ubi: UBI device description object 550 * @ubi: UBI device description object
637 * 551 *
638 * This function is called after each erase operation, when the absolute erase 552 * This function is called after each erase operation and removes PEBs from the
639 * counter is incremented, to check if some physical eraseblock have not to be 553 * head of the protection queue. These PEBs have been protected for long enough
640 * protected any longer. These physical eraseblocks are moved from the 554 * and should be moved to the used tree.
641 * protection trees to the used tree.
642 */ 555 */
643static void check_protection_over(struct ubi_device *ubi) 556static void serve_prot_queue(struct ubi_device *ubi)
644{ 557{
645 struct ubi_wl_prot_entry *pe; 558 struct ubi_wl_entry *e, *tmp;
559 int count;
646 560
647 /* 561 /*
648 * There may be several protected physical eraseblocks to remove, 562 * There may be several protected physical eraseblocks to remove,
649 * process them all. 563 * process them all.
650 */ 564 */
651 while (1) { 565repeat:
652 spin_lock(&ubi->wl_lock); 566 count = 0;
653 if (!ubi->prot.aec.rb_node) { 567 spin_lock(&ubi->wl_lock);
654 spin_unlock(&ubi->wl_lock); 568 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
655 break; 569 dbg_wl("PEB %d EC %d protection over, move to used tree",
656 } 570 e->pnum, e->ec);
657
658 pe = rb_entry(rb_first(&ubi->prot.aec),
659 struct ubi_wl_prot_entry, rb_aec);
660 571
661 if (pe->abs_ec > ubi->abs_ec) { 572 list_del(&e->u.list);
573 wl_tree_add(e, &ubi->used);
574 if (count++ > 32) {
575 /*
576 * Let's be nice and avoid holding the spinlock for
577 * too long.
578 */
662 spin_unlock(&ubi->wl_lock); 579 spin_unlock(&ubi->wl_lock);
663 break; 580 cond_resched();
581 goto repeat;
664 } 582 }
665
666 dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
667 pe->e->pnum, ubi->abs_ec, pe->abs_ec);
668 rb_erase(&pe->rb_aec, &ubi->prot.aec);
669 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
670 wl_tree_add(pe->e, &ubi->used);
671 spin_unlock(&ubi->wl_lock);
672
673 kfree(pe);
674 cond_resched();
675 } 583 }
584
585 ubi->pq_head += 1;
586 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
587 ubi->pq_head = 0;
588 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
589 spin_unlock(&ubi->wl_lock);
676} 590}
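
Note the batching in serve_prot_queue(): list_for_each_entry_safe() cannot survive dropping the lock, so after 32 moved entries the function releases @wl_lock, yields via cond_resched(), and restarts the walk from the same head slot, which is correct because served entries have already been deleted from the list. The general shape of the pattern, condensed from the code above:

	repeat:
		count = 0;
		spin_lock(&ubi->wl_lock);
		list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
			list_del(&e->u.list);
			wl_tree_add(e, &ubi->used);
			if (count++ > 32) {
				spin_unlock(&ubi->wl_lock);	/* bound hold time */
				cond_resched();
				goto repeat;			/* re-take, resume */
			}
		}
		spin_unlock(&ubi->wl_lock);
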
677 591
678/** 592/**
@@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi)
680 * @ubi: UBI device description object 594 * @ubi: UBI device description object
681 * @wrk: the work to schedule 595 * @wrk: the work to schedule
682 * 596 *
683 * This function enqueues a work defined by @wrk to the tail of the pending 597 * This function adds a work defined by @wrk to the tail of the pending works
684 * works list. 598 * list.
685 */ 599 */
686static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) 600static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
687{ 601{
@@ -739,13 +653,11 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
739static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 653static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
740 int cancel) 654 int cancel)
741{ 655{
742 int err, put = 0, scrubbing = 0, protect = 0; 656 int err, scrubbing = 0, torture = 0;
743 struct ubi_wl_prot_entry *uninitialized_var(pe);
744 struct ubi_wl_entry *e1, *e2; 657 struct ubi_wl_entry *e1, *e2;
745 struct ubi_vid_hdr *vid_hdr; 658 struct ubi_vid_hdr *vid_hdr;
746 659
747 kfree(wrk); 660 kfree(wrk);
748
749 if (cancel) 661 if (cancel)
750 return 0; 662 return 0;
751 663
@@ -781,7 +693,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
781 * highly worn-out free physical eraseblock. If the erase 693 * highly worn-out free physical eraseblock. If the erase
782 * counters differ much enough, start wear-leveling. 694 * counters differ much enough, start wear-leveling.
783 */ 695 */
784 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); 696 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
785 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 697 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
786 698
787 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 699 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
@@ -790,21 +702,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
790 goto out_cancel; 702 goto out_cancel;
791 } 703 }
792 paranoid_check_in_wl_tree(e1, &ubi->used); 704 paranoid_check_in_wl_tree(e1, &ubi->used);
793 rb_erase(&e1->rb, &ubi->used); 705 rb_erase(&e1->u.rb, &ubi->used);
794 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 706 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
795 e1->pnum, e1->ec, e2->pnum, e2->ec); 707 e1->pnum, e1->ec, e2->pnum, e2->ec);
796 } else { 708 } else {
797 /* Perform scrubbing */ 709 /* Perform scrubbing */
798 scrubbing = 1; 710 scrubbing = 1;
799 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 711 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
800 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 712 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
801 paranoid_check_in_wl_tree(e1, &ubi->scrub); 713 paranoid_check_in_wl_tree(e1, &ubi->scrub);
802 rb_erase(&e1->rb, &ubi->scrub); 714 rb_erase(&e1->u.rb, &ubi->scrub);
803 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 715 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
804 } 716 }
805 717
806 paranoid_check_in_wl_tree(e2, &ubi->free); 718 paranoid_check_in_wl_tree(e2, &ubi->free);
807 rb_erase(&e2->rb, &ubi->free); 719 rb_erase(&e2->u.rb, &ubi->free);
808 ubi->move_from = e1; 720 ubi->move_from = e1;
809 ubi->move_to = e2; 721 ubi->move_to = e2;
810 spin_unlock(&ubi->wl_lock); 722 spin_unlock(&ubi->wl_lock);
@@ -844,46 +756,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
844 756
845 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 757 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
846 if (err) { 758 if (err) {
847 759 if (err == -EAGAIN)
760 goto out_not_moved;
848 if (err < 0) 761 if (err < 0)
849 goto out_error; 762 goto out_error;
850 if (err == 1) 763 if (err == 2) {
764 /* Target PEB write error, torture it */
765 torture = 1;
851 goto out_not_moved; 766 goto out_not_moved;
767 }
852 768
853 /* 769 /*
854 * For some reason the LEB was not moved - it might be because 770 * The LEB has not been moved because the volume is being
855 * the volume is being deleted. We should prevent this PEB from 771 * deleted or the PEB has been put meanwhile. We should prevent
856 * being selected for wear-levelling movement for some "time", 772 * this PEB from being selected for wear-leveling movement
857 * so put it to the protection tree. 773 * again, so put it to the protection queue.
858 */ 774 */
859 775
860 dbg_wl("cancelled moving PEB %d", e1->pnum); 776 dbg_wl("canceled moving PEB %d", e1->pnum);
861 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); 777 ubi_assert(err == 1);
862 if (!pe) { 778
863 err = -ENOMEM; 779 ubi_free_vid_hdr(ubi, vid_hdr);
864 goto out_error; 780 vid_hdr = NULL;
865 } 781
782 spin_lock(&ubi->wl_lock);
783 prot_queue_add(ubi, e1);
784 ubi_assert(!ubi->move_to_put);
785 ubi->move_from = ubi->move_to = NULL;
786 ubi->wl_scheduled = 0;
787 spin_unlock(&ubi->wl_lock);
866 788
867 protect = 1; 789 e1 = NULL;
790 err = schedule_erase(ubi, e2, 0);
791 if (err)
792 goto out_error;
793 mutex_unlock(&ubi->move_mutex);
794 return 0;
868 } 795 }
869 796
797 /* The PEB has been successfully moved */
870 ubi_free_vid_hdr(ubi, vid_hdr); 798 ubi_free_vid_hdr(ubi, vid_hdr);
871 if (scrubbing && !protect) 799 vid_hdr = NULL;
800 if (scrubbing)
872 ubi_msg("scrubbed PEB %d, data moved to PEB %d", 801 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
873 e1->pnum, e2->pnum); 802 e1->pnum, e2->pnum);
874 803
875 spin_lock(&ubi->wl_lock); 804 spin_lock(&ubi->wl_lock);
876 if (protect) 805 if (!ubi->move_to_put) {
877 prot_tree_add(ubi, e1, pe, protect);
878 if (!ubi->move_to_put)
879 wl_tree_add(e2, &ubi->used); 806 wl_tree_add(e2, &ubi->used);
880 else 807 e2 = NULL;
881 put = 1; 808 }
882 ubi->move_from = ubi->move_to = NULL; 809 ubi->move_from = ubi->move_to = NULL;
883 ubi->move_to_put = ubi->wl_scheduled = 0; 810 ubi->move_to_put = ubi->wl_scheduled = 0;
884 spin_unlock(&ubi->wl_lock); 811 spin_unlock(&ubi->wl_lock);
885 812
886 if (put) { 813 err = schedule_erase(ubi, e1, 0);
814 if (err) {
815 e1 = NULL;
816 goto out_error;
817 }
818
819 if (e2) {
887 /* 820 /*
888 * Well, the target PEB was put meanwhile, schedule it for 821 * Well, the target PEB was put meanwhile, schedule it for
889 * erasure. 822 * erasure.
@@ -894,13 +827,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
894 goto out_error; 827 goto out_error;
895 } 828 }
896 829
897 if (!protect) {
898 err = schedule_erase(ubi, e1, 0);
899 if (err)
900 goto out_error;
901 }
902
903
904 dbg_wl("done"); 830 dbg_wl("done");
905 mutex_unlock(&ubi->move_mutex); 831 mutex_unlock(&ubi->move_mutex);
906 return 0; 832 return 0;
@@ -908,20 +834,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
908 /* 834 /*
909 * For some reason the LEB was not moved, might be an error, might be 835 * For some reason the LEB was not moved, might be an error, might be
910 * something else. @e1 was not changed, so return it back. @e2 might 836 * something else. @e1 was not changed, so return it back. @e2 might
911 * be changed, schedule it for erasure. 837 * have been changed, schedule it for erasure.
912 */ 838 */
913out_not_moved: 839out_not_moved:
840 dbg_wl("canceled moving PEB %d", e1->pnum);
914 ubi_free_vid_hdr(ubi, vid_hdr); 841 ubi_free_vid_hdr(ubi, vid_hdr);
842 vid_hdr = NULL;
915 spin_lock(&ubi->wl_lock); 843 spin_lock(&ubi->wl_lock);
916 if (scrubbing) 844 if (scrubbing)
917 wl_tree_add(e1, &ubi->scrub); 845 wl_tree_add(e1, &ubi->scrub);
918 else 846 else
919 wl_tree_add(e1, &ubi->used); 847 wl_tree_add(e1, &ubi->used);
848 ubi_assert(!ubi->move_to_put);
920 ubi->move_from = ubi->move_to = NULL; 849 ubi->move_from = ubi->move_to = NULL;
921 ubi->move_to_put = ubi->wl_scheduled = 0; 850 ubi->wl_scheduled = 0;
922 spin_unlock(&ubi->wl_lock); 851 spin_unlock(&ubi->wl_lock);
923 852
924 err = schedule_erase(ubi, e2, 0); 853 e1 = NULL;
854 err = schedule_erase(ubi, e2, torture);
925 if (err) 855 if (err)
926 goto out_error; 856 goto out_error;
927 857
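
Editor's note: in the out_not_moved path above, schedule_erase() gains a third argument, torture. A hedged reading, sketched with invented helpers: the untouched source PEB returns to the tree it came from, while the possibly half-written target is erased, with a torture test when the cancellation looked like a media problem (that interpretation of the flag is our assumption, not stated in the hunk).

#include <stdbool.h>
#include <stdio.h>

/* Logging stand-ins for the kernel's RB-trees and erase work queue. */
static void tree_add(const char *tree, int pnum)
{
	printf("PEB %d returned to the %s tree\n", pnum, tree);
}

static int schedule_erase_model(int pnum, bool torture)
{
	printf("PEB %d scheduled for erase%s\n",
	       pnum, torture ? " with torture test" : "");
	return 0;
}

/* Mirrors the hunk's bookkeeping: e1 was never modified, so it goes
 * back to the scrub or used tree; e2 may hold a partial copy, so it
 * is always erased, and tortured when requested. */
static int cancel_move(int e1_pnum, int e2_pnum, bool scrubbing, bool torture)
{
	tree_add(scrubbing ? "scrub" : "used", e1_pnum);
	return schedule_erase_model(e2_pnum, torture);
}

int main(void)
{
	return cancel_move(10, 42, false, true);
}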
@@ -938,8 +868,10 @@ out_error:
938 ubi->move_to_put = ubi->wl_scheduled = 0; 868 ubi->move_to_put = ubi->wl_scheduled = 0;
939 spin_unlock(&ubi->wl_lock); 869 spin_unlock(&ubi->wl_lock);
940 870
941 kmem_cache_free(ubi_wl_entry_slab, e1); 871 if (e1)
942 kmem_cache_free(ubi_wl_entry_slab, e2); 872 kmem_cache_free(ubi_wl_entry_slab, e1);
873 if (e2)
874 kmem_cache_free(ubi_wl_entry_slab, e2);
943 ubi_ro_mode(ubi); 875 ubi_ro_mode(ubi);
944 876
945 mutex_unlock(&ubi->move_mutex); 877 mutex_unlock(&ubi->move_mutex);
@@ -985,10 +917,10 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
985 /* 917 /*
986 * We schedule wear-leveling only if the difference between the 918 * We schedule wear-leveling only if the difference between the
987 * lowest erase counter of used physical eraseblocks and a high 919 * lowest erase counter of used physical eraseblocks and a high
988 * erase counter of free physical eraseblocks is greater then 920 * erase counter of free physical eraseblocks is greater than
989 * %UBI_WL_THRESHOLD. 921 * %UBI_WL_THRESHOLD.
990 */ 922 */
991 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); 923 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
992 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 924 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
993 925
994 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) 926 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
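
Editor's note: the comment and comparison above carry the entire scheduling policy, so here it is as a standalone predicate. The value of UBI_WL_THRESHOLD is an assumption (4096 was the usual definition in wl.c of this era); only the inequality itself comes from the hunk.

#include <stdbool.h>
#include <stdio.h>

#define UBI_WL_THRESHOLD 4096	/* assumed; defined by the kernel */

/* The trigger from ensure_wear_leveling(), with the negation folded
 * in: move data only when a highly worn free PEB is far enough ahead
 * of the least-worn used PEB. */
static bool needs_wear_leveling(int lowest_used_ec, int highest_free_ec)
{
	return highest_free_ec - lowest_used_ec >= UBI_WL_THRESHOLD;
}

int main(void)
{
	printf("%d\n", needs_wear_leveling(100, 5000));	/* 1: schedule */
	printf("%d\n", needs_wear_leveling(100, 2000));	/* 0: skip */
	return 0;
}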
@@ -1050,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1050 kfree(wl_wrk); 982 kfree(wl_wrk);
1051 983
1052 spin_lock(&ubi->wl_lock); 984 spin_lock(&ubi->wl_lock);
1053 ubi->abs_ec += 1;
1054 wl_tree_add(e, &ubi->free); 985 wl_tree_add(e, &ubi->free);
1055 spin_unlock(&ubi->wl_lock); 986 spin_unlock(&ubi->wl_lock);
1056 987
@@ -1058,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1058 * One more erase operation has happened, take care about 989 * One more erase operation has happened, take care about
1059 * protected physical eraseblocks. 990 * protected physical eraseblocks.
1060 */ 991 */
1061 check_protection_over(ubi); 992 serve_prot_queue(ubi);
1062 993
1063 /* And take care about wear-leveling */ 994 /* And take care about wear-leveling */
1064 err = ensure_wear_leveling(ubi); 995 err = ensure_wear_leveling(ubi);
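
Editor's note: two things change in erase_worker() above. The global abs_ec accounting is dropped, and every completed erase now ages the protection queue by one step, serve_prot_queue() replacing check_protection_over(), before wear-leveling is re-evaluated. A compile-clean sketch of that ordering, with every helper reduced to a logging stand-in; only the sequence comes from the hunk:

#include <stdio.h>

static void lock(void)                       { puts("wl_lock"); }
static void unlock(void)                     { puts("wl_unlock"); }
static void free_tree_add(void)              { puts("PEB -> free tree"); }
static void serve_prot_queue_model(void)     { puts("age protection queue"); }
static int  ensure_wear_leveling_model(void) { puts("re-check WL"); return 0; }

/* Post-erase flow after this change: the freed PEB joins the free
 * tree under wl_lock, then the protection queue rotates once, then
 * wear-leveling is reconsidered. */
static int erase_worker_tail(void)
{
	lock();
	free_tree_add();
	unlock();

	serve_prot_queue_model();
	return ensure_wear_leveling_model();
}

int main(void)
{
	return erase_worker_tail();
}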
@@ -1190,12 +1121,12 @@ retry:
1190 } else { 1121 } else {
1191 if (in_wl_tree(e, &ubi->used)) { 1122 if (in_wl_tree(e, &ubi->used)) {
1192 paranoid_check_in_wl_tree(e, &ubi->used); 1123 paranoid_check_in_wl_tree(e, &ubi->used);
1193 rb_erase(&e->rb, &ubi->used); 1124 rb_erase(&e->u.rb, &ubi->used);
1194 } else if (in_wl_tree(e, &ubi->scrub)) { 1125 } else if (in_wl_tree(e, &ubi->scrub)) {
1195 paranoid_check_in_wl_tree(e, &ubi->scrub); 1126 paranoid_check_in_wl_tree(e, &ubi->scrub);
1196 rb_erase(&e->rb, &ubi->scrub); 1127 rb_erase(&e->u.rb, &ubi->scrub);
1197 } else { 1128 } else {
1198 err = prot_tree_del(ubi, e->pnum); 1129 err = prot_queue_del(ubi, e->pnum);
1199 if (err) { 1130 if (err) {
1200 ubi_err("PEB %d not found", pnum); 1131 ubi_err("PEB %d not found", pnum);
1201 ubi_ro_mode(ubi); 1132 ubi_ro_mode(ubi);
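
Editor's note: this hunk shows the lookup contract after the change. A PEB being put back is in exactly one of three places: the used tree, the scrub tree, or the protection queue, where prot_queue_del() replaces the old prot_tree_del(). A userspace sketch of that three-way dispatch; the enum and helpers are invented for illustration:

#include <stdio.h>

enum peb_home { HOME_USED, HOME_SCRUB, HOME_PQ, HOME_NOWHERE };

/* Every live PEB must be accounted for in exactly one structure;
 * anything else is the "PEB %d not found" path that forces the
 * kernel into read-only mode. */
static int detach_peb(enum peb_home home, int pnum)
{
	switch (home) {
	case HOME_USED:
		printf("rb_erase from used tree: PEB %d\n", pnum);
		return 0;
	case HOME_SCRUB:
		printf("rb_erase from scrub tree: PEB %d\n", pnum);
		return 0;
	case HOME_PQ:
		printf("list_del from protection queue: PEB %d\n", pnum);
		return 0;
	default:
		fprintf(stderr, "PEB %d not found\n", pnum);
		return -1;	/* kernel: ubi_ro_mode(ubi) */
	}
}

int main(void)
{
	return detach_peb(HOME_PQ, 7);
}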
@@ -1255,11 +1186,11 @@ retry:
1255 1186
1256 if (in_wl_tree(e, &ubi->used)) { 1187 if (in_wl_tree(e, &ubi->used)) {
1257 paranoid_check_in_wl_tree(e, &ubi->used); 1188 paranoid_check_in_wl_tree(e, &ubi->used);
1258 rb_erase(&e->rb, &ubi->used); 1189 rb_erase(&e->u.rb, &ubi->used);
1259 } else { 1190 } else {
1260 int err; 1191 int err;
1261 1192
1262 err = prot_tree_del(ubi, e->pnum); 1193 err = prot_queue_del(ubi, e->pnum);
1263 if (err) { 1194 if (err) {
1264 ubi_err("PEB %d not found", pnum); 1195 ubi_err("PEB %d not found", pnum);
1265 ubi_ro_mode(ubi); 1196 ubi_ro_mode(ubi);
@@ -1290,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
1290 int err; 1221 int err;
1291 1222
1292 /* 1223 /*
1293 * Erase while the pending works queue is not empty, but not more then 1224 * Erase while the pending works queue is not empty, but not more than
1294 * the number of currently pending works. 1225 * the number of currently pending works.
1295 */ 1226 */
1296 dbg_wl("flush (%d pending works)", ubi->works_count); 1227 dbg_wl("flush (%d pending works)", ubi->works_count);
@@ -1308,7 +1239,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
1308 up_write(&ubi->work_sem); 1239 up_write(&ubi->work_sem);
1309 1240
1310 /* 1241 /*
1311 * And in case last was the WL worker and it cancelled the LEB 1242 * And in case last was the WL worker and it canceled the LEB
1312 * movement, flush again. 1243 * movement, flush again.
1313 */ 1244 */
1314 while (ubi->works_count) { 1245 while (ubi->works_count) {
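
Editor's note: the two comments above fix ubi_wl_flush()'s shape. It drains at most the number of works pending at entry, then re-checks, because the last work may have been a WL worker whose canceled move queued a fresh erase. A toy drain-and-recheck loop; the counters and do_work_model() are stand-ins:

#include <stdio.h>

static int works_count = 3;
static int canceled_move_pending = 1;

/* Stand-in for do_work(): retire one work item; the first time the
 * queue drains, pretend a canceled WL move queued one more erase. */
static void do_work_model(void)
{
	works_count--;
	if (works_count == 0 && canceled_move_pending) {
		canceled_move_pending = 0;
		works_count = 1;
	}
}

int main(void)
{
	int i, pending = works_count;

	/* First pass: at most the number of works pending at entry. */
	for (i = 0; i < pending; i++)
		do_work_model();

	/* Second pass: flush whatever the canceled move added. */
	while (works_count)
		do_work_model();

	puts("flushed");
	return 0;
}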
@@ -1337,11 +1268,11 @@ static void tree_destroy(struct rb_root *root)
1337 else if (rb->rb_right) 1268 else if (rb->rb_right)
1338 rb = rb->rb_right; 1269 rb = rb->rb_right;
1339 else { 1270 else {
1340 e = rb_entry(rb, struct ubi_wl_entry, rb); 1271 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1341 1272
1342 rb = rb_parent(rb); 1273 rb = rb_parent(rb);
1343 if (rb) { 1274 if (rb) {
1344 if (rb->rb_left == &e->rb) 1275 if (rb->rb_left == &e->u.rb)
1345 rb->rb_left = NULL; 1276 rb->rb_left = NULL;
1346 else 1277 else
1347 rb->rb_right = NULL; 1278 rb->rb_right = NULL;
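
Editor's note: tree_destroy(), shown above with the rb field renamed to u.rb, frees the whole RB-tree without recursion: dive to a leaf, free it, null the parent's link, climb back up, presumably to keep kernel stack usage bounded on deep trees. The same walk on a plain binary tree, as a self-contained sketch:

#include <stdlib.h>

struct node {
	struct node *left, *right, *parent;
};

/* Same bottom-up, recursion-free walk as tree_destroy(): always
 * descend to a leaf, clear the pointer the parent used to reach it
 * so it is never revisited, then free it and resume at the parent. */
static void destroy(struct node *n)
{
	while (n) {
		if (n->left)
			n = n->left;
		else if (n->right)
			n = n->right;
		else {
			struct node *parent = n->parent;

			if (parent) {
				if (parent->left == n)
					parent->left = NULL;
				else
					parent->right = NULL;
			}
			free(n);
			n = parent;
		}
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *kid = calloc(1, sizeof(*kid));

	root->left = kid;
	kid->parent = root;
	destroy(root);
	return 0;
}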
@@ -1436,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi)
1436 */ 1367 */
1437int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) 1368int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1438{ 1369{
1439 int err; 1370 int err, i;
1440 struct rb_node *rb1, *rb2; 1371 struct rb_node *rb1, *rb2;
1441 struct ubi_scan_volume *sv; 1372 struct ubi_scan_volume *sv;
1442 struct ubi_scan_leb *seb, *tmp; 1373 struct ubi_scan_leb *seb, *tmp;
1443 struct ubi_wl_entry *e; 1374 struct ubi_wl_entry *e;
1444 1375
1445
1446 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1376 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1447 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1448 spin_lock_init(&ubi->wl_lock); 1377 spin_lock_init(&ubi->wl_lock);
1449 mutex_init(&ubi->move_mutex); 1378 mutex_init(&ubi->move_mutex);
1450 init_rwsem(&ubi->work_sem); 1379 init_rwsem(&ubi->work_sem);
@@ -1458,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1458 if (!ubi->lookuptbl) 1387 if (!ubi->lookuptbl)
1459 return err; 1388 return err;
1460 1389
1390 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1391 INIT_LIST_HEAD(&ubi->pq[i]);
1392 ubi->pq_head = 0;
1393
1461 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1394 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1462 cond_resched(); 1395 cond_resched();
1463 1396
@@ -1552,33 +1485,18 @@ out_free:
1552} 1485}
1553 1486
1554/** 1487/**
1555 * protection_trees_destroy - destroy the protection RB-trees. 1488 * protection_queue_destroy - destroy the protection queue.
1556 * @ubi: UBI device description object 1489 * @ubi: UBI device description object
1557 */ 1490 */
1558static void protection_trees_destroy(struct ubi_device *ubi) 1491static void protection_queue_destroy(struct ubi_device *ubi)
1559{ 1492{
1560 struct rb_node *rb; 1493 int i;
1561 struct ubi_wl_prot_entry *pe; 1494 struct ubi_wl_entry *e, *tmp;
1562
1563 rb = ubi->prot.aec.rb_node;
1564 while (rb) {
1565 if (rb->rb_left)
1566 rb = rb->rb_left;
1567 else if (rb->rb_right)
1568 rb = rb->rb_right;
1569 else {
1570 pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1571
1572 rb = rb_parent(rb);
1573 if (rb) {
1574 if (rb->rb_left == &pe->rb_aec)
1575 rb->rb_left = NULL;
1576 else
1577 rb->rb_right = NULL;
1578 }
1579 1495
1580 kmem_cache_free(ubi_wl_entry_slab, pe->e); 1496 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1581 kfree(pe); 1497 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1498 list_del(&e->u.list);
1499 kmem_cache_free(ubi_wl_entry_slab, e);
1582 } 1500 }
1583 } 1501 }
1584} 1502}
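
Editor's note: protection_queue_destroy() above is the payoff of the RB-tree to queue swap: teardown shrinks from a manual tree walk to two nested loops over list heads, and the _safe iterator matters because each entry is unlinked and freed mid-walk. The same pattern on a minimal singly linked list:

#include <stdlib.h>

struct entry {
	struct entry *next;
};

/* Same idea as list_for_each_entry_safe(): remember the next pointer
 * before freeing the current entry, since the entry's own links are
 * gone once it is freed. */
static void queue_destroy(struct entry *head)
{
	struct entry *e, *tmp;

	for (e = head; e; e = tmp) {
		tmp = e->next;	/* the "safe" part */
		free(e);
	}
}

int main(void)
{
	struct entry *a = calloc(1, sizeof(*a));
	struct entry *b = calloc(1, sizeof(*b));

	a->next = b;
	queue_destroy(a);
	return 0;
}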
@@ -1591,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi)
1591{ 1509{
1592 dbg_wl("close the WL sub-system"); 1510 dbg_wl("close the WL sub-system");
1593 cancel_pending(ubi); 1511 cancel_pending(ubi);
1594 protection_trees_destroy(ubi); 1512 protection_queue_destroy(ubi);
1595 tree_destroy(&ubi->used); 1513 tree_destroy(&ubi->used);
1596 tree_destroy(&ubi->free); 1514 tree_destroy(&ubi->free);
1597 tree_destroy(&ubi->scrub); 1515 tree_destroy(&ubi->scrub);
@@ -1661,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1661 return 1; 1579 return 1;
1662} 1580}
1663 1581
1582/**
1583 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
1584 * queue.
1585 * @ubi: UBI device description object
1586 * @e: the wear-leveling entry to check
1587 *
1588 * This function returns zero if @e is in @ubi->pq and %1 if it is not.
1589 */
1590static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
1591{
1592 struct ubi_wl_entry *p;
1593 int i;
1594
1595 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1596 list_for_each_entry(p, &ubi->pq[i], u.list)
1597 if (p == e)
1598 return 0;
1599
1600 ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
1601 e->pnum, e->ec);
1602 ubi_dbg_dump_stack();
1603 return 1;
1604}
1664#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ 1605#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
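
Editor's note: one detail of paranoid_check_in_pq() worth calling out is the inverted return convention, zero when @e is found in the queue and 1 when the check fails, and in the kernel it only exists under CONFIG_MTD_UBI_DEBUG_PARANOID. A toy caller honoring that contract; the array-based queue is our stand-in:

#include <assert.h>
#include <stdio.h>

/* Same contract as paranoid_check_in_pq(): returns 0 when pnum is
 * found in the queue, 1 when it is not. */
static int check_in_pq(const int *pq, int len, int pnum)
{
	int i;

	for (i = 0; i < len; i++)
		if (pq[i] == pnum)
			return 0;
	fprintf(stderr, "paranoid check failed for PEB %d\n", pnum);
	return 1;
}

int main(void)
{
	int pq[] = { 3, 7, 11 };

	/* Success is zero, so the assertion is on the negation. */
	assert(!check_in_pq(pq, 3, 7));
	return 0;
}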