author     David Woodhouse <David.Woodhouse@intel.com>  2009-01-05 04:50:33 -0500
committer  David Woodhouse <David.Woodhouse@intel.com>  2009-01-05 04:50:33 -0500
commit     353816f43d1fb340ff2d9a911dd5d0799c09f6a5 (patch)
tree       517290fd884d286fe2971137ac89f89e3567785a /drivers/mtd
parent     160bbab3000dafccbe43688e48208cecf4deb879 (diff)
parent     fe0bdec68b77020281dc814805edfe594ae89e0f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	arch/arm/mach-pxa/corgi.c
	arch/arm/mach-pxa/poodle.c
	arch/arm/mach-pxa/spitz.c
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/maps/dc21285.c     |   7
-rw-r--r--  drivers/mtd/maps/ixp2000.c     |   2
-rw-r--r--  drivers/mtd/maps/ixp4xx.c      |   2
-rw-r--r--  drivers/mtd/nand/Kconfig       |   2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c |   4
-rw-r--r--  drivers/mtd/nand/s3c2410.c     |   8
-rw-r--r--  drivers/mtd/onenand/omap2.c    |  10
-rw-r--r--  drivers/mtd/ubi/build.c        |   5
-rw-r--r--  drivers/mtd/ubi/cdev.c         |   3
-rw-r--r--  drivers/mtd/ubi/debug.h        |  10
-rw-r--r--  drivers/mtd/ubi/eba.c          |  51
-rw-r--r--  drivers/mtd/ubi/io.c           |  28
-rw-r--r--  drivers/mtd/ubi/ubi.h          |  45
-rw-r--r--  drivers/mtd/ubi/wl.c           | 489
14 files changed, 318 insertions, 348 deletions
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 3aa018c092f8..42969fe051b2 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -32,16 +32,15 @@ static struct mtd_info *dc21285_mtd;
32 */ 32 */
33static void nw_en_write(void) 33static void nw_en_write(void)
34{ 34{
35 extern spinlock_t gpio_lock;
36 unsigned long flags; 35 unsigned long flags;
37 36
38 /* 37 /*
39 * we want to write a bit pattern XXX1 to Xilinx to enable 38 * we want to write a bit pattern XXX1 to Xilinx to enable
40 * the write gate, which will be open for about the next 2ms. 39 * the write gate, which will be open for about the next 2ms.
41 */ 40 */
42 spin_lock_irqsave(&gpio_lock, flags); 41 spin_lock_irqsave(&nw_gpio_lock, flags);
43 cpld_modify(1, 1); 42 nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
44 spin_unlock_irqrestore(&gpio_lock, flags); 43 spin_unlock_irqrestore(&nw_gpio_lock, flags);
45 44
46 /* 45 /*
47 * let the ISA bus to catch on... 46 * let the ISA bus to catch on...
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index d76880d91bdb..d4fb9a3ab4df 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -170,7 +170,7 @@ static int ixp2000_flash_probe(struct platform_device *dev)
170 err = -ENOMEM; 170 err = -ENOMEM;
171 goto Error; 171 goto Error;
172 } 172 }
173 memzero(info, sizeof(struct ixp2000_flash_info)); 173 memset(info, 0, sizeof(struct ixp2000_flash_info));
174 174
175 platform_set_drvdata(dev, info); 175 platform_set_drvdata(dev, info);
176 176
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 4d0be2f1503f..7214b876feba 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -201,7 +201,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev)
201 err = -ENOMEM; 201 err = -ENOMEM;
202 goto Error; 202 goto Error;
203 } 203 }
204 memzero(info, sizeof(struct ixp4xx_flash_info)); 204 memset(info, 0, sizeof(struct ixp4xx_flash_info));
205 205
206 platform_set_drvdata(dev, info); 206 platform_set_drvdata(dev, info);
207 207
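Both the ixp2000.c and ixp4xx.c hunks above drop the ARM-private memzero() helper in favour of the standard memset(). The portable idiom is simply memset(ptr, 0, size); a minimal user-space sketch of the same pattern (the struct name here is a hypothetical stand-in, not taken from the drivers):

#include <stdlib.h>
#include <string.h>

struct flash_info {		/* illustrative stand-in for ixp4xx_flash_info */
	void *base;
	unsigned long size;
};

int main(void)
{
	struct flash_info *info = malloc(sizeof(*info));

	if (!info)
		return 1;
	/* memzero(info, sizeof(*info)) becomes the portable form: */
	memset(info, 0, sizeof(*info));
	free(info);
	return 0;
}

In newer kernel code the allocation and the clearing would normally be collapsed into a single kzalloc() call, but this patch only swaps the zeroing helper.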
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1c2e9450d663..f8ae0400c49c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -408,7 +408,7 @@ config MTD_NAND_FSL_UPM
408 408
409config MTD_NAND_MXC 409config MTD_NAND_MXC
410 tristate "MXC NAND support" 410 tristate "MXC NAND support"
411 depends on ARCH_MX2 411 depends on ARCH_MX2 || ARCH_MX3
412 help 412 help
413 This enables the driver for the NAND flash controller on the 413 This enables the driver for the NAND flash controller on the
414 MXC processors. 414 MXC processors.
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 15f0a26730ae..fc4144495610 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -20,8 +20,8 @@
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <asm/dma.h>
24 23
24#include <mach/dma.h>
25#include <mach/pxa-regs.h> 25#include <mach/pxa-regs.h>
26#include <mach/pxa3xx_nand.h> 26#include <mach/pxa3xx_nand.h>
27 27
@@ -1080,7 +1080,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1080 this = &info->nand_chip; 1080 this = &info->nand_chip;
1081 mtd->priv = info; 1081 mtd->priv = info;
1082 1082
1083 info->clk = clk_get(&pdev->dev, "NANDCLK"); 1083 info->clk = clk_get(&pdev->dev, NULL);
1084 if (IS_ERR(info->clk)) { 1084 if (IS_ERR(info->clk)) {
1085 dev_err(&pdev->dev, "failed to get nand clock\n"); 1085 dev_err(&pdev->dev, "failed to get nand clock\n");
1086 ret = PTR_ERR(info->clk); 1086 ret = PTR_ERR(info->clk);
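The pxa3xx_nand probe now passes a NULL connection ID to clk_get(), so the clock lookup keys on the device itself instead of on the "NANDCLK" name. A minimal sketch of that lookup pattern in a hypothetical probe function (example_probe is not part of this patch; the clock-consumer calls are the standard clk_get/clk_enable/clk_put API):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	/* NULL con_id: match the clock registered for this device */
	clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(clk);
	}

	clk_enable(clk);
	/* ... set up the controller ... */
	clk_disable(clk);
	clk_put(clk);
	return 0;
}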
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 556139ed1fdf..8e375d5fe231 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -45,8 +45,8 @@
45 45
46#include <asm/io.h> 46#include <asm/io.h>
47 47
48#include <asm/plat-s3c/regs-nand.h> 48#include <plat/regs-nand.h>
49#include <asm/plat-s3c/nand.h> 49#include <plat/nand.h>
50 50
51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC 51#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
52static int hardware_ecc = 1; 52static int hardware_ecc = 1;
@@ -818,7 +818,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
818 goto exit_error; 818 goto exit_error;
819 } 819 }
820 820
821 memzero(info, sizeof(*info)); 821 memset(info, 0, sizeof(*info));
822 platform_set_drvdata(pdev, info); 822 platform_set_drvdata(pdev, info);
823 823
824 spin_lock_init(&info->controller.lock); 824 spin_lock_init(&info->controller.lock);
@@ -883,7 +883,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev,
883 goto exit_error; 883 goto exit_error;
884 } 884 }
885 885
886 memzero(info->mtds, size); 886 memset(info->mtds, 0, size);
887 887
888 /* initialise all possible chips */ 888 /* initialise all possible chips */
889 889
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 710edee790b5..96ecc1766fa8 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -149,7 +149,7 @@ static int omap2_onenand_wait(struct mtd_info *mtd, int state)
149 149
150 INIT_COMPLETION(c->irq_done); 150 INIT_COMPLETION(c->irq_done);
151 if (c->gpio_irq) { 151 if (c->gpio_irq) {
152 result = omap_get_gpio_datain(c->gpio_irq); 152 result = gpio_get_value(c->gpio_irq);
153 if (result == -1) { 153 if (result == -1) {
154 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS); 154 ctrl = read_reg(c, ONENAND_REG_CTRL_STATUS);
155 intr = read_reg(c, ONENAND_REG_INTERRUPT); 155 intr = read_reg(c, ONENAND_REG_INTERRUPT);
@@ -634,9 +634,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
634 "OneNAND\n", c->gpio_irq); 634 "OneNAND\n", c->gpio_irq);
635 goto err_iounmap; 635 goto err_iounmap;
636 } 636 }
637 omap_set_gpio_direction(c->gpio_irq, 1); 637 gpio_direction_input(c->gpio_irq);
638 638
639 if ((r = request_irq(OMAP_GPIO_IRQ(c->gpio_irq), 639 if ((r = request_irq(gpio_to_irq(c->gpio_irq),
640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING, 640 omap2_onenand_interrupt, IRQF_TRIGGER_RISING,
641 pdev->dev.driver->name, c)) < 0) 641 pdev->dev.driver->name, c)) < 0)
642 goto err_release_gpio; 642 goto err_release_gpio;
@@ -723,7 +723,7 @@ err_release_dma:
723 if (c->dma_channel != -1) 723 if (c->dma_channel != -1)
724 omap_free_dma(c->dma_channel); 724 omap_free_dma(c->dma_channel);
725 if (c->gpio_irq) 725 if (c->gpio_irq)
726 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 726 free_irq(gpio_to_irq(c->gpio_irq), c);
727err_release_gpio: 727err_release_gpio:
728 if (c->gpio_irq) 728 if (c->gpio_irq)
729 omap_free_gpio(c->gpio_irq); 729 omap_free_gpio(c->gpio_irq);
@@ -760,7 +760,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
760 omap2_onenand_shutdown(pdev); 760 omap2_onenand_shutdown(pdev);
761 platform_set_drvdata(pdev, NULL); 761 platform_set_drvdata(pdev, NULL);
762 if (c->gpio_irq) { 762 if (c->gpio_irq) {
763 free_irq(OMAP_GPIO_IRQ(c->gpio_irq), c); 763 free_irq(gpio_to_irq(c->gpio_irq), c);
764 omap_free_gpio(c->gpio_irq); 764 omap_free_gpio(c->gpio_irq);
765 } 765 }
766 iounmap(c->onenand.base); 766 iounmap(c->onenand.base);
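The OMAP2 OneNAND hunks above replace the OMAP-private GPIO calls (omap_get_gpio_datain(), omap_set_gpio_direction(), OMAP_GPIO_IRQ()) with the generic gpiolib API. A hedged sketch of the resulting pattern for a hypothetical interrupt GPIO follows; note the driver still uses omap_request_gpio()/omap_free_gpio(), which this patch does not touch, so the gpio_request()/gpio_free() calls below are my illustration rather than part of the change:

#include <linux/gpio.h>
#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_irq_gpio(unsigned gpio, void *ctx)
{
	int err;

	err = gpio_request(gpio, "example-irq");
	if (err)
		return err;

	gpio_direction_input(gpio);		/* was omap_set_gpio_direction(gpio, 1) */

	err = request_irq(gpio_to_irq(gpio),	/* was OMAP_GPIO_IRQ(gpio) */
			  example_isr, IRQF_TRIGGER_RISING,
			  "example", ctx);
	if (err)
		gpio_free(gpio);
	return err;
}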
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 84a134ead7cc..9082768cc6c3 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -815,19 +815,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
815 if (err) 815 if (err)
816 goto out_free; 816 goto out_free;
817 817
818 err = -ENOMEM;
818 ubi->peb_buf1 = vmalloc(ubi->peb_size); 819 ubi->peb_buf1 = vmalloc(ubi->peb_size);
819 if (!ubi->peb_buf1) 820 if (!ubi->peb_buf1)
820 goto out_free; 821 goto out_free;
821 822
822 ubi->peb_buf2 = vmalloc(ubi->peb_size); 823 ubi->peb_buf2 = vmalloc(ubi->peb_size);
823 if (!ubi->peb_buf2) 824 if (!ubi->peb_buf2)
824 goto out_free; 825 goto out_free;
825 826
826#ifdef CONFIG_MTD_UBI_DEBUG 827#ifdef CONFIG_MTD_UBI_DEBUG
827 mutex_init(&ubi->dbg_buf_mutex); 828 mutex_init(&ubi->dbg_buf_mutex);
828 ubi->dbg_peb_buf = vmalloc(ubi->peb_size); 829 ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
829 if (!ubi->dbg_peb_buf) 830 if (!ubi->dbg_peb_buf)
830 goto out_free; 831 goto out_free;
831#endif 832#endif
832 833
833 err = attach_by_scanning(ubi); 834 err = attach_by_scanning(ubi);
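The build.c hunk sets err = -ENOMEM once, before the chain of vmalloc() calls, so every failed allocation can simply "goto out_free" and the function still returns a meaningful error code. The same pattern in a small self-contained sketch (the names are illustrative, not from UBI):

#include <errno.h>
#include <stdlib.h>

struct bufs {
	void *buf1;
	void *buf2;
};

static int alloc_bufs(struct bufs *b, size_t size)
{
	int err;

	b->buf1 = b->buf2 = NULL;
	err = -ENOMEM;		/* set once; every failed step below reuses it */

	b->buf1 = malloc(size);
	if (!b->buf1)
		goto out_free;

	b->buf2 = malloc(size);
	if (!b->buf2)
		goto out_free;

	return 0;

out_free:
	free(b->buf1);		/* free(NULL) is a no-op, so one error path suffices */
	free(b->buf2);
	return err;
}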
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index b30a0b83d7f1..98cf31ed0814 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_device *ubi,
721 * It seems we need to remove volume with name @re->new_name, 721 * It seems we need to remove volume with name @re->new_name,
722 * if it exists. 722 * if it exists.
723 */ 723 */
724 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); 724 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
725 UBI_EXCLUSIVE);
725 if (IS_ERR(desc)) { 726 if (IS_ERR(desc)) {
726 err = PTR_ERR(desc); 727 err = PTR_ERR(desc);
727 if (err == -ENODEV) 728 if (err == -ENODEV)
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 78e914d23ece..13777e5beac9 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -27,11 +27,11 @@
27#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) 27#define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)
28 28
29#define ubi_assert(expr) do { \ 29#define ubi_assert(expr) do { \
30 if (unlikely(!(expr))) { \ 30 if (unlikely(!(expr))) { \
31 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ 31 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \
32 __func__, __LINE__, current->pid); \ 32 __func__, __LINE__, current->pid); \
33 ubi_dbg_dump_stack(); \ 33 ubi_dbg_dump_stack(); \
34 } \ 34 } \
35} while (0) 35} while (0)
36 36
37#define dbg_msg(fmt, ...) \ 37#define dbg_msg(fmt, ...) \
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index d8966bae0e0b..048a606cebde 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -504,12 +504,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
504 if (!vid_hdr) 504 if (!vid_hdr)
505 return -ENOMEM; 505 return -ENOMEM;
506 506
507 mutex_lock(&ubi->buf_mutex);
508
509retry: 507retry:
510 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN); 508 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
511 if (new_pnum < 0) { 509 if (new_pnum < 0) {
512 mutex_unlock(&ubi->buf_mutex);
513 ubi_free_vid_hdr(ubi, vid_hdr); 510 ubi_free_vid_hdr(ubi, vid_hdr);
514 return new_pnum; 511 return new_pnum;
515 } 512 }
@@ -529,20 +526,23 @@ retry:
529 goto write_error; 526 goto write_error;
530 527
531 data_size = offset + len; 528 data_size = offset + len;
529 mutex_lock(&ubi->buf_mutex);
532 memset(ubi->peb_buf1 + offset, 0xFF, len); 530 memset(ubi->peb_buf1 + offset, 0xFF, len);
533 531
534 /* Read everything before the area where the write failure happened */ 532 /* Read everything before the area where the write failure happened */
535 if (offset > 0) { 533 if (offset > 0) {
536 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset); 534 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
537 if (err && err != UBI_IO_BITFLIPS) 535 if (err && err != UBI_IO_BITFLIPS)
538 goto out_put; 536 goto out_unlock;
539 } 537 }
540 538
541 memcpy(ubi->peb_buf1 + offset, buf, len); 539 memcpy(ubi->peb_buf1 + offset, buf, len);
542 540
543 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size); 541 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
544 if (err) 542 if (err) {
543 mutex_unlock(&ubi->buf_mutex);
545 goto write_error; 544 goto write_error;
545 }
546 546
547 mutex_unlock(&ubi->buf_mutex); 547 mutex_unlock(&ubi->buf_mutex);
548 ubi_free_vid_hdr(ubi, vid_hdr); 548 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -553,8 +553,9 @@ retry:
553 ubi_msg("data was successfully recovered"); 553 ubi_msg("data was successfully recovered");
554 return 0; 554 return 0;
555 555
556out_put: 556out_unlock:
557 mutex_unlock(&ubi->buf_mutex); 557 mutex_unlock(&ubi->buf_mutex);
558out_put:
558 ubi_wl_put_peb(ubi, new_pnum, 1); 559 ubi_wl_put_peb(ubi, new_pnum, 1);
559 ubi_free_vid_hdr(ubi, vid_hdr); 560 ubi_free_vid_hdr(ubi, vid_hdr);
560 return err; 561 return err;
@@ -567,7 +568,6 @@ write_error:
567 ubi_warn("failed to write to PEB %d", new_pnum); 568 ubi_warn("failed to write to PEB %d", new_pnum);
568 ubi_wl_put_peb(ubi, new_pnum, 1); 569 ubi_wl_put_peb(ubi, new_pnum, 1);
569 if (++tries > UBI_IO_RETRIES) { 570 if (++tries > UBI_IO_RETRIES) {
570 mutex_unlock(&ubi->buf_mutex);
571 ubi_free_vid_hdr(ubi, vid_hdr); 571 ubi_free_vid_hdr(ubi, vid_hdr);
572 return err; 572 return err;
573 } 573 }
@@ -949,10 +949,14 @@ write_error:
949 * This function copies logical eraseblock from physical eraseblock @from to 949 * This function copies logical eraseblock from physical eraseblock @from to
950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this 950 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
951 * function. Returns: 951 * function. Returns:
952 * o %0 in case of success; 952 * o %0 in case of success;
953 * o %1 if the operation was canceled and should be tried later (e.g., 953 * o %1 if the operation was canceled because the volume is being deleted
954 * because a bit-flip was detected at the target PEB); 954 * or because the PEB was put meanwhile;
955 * o %2 if the volume is being deleted and this LEB should not be moved. 955 * o %2 if the operation was canceled because there was a write error to the
956 * target PEB;
957 * o %-EAGAIN if the operation was canceled because a bit-flip was detected
958 * in the target PEB;
959 * o a negative error code in case of failure.
956 */ 960 */
957int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 961int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
958 struct ubi_vid_hdr *vid_hdr) 962 struct ubi_vid_hdr *vid_hdr)
@@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
978 /* 982 /*
979 * Note, we may race with volume deletion, which means that the volume 983 * Note, we may race with volume deletion, which means that the volume
980 * this logical eraseblock belongs to might be being deleted. Since the 984 * this logical eraseblock belongs to might be being deleted. Since the
981 * volume deletion unmaps all the volume's logical eraseblocks, it will 985 * volume deletion un-maps all the volume's logical eraseblocks, it will
982 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. 986 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
983 */ 987 */
984 vol = ubi->volumes[idx]; 988 vol = ubi->volumes[idx];
@@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
986 /* No need to do further work, cancel */ 990 /* No need to do further work, cancel */
987 dbg_eba("volume %d is being removed, cancel", vol_id); 991 dbg_eba("volume %d is being removed, cancel", vol_id);
988 spin_unlock(&ubi->volumes_lock); 992 spin_unlock(&ubi->volumes_lock);
989 return 2; 993 return 1;
990 } 994 }
991 spin_unlock(&ubi->volumes_lock); 995 spin_unlock(&ubi->volumes_lock);
992 996
@@ -1023,7 +1027,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1023 1027
1024 /* 1028 /*
1025 * OK, now the LEB is locked and we can safely start moving it. Since 1029 * OK, now the LEB is locked and we can safely start moving it. Since
1026 * this function utilizes thie @ubi->peb1_buf buffer which is shared 1030 * this function utilizes the @ubi->peb1_buf buffer which is shared
1027 * with some other functions, so lock the buffer by taking the 1031 * with some other functions, so lock the buffer by taking the
1028 * @ubi->buf_mutex. 1032 * @ubi->buf_mutex.
1029 */ 1033 */
@@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1068 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 1072 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
1069 1073
1070 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1074 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1071 if (err) 1075 if (err) {
1076 if (err == -EIO)
1077 err = 2;
1072 goto out_unlock_buf; 1078 goto out_unlock_buf;
1079 }
1073 1080
1074 cond_resched(); 1081 cond_resched();
1075 1082
@@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1079 if (err != UBI_IO_BITFLIPS) 1086 if (err != UBI_IO_BITFLIPS)
1080 ubi_warn("cannot read VID header back from PEB %d", to); 1087 ubi_warn("cannot read VID header back from PEB %d", to);
1081 else 1088 else
1082 err = 1; 1089 err = -EAGAIN;
1083 goto out_unlock_buf; 1090 goto out_unlock_buf;
1084 } 1091 }
1085 1092
1086 if (data_size > 0) { 1093 if (data_size > 0) {
1087 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); 1094 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1088 if (err) 1095 if (err) {
1096 if (err == -EIO)
1097 err = 2;
1089 goto out_unlock_buf; 1098 goto out_unlock_buf;
1099 }
1090 1100
1091 cond_resched(); 1101 cond_resched();
1092 1102
@@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1101 ubi_warn("cannot read data back from PEB %d", 1111 ubi_warn("cannot read data back from PEB %d",
1102 to); 1112 to);
1103 else 1113 else
1104 err = 1; 1114 err = -EAGAIN;
1105 goto out_unlock_buf; 1115 goto out_unlock_buf;
1106 } 1116 }
1107 1117
1108 cond_resched(); 1118 cond_resched();
1109 1119
1110 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) { 1120 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1111 ubi_warn("read data back from PEB %d - it is different", 1121 ubi_warn("read data back from PEB %d and it is "
1112 to); 1122 "different", to);
1123 err = -EINVAL;
1113 goto out_unlock_buf; 1124 goto out_unlock_buf;
1114 } 1125 }
1115 } 1126 }
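With this change ubi_eba_copy_leb() distinguishes its "not moved" outcomes: 0 is success, 1 means the move was canceled (the volume is being deleted or the PEB was put meanwhile), 2 means a write error on the target PEB, -EAGAIN means a bit-flip was detected on the target, and other negative values are real errors. The caller, wear_leveling_worker() in wl.c (see the hunks further down), reacts roughly as in this hypothetical helper, which only summarizes that dispatch:

#include <errno.h>

enum move_action {
	MOVE_DONE,		/* LEB copied, source PEB can be erased */
	MOVE_RETRY_LATER,	/* bit-flip on target PEB, try again later */
	MOVE_TORTURE_TARGET,	/* write error on target PEB, torture it */
	MOVE_PROTECT_SOURCE,	/* volume deleted or PEB put, protect source */
	MOVE_FATAL		/* real error, UBI goes read-only */
};

static enum move_action classify_copy_result(int err)
{
	if (err == 0)
		return MOVE_DONE;
	if (err == -EAGAIN)
		return MOVE_RETRY_LATER;
	if (err < 0)
		return MOVE_FATAL;
	if (err == 2)
		return MOVE_TORTURE_TARGET;
	return MOVE_PROTECT_SOURCE;	/* err == 1 */
}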
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 2fb64be44f1b..a74118c05745 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -637,8 +637,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
637 637
638 dbg_io("read EC header from PEB %d", pnum); 638 dbg_io("read EC header from PEB %d", pnum);
639 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 639 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
640 if (UBI_IO_DEBUG)
641 verbose = 1;
642 640
643 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); 641 err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
644 if (err) { 642 if (err) {
@@ -685,6 +683,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
685 if (verbose) 683 if (verbose)
686 ubi_warn("no EC header found at PEB %d, " 684 ubi_warn("no EC header found at PEB %d, "
687 "only 0xFF bytes", pnum); 685 "only 0xFF bytes", pnum);
686 else if (UBI_IO_DEBUG)
687 dbg_msg("no EC header found at PEB %d, "
688 "only 0xFF bytes", pnum);
688 return UBI_IO_PEB_EMPTY; 689 return UBI_IO_PEB_EMPTY;
689 } 690 }
690 691
@@ -696,7 +697,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
696 ubi_warn("bad magic number at PEB %d: %08x instead of " 697 ubi_warn("bad magic number at PEB %d: %08x instead of "
697 "%08x", pnum, magic, UBI_EC_HDR_MAGIC); 698 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
698 ubi_dbg_dump_ec_hdr(ec_hdr); 699 ubi_dbg_dump_ec_hdr(ec_hdr);
699 } 700 } else if (UBI_IO_DEBUG)
701 dbg_msg("bad magic number at PEB %d: %08x instead of "
702 "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
700 return UBI_IO_BAD_EC_HDR; 703 return UBI_IO_BAD_EC_HDR;
701 } 704 }
702 705
@@ -708,7 +711,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
708 ubi_warn("bad EC header CRC at PEB %d, calculated " 711 ubi_warn("bad EC header CRC at PEB %d, calculated "
709 "%#08x, read %#08x", pnum, crc, hdr_crc); 712 "%#08x, read %#08x", pnum, crc, hdr_crc);
710 ubi_dbg_dump_ec_hdr(ec_hdr); 713 ubi_dbg_dump_ec_hdr(ec_hdr);
711 } 714 } else if (UBI_IO_DEBUG)
715 dbg_msg("bad EC header CRC at PEB %d, calculated "
716 "%#08x, read %#08x", pnum, crc, hdr_crc);
712 return UBI_IO_BAD_EC_HDR; 717 return UBI_IO_BAD_EC_HDR;
713 } 718 }
714 719
@@ -912,8 +917,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
912 917
913 dbg_io("read VID header from PEB %d", pnum); 918 dbg_io("read VID header from PEB %d", pnum);
914 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 919 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
915 if (UBI_IO_DEBUG)
916 verbose = 1;
917 920
918 p = (char *)vid_hdr - ubi->vid_hdr_shift; 921 p = (char *)vid_hdr - ubi->vid_hdr_shift;
919 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, 922 err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -960,6 +963,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
960 if (verbose) 963 if (verbose)
961 ubi_warn("no VID header found at PEB %d, " 964 ubi_warn("no VID header found at PEB %d, "
962 "only 0xFF bytes", pnum); 965 "only 0xFF bytes", pnum);
966 else if (UBI_IO_DEBUG)
967 dbg_msg("no VID header found at PEB %d, "
968 "only 0xFF bytes", pnum);
963 return UBI_IO_PEB_FREE; 969 return UBI_IO_PEB_FREE;
964 } 970 }
965 971
@@ -971,7 +977,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
971 ubi_warn("bad magic number at PEB %d: %08x instead of " 977 ubi_warn("bad magic number at PEB %d: %08x instead of "
972 "%08x", pnum, magic, UBI_VID_HDR_MAGIC); 978 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
973 ubi_dbg_dump_vid_hdr(vid_hdr); 979 ubi_dbg_dump_vid_hdr(vid_hdr);
974 } 980 } else if (UBI_IO_DEBUG)
981 dbg_msg("bad magic number at PEB %d: %08x instead of "
982 "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
975 return UBI_IO_BAD_VID_HDR; 983 return UBI_IO_BAD_VID_HDR;
976 } 984 }
977 985
@@ -983,7 +991,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
983 ubi_warn("bad CRC at PEB %d, calculated %#08x, " 991 ubi_warn("bad CRC at PEB %d, calculated %#08x, "
984 "read %#08x", pnum, crc, hdr_crc); 992 "read %#08x", pnum, crc, hdr_crc);
985 ubi_dbg_dump_vid_hdr(vid_hdr); 993 ubi_dbg_dump_vid_hdr(vid_hdr);
986 } 994 } else if (UBI_IO_DEBUG)
995 dbg_msg("bad CRC at PEB %d, calculated %#08x, "
996 "read %#08x", pnum, crc, hdr_crc);
987 return UBI_IO_BAD_VID_HDR; 997 return UBI_IO_BAD_VID_HDR;
988 } 998 }
989 999
@@ -1024,7 +1034,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
1024 1034
1025 err = paranoid_check_peb_ec_hdr(ubi, pnum); 1035 err = paranoid_check_peb_ec_hdr(ubi, pnum);
1026 if (err) 1036 if (err)
1027 return err > 0 ? -EINVAL: err; 1037 return err > 0 ? -EINVAL : err;
1028 1038
1029 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC); 1039 vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
1030 vid_hdr->version = UBI_VERSION; 1040 vid_hdr->version = UBI_VERSION;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 1c3fa18c26a7..4a8ec485c91d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -74,6 +74,13 @@
74#define UBI_IO_RETRIES 3 74#define UBI_IO_RETRIES 3
75 75
76/* 76/*
77 * Length of the protection queue. The length is effectively equivalent to the
78 * number of (global) erase cycles PEBs are protected from the wear-leveling
79 * worker.
80 */
81#define UBI_PROT_QUEUE_LEN 10
82
83/*
77 * Error codes returned by the I/O sub-system. 84 * Error codes returned by the I/O sub-system.
78 * 85 *
79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only 86 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only
@@ -95,7 +102,8 @@ enum {
95 102
96/** 103/**
97 * struct ubi_wl_entry - wear-leveling entry. 104 * struct ubi_wl_entry - wear-leveling entry.
98 * @rb: link in the corresponding RB-tree 105 * @u.rb: link in the corresponding (free/used) RB-tree
106 * @u.list: link in the protection queue
99 * @ec: erase counter 107 * @ec: erase counter
100 * @pnum: physical eraseblock number 108 * @pnum: physical eraseblock number
101 * 109 *
@@ -104,7 +112,10 @@ enum {
104 * RB-trees. See WL sub-system for details. 112 * RB-trees. See WL sub-system for details.
105 */ 113 */
106struct ubi_wl_entry { 114struct ubi_wl_entry {
107 struct rb_node rb; 115 union {
116 struct rb_node rb;
117 struct list_head list;
118 } u;
108 int ec; 119 int ec;
109 int pnum; 120 int pnum;
110}; 121};
@@ -288,7 +299,7 @@ struct ubi_wl_entry;
288 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling 299 * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
289 * 300 *
290 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end 301 * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
291 * of UBI ititializetion 302 * of UBI initialization
292 * @vtbl_slots: how many slots are available in the volume table 303 * @vtbl_slots: how many slots are available in the volume table
293 * @vtbl_size: size of the volume table in bytes 304 * @vtbl_size: size of the volume table in bytes
294 * @vtbl: in-RAM volume table copy 305 * @vtbl: in-RAM volume table copy
@@ -306,18 +317,17 @@ struct ubi_wl_entry;
306 * @used: RB-tree of used physical eraseblocks 317 * @used: RB-tree of used physical eraseblocks
307 * @free: RB-tree of free physical eraseblocks 318 * @free: RB-tree of free physical eraseblocks
308 * @scrub: RB-tree of physical eraseblocks which need scrubbing 319 * @scrub: RB-tree of physical eraseblocks which need scrubbing
309 * @prot: protection trees 320 * @pq: protection queue (contain physical eraseblocks which are temporarily
310 * @prot.pnum: protection tree indexed by physical eraseblock numbers 321 * protected from the wear-leveling worker)
311 * @prot.aec: protection tree indexed by absolute erase counter value 322 * @pq_head: protection queue head
312 * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from, 323 * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
313 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 324 * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
314 * fields 325 * fields
315 * @move_mutex: serializes eraseblock moves 326 * @move_mutex: serializes eraseblock moves
316 * @work_sem: sycnhronizes the WL worker with use tasks 327 * @work_sem: synchronizes the WL worker with use tasks
317 * @wl_scheduled: non-zero if the wear-leveling was scheduled 328 * @wl_scheduled: non-zero if the wear-leveling was scheduled
318 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 329 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
319 * physical eraseblock 330 * physical eraseblock
320 * @abs_ec: absolute erase counter
321 * @move_from: physical eraseblock from where the data is being moved 331 * @move_from: physical eraseblock from where the data is being moved
322 * @move_to: physical eraseblock where the data is being moved to 332 * @move_to: physical eraseblock where the data is being moved to
323 * @move_to_put: if the "to" PEB was put 333 * @move_to_put: if the "to" PEB was put
@@ -351,11 +361,11 @@ struct ubi_wl_entry;
351 * 361 *
352 * @peb_buf1: a buffer of PEB size used for different purposes 362 * @peb_buf1: a buffer of PEB size used for different purposes
353 * @peb_buf2: another buffer of PEB size used for different purposes 363 * @peb_buf2: another buffer of PEB size used for different purposes
354 * @buf_mutex: proptects @peb_buf1 and @peb_buf2 364 * @buf_mutex: protects @peb_buf1 and @peb_buf2
355 * @ckvol_mutex: serializes static volume checking when opening 365 * @ckvol_mutex: serializes static volume checking when opening
356 * @mult_mutex: serializes operations on multiple volumes, like re-nameing 366 * @mult_mutex: serializes operations on multiple volumes, like re-naming
357 * @dbg_peb_buf: buffer of PEB size used for debugging 367 * @dbg_peb_buf: buffer of PEB size used for debugging
358 * @dbg_buf_mutex: proptects @dbg_peb_buf 368 * @dbg_buf_mutex: protects @dbg_peb_buf
359 */ 369 */
360struct ubi_device { 370struct ubi_device {
361 struct cdev cdev; 371 struct cdev cdev;
@@ -392,16 +402,13 @@ struct ubi_device {
392 struct rb_root used; 402 struct rb_root used;
393 struct rb_root free; 403 struct rb_root free;
394 struct rb_root scrub; 404 struct rb_root scrub;
395 struct { 405 struct list_head pq[UBI_PROT_QUEUE_LEN];
396 struct rb_root pnum; 406 int pq_head;
397 struct rb_root aec;
398 } prot;
399 spinlock_t wl_lock; 407 spinlock_t wl_lock;
400 struct mutex move_mutex; 408 struct mutex move_mutex;
401 struct rw_semaphore work_sem; 409 struct rw_semaphore work_sem;
402 int wl_scheduled; 410 int wl_scheduled;
403 struct ubi_wl_entry **lookuptbl; 411 struct ubi_wl_entry **lookuptbl;
404 unsigned long long abs_ec;
405 struct ubi_wl_entry *move_from; 412 struct ubi_wl_entry *move_from;
406 struct ubi_wl_entry *move_to; 413 struct ubi_wl_entry *move_to;
407 int move_to_put; 414 int move_to_put;
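In ubi.h the two protection RB-trees (@prot.pnum, @prot.aec) and the absolute erase counter @abs_ec are replaced by @pq, an array of UBI_PROT_QUEUE_LEN list heads used as a rotating queue, plus the index @pq_head. A PEB handed out by ubi_wl_get_peb() is appended to the bucket just behind the head; after every erase the head bucket is drained into the @used tree and the head advances, so a PEB stays protected for roughly UBI_PROT_QUEUE_LEN global erase cycles. A self-contained user-space sketch of that rotating-bucket idea (plain arrays instead of struct list_head; all names are illustrative):

#include <stdio.h>

#define PROT_QUEUE_LEN 10
#define MAX_PER_BUCKET 16

static int pq[PROT_QUEUE_LEN][MAX_PER_BUCKET];	/* buckets of protected PEB numbers */
static int pq_count[PROT_QUEUE_LEN];
static int pq_head;

/* Called when a PEB is handed out: protect it for PROT_QUEUE_LEN erase cycles. */
static void prot_queue_add(int pnum)
{
	int tail = pq_head - 1;

	if (tail < 0)
		tail = PROT_QUEUE_LEN - 1;
	if (pq_count[tail] < MAX_PER_BUCKET)	/* no overflow handling, just a sketch */
		pq[tail][pq_count[tail]++] = pnum;
}

/* Called after every erase: whatever sits at the head bucket has aged out. */
static void serve_prot_queue(void)
{
	int i;

	for (i = 0; i < pq_count[pq_head]; i++)
		printf("PEB %d protection over, move to used tree\n", pq[pq_head][i]);
	pq_count[pq_head] = 0;
	pq_head = (pq_head + 1) % PROT_QUEUE_LEN;
}

int main(void)
{
	int cycle;

	prot_queue_add(42);
	for (cycle = 0; cycle < PROT_QUEUE_LEN; cycle++)
		serve_prot_queue();	/* PEB 42 ages out on the last cycle */
	return 0;
}

Compared with the old protection trees, adding and serving entries is O(1) and no separate ubi_wl_prot_entry allocation is needed, since the list link lives in the union inside struct ubi_wl_entry.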
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index dcb6dac1dc54..14901cb82c18 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -22,7 +22,7 @@
22 * UBI wear-leveling sub-system. 22 * UBI wear-leveling sub-system.
23 * 23 *
24 * This sub-system is responsible for wear-leveling. It works in terms of 24 * This sub-system is responsible for wear-leveling. It works in terms of
25 * physical* eraseblocks and erase counters and knows nothing about logical 25 * physical eraseblocks and erase counters and knows nothing about logical
26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical 26 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
27 * eraseblocks are of two types - used and free. Used physical eraseblocks are 27 * eraseblocks are of two types - used and free. Used physical eraseblocks are
28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical 28 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
@@ -55,8 +55,39 @@
55 * 55 *
56 * As it was said, for the UBI sub-system all physical eraseblocks are either 56 * As it was said, for the UBI sub-system all physical eraseblocks are either
57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while 57 * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
58 * used eraseblocks are kept in a set of different RB-trees: @wl->used, 58 * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
59 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. 59 * (temporarily) in the @wl->pq queue.
60 *
61 * When the WL sub-system returns a physical eraseblock, the physical
62 * eraseblock is protected from being moved for some "time". For this reason,
63 * the physical eraseblock is not directly moved from the @wl->free tree to the
64 * @wl->used tree. There is a protection queue in between where this
65 * physical eraseblock is temporarily stored (@wl->pq).
66 *
67 * All this protection stuff is needed because:
68 * o we don't want to move physical eraseblocks just after we have given them
69 * to the user; instead, we first want to let users fill them up with data;
70 *
71 * o there is a chance that the user will put the physical eraseblock very
72 * soon, so it makes sense not to move it for some time, but wait; this is
73 * especially important in case of "short term" physical eraseblocks.
74 *
75 * Physical eraseblocks stay protected only for limited time. But the "time" is
76 * measured in erase cycles in this case. This is implemented with help of the
77 * protection queue. Eraseblocks are put to the tail of this queue when they
78 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
79 * head of the queue on each erase operation (for any eraseblock). So the
80 * length of the queue defines how may (global) erase cycles PEBs are protected.
81 *
82 * To put it differently, each physical eraseblock has 2 main states: free and
83 * used. The former state corresponds to the @wl->free tree. The latter state
84 * is split up on several sub-states:
85 * o the WL movement is allowed (@wl->used tree);
86 * o the WL movement is temporarily prohibited (@wl->pq queue);
87 * o scrubbing is needed (@wl->scrub tree).
88 *
89 * Depending on the sub-state, wear-leveling entries of the used physical
90 * eraseblocks may be kept in one of those structures.
60 * 91 *
61 * Note, in this implementation, we keep a small in-RAM object for each physical 92 * Note, in this implementation, we keep a small in-RAM object for each physical
62 * eraseblock. This is surely not a scalable solution. But it appears to be good 93 * eraseblock. This is surely not a scalable solution. But it appears to be good
@@ -70,9 +101,6 @@
70 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we 101 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
71 * pick target PEB with an average EC if our PEB is not very "old". This is a 102 * pick target PEB with an average EC if our PEB is not very "old". This is a
72 * room for future re-works of the WL sub-system. 103 * room for future re-works of the WL sub-system.
73 *
74 * Note: the stuff with protection trees looks too complex and is difficult to
75 * understand. Should be fixed.
76 */ 104 */
77 105
78#include <linux/slab.h> 106#include <linux/slab.h>
@@ -85,14 +113,6 @@
85#define WL_RESERVED_PEBS 1 113#define WL_RESERVED_PEBS 1
86 114
87/* 115/*
88 * How many erase cycles are short term, unknown, and long term physical
89 * eraseblocks protected.
90 */
91#define ST_PROTECTION 16
92#define U_PROTECTION 10
93#define LT_PROTECTION 4
94
95/*
96 * Maximum difference between two erase counters. If this threshold is 116 * Maximum difference between two erase counters. If this threshold is
97 * exceeded, the WL sub-system starts moving data from used physical 117 * exceeded, the WL sub-system starts moving data from used physical
98 * eraseblocks with low erase counter to free physical eraseblocks with high 118 * eraseblocks with low erase counter to free physical eraseblocks with high
@@ -120,64 +140,9 @@
120#define WL_MAX_FAILURES 32 140#define WL_MAX_FAILURES 32
121 141
122/** 142/**
123 * struct ubi_wl_prot_entry - PEB protection entry.
124 * @rb_pnum: link in the @wl->prot.pnum RB-tree
125 * @rb_aec: link in the @wl->prot.aec RB-tree
126 * @abs_ec: the absolute erase counter value when the protection ends
127 * @e: the wear-leveling entry of the physical eraseblock under protection
128 *
129 * When the WL sub-system returns a physical eraseblock, the physical
130 * eraseblock is protected from being moved for some "time". For this reason,
131 * the physical eraseblock is not directly moved from the @wl->free tree to the
132 * @wl->used tree. There is one more tree in between where this physical
133 * eraseblock is temporarily stored (@wl->prot).
134 *
135 * All this protection stuff is needed because:
136 * o we don't want to move physical eraseblocks just after we have given them
137 * to the user; instead, we first want to let users fill them up with data;
138 *
139 * o there is a chance that the user will put the physical eraseblock very
140 * soon, so it makes sense not to move it for some time, but wait; this is
141 * especially important in case of "short term" physical eraseblocks.
142 *
143 * Physical eraseblocks stay protected only for limited time. But the "time" is
144 * measured in erase cycles in this case. This is implemented with help of the
145 * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
146 * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
147 * the @wl->used tree.
148 *
149 * Protected physical eraseblocks are searched by physical eraseblock number
150 * (when they are put) and by the absolute erase counter (to check if it is
151 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
152 * storing the protected physical eraseblocks: @wl->prot.pnum and
153 * @wl->prot.aec. They are referred to as the "protection" trees. The
154 * first one is indexed by the physical eraseblock number. The second one is
155 * indexed by the absolute erase counter. Both trees store
156 * &struct ubi_wl_prot_entry objects.
157 *
158 * Each physical eraseblock has 2 main states: free and used. The former state
159 * corresponds to the @wl->free tree. The latter state is split up on several
160 * sub-states:
161 * o the WL movement is allowed (@wl->used tree);
162 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
163 * @wl->prot.aec trees);
164 * o scrubbing is needed (@wl->scrub tree).
165 *
166 * Depending on the sub-state, wear-leveling entries of the used physical
167 * eraseblocks may be kept in one of those trees.
168 */
169struct ubi_wl_prot_entry {
170 struct rb_node rb_pnum;
171 struct rb_node rb_aec;
172 unsigned long long abs_ec;
173 struct ubi_wl_entry *e;
174};
175
176/**
177 * struct ubi_work - UBI work description data structure. 143 * struct ubi_work - UBI work description data structure.
178 * @list: a link in the list of pending works 144 * @list: a link in the list of pending works
179 * @func: worker function 145 * @func: worker function
180 * @priv: private data of the worker function
181 * @e: physical eraseblock to erase 146 * @e: physical eraseblock to erase
182 * @torture: if the physical eraseblock has to be tortured 147 * @torture: if the physical eraseblock has to be tortured
183 * 148 *
@@ -198,9 +163,11 @@ struct ubi_work {
198static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec); 163static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
199static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, 164static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
200 struct rb_root *root); 165 struct rb_root *root);
166static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
201#else 167#else
202#define paranoid_check_ec(ubi, pnum, ec) 0 168#define paranoid_check_ec(ubi, pnum, ec) 0
203#define paranoid_check_in_wl_tree(e, root) 169#define paranoid_check_in_wl_tree(e, root)
170#define paranoid_check_in_pq(ubi, e) 0
204#endif 171#endif
205 172
206/** 173/**
@@ -220,7 +187,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
220 struct ubi_wl_entry *e1; 187 struct ubi_wl_entry *e1;
221 188
222 parent = *p; 189 parent = *p;
223 e1 = rb_entry(parent, struct ubi_wl_entry, rb); 190 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);
224 191
225 if (e->ec < e1->ec) 192 if (e->ec < e1->ec)
226 p = &(*p)->rb_left; 193 p = &(*p)->rb_left;
@@ -235,8 +202,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
235 } 202 }
236 } 203 }
237 204
238 rb_link_node(&e->rb, parent, p); 205 rb_link_node(&e->u.rb, parent, p);
239 rb_insert_color(&e->rb, root); 206 rb_insert_color(&e->u.rb, root);
240} 207}
241 208
242/** 209/**
@@ -331,7 +298,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
331 while (p) { 298 while (p) {
332 struct ubi_wl_entry *e1; 299 struct ubi_wl_entry *e1;
333 300
334 e1 = rb_entry(p, struct ubi_wl_entry, rb); 301 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
335 302
336 if (e->pnum == e1->pnum) { 303 if (e->pnum == e1->pnum) {
337 ubi_assert(e == e1); 304 ubi_assert(e == e1);
@@ -355,50 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
355} 322}
356 323
357/** 324/**
358 * prot_tree_add - add physical eraseblock to protection trees. 325 * prot_queue_add - add physical eraseblock to the protection queue.
359 * @ubi: UBI device description object 326 * @ubi: UBI device description object
360 * @e: the physical eraseblock to add 327 * @e: the physical eraseblock to add
361 * @pe: protection entry object to use
362 * @abs_ec: absolute erase counter value when this physical eraseblock has
363 * to be removed from the protection trees.
364 * 328 *
365 * @wl->lock has to be locked. 329 * This function adds @e to the tail of the protection queue @ubi->pq, where
330 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
331 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
332 * be locked.
366 */ 333 */
367static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e, 334static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
368 struct ubi_wl_prot_entry *pe, int abs_ec)
369{ 335{
370 struct rb_node **p, *parent = NULL; 336 int pq_tail = ubi->pq_head - 1;
371 struct ubi_wl_prot_entry *pe1;
372
373 pe->e = e;
374 pe->abs_ec = ubi->abs_ec + abs_ec;
375
376 p = &ubi->prot.pnum.rb_node;
377 while (*p) {
378 parent = *p;
379 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
380
381 if (e->pnum < pe1->e->pnum)
382 p = &(*p)->rb_left;
383 else
384 p = &(*p)->rb_right;
385 }
386 rb_link_node(&pe->rb_pnum, parent, p);
387 rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
388
389 p = &ubi->prot.aec.rb_node;
390 parent = NULL;
391 while (*p) {
392 parent = *p;
393 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
394 337
395 if (pe->abs_ec < pe1->abs_ec) 338 if (pq_tail < 0)
396 p = &(*p)->rb_left; 339 pq_tail = UBI_PROT_QUEUE_LEN - 1;
397 else 340 ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
398 p = &(*p)->rb_right; 341 list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
399 } 342 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
400 rb_link_node(&pe->rb_aec, parent, p);
401 rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
402} 343}
403 344
404/** 345/**
@@ -414,14 +355,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
414 struct rb_node *p; 355 struct rb_node *p;
415 struct ubi_wl_entry *e; 356 struct ubi_wl_entry *e;
416 357
417 e = rb_entry(rb_first(root), struct ubi_wl_entry, rb); 358 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
418 max += e->ec; 359 max += e->ec;
419 360
420 p = root->rb_node; 361 p = root->rb_node;
421 while (p) { 362 while (p) {
422 struct ubi_wl_entry *e1; 363 struct ubi_wl_entry *e1;
423 364
424 e1 = rb_entry(p, struct ubi_wl_entry, rb); 365 e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
425 if (e1->ec >= max) 366 if (e1->ec >= max)
426 p = p->rb_left; 367 p = p->rb_left;
427 else { 368 else {
@@ -443,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
443 */ 384 */
444int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) 385int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
445{ 386{
446 int err, protect, medium_ec; 387 int err, medium_ec;
447 struct ubi_wl_entry *e, *first, *last; 388 struct ubi_wl_entry *e, *first, *last;
448 struct ubi_wl_prot_entry *pe;
449 389
450 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || 390 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
451 dtype == UBI_UNKNOWN); 391 dtype == UBI_UNKNOWN);
452 392
453 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
454 if (!pe)
455 return -ENOMEM;
456
457retry: 393retry:
458 spin_lock(&ubi->wl_lock); 394 spin_lock(&ubi->wl_lock);
459 if (!ubi->free.rb_node) { 395 if (!ubi->free.rb_node) {
@@ -461,16 +397,13 @@ retry:
461 ubi_assert(list_empty(&ubi->works)); 397 ubi_assert(list_empty(&ubi->works));
462 ubi_err("no free eraseblocks"); 398 ubi_err("no free eraseblocks");
463 spin_unlock(&ubi->wl_lock); 399 spin_unlock(&ubi->wl_lock);
464 kfree(pe);
465 return -ENOSPC; 400 return -ENOSPC;
466 } 401 }
467 spin_unlock(&ubi->wl_lock); 402 spin_unlock(&ubi->wl_lock);
468 403
469 err = produce_free_peb(ubi); 404 err = produce_free_peb(ubi);
470 if (err < 0) { 405 if (err < 0)
471 kfree(pe);
472 return err; 406 return err;
473 }
474 goto retry; 407 goto retry;
475 } 408 }
476 409
@@ -483,7 +416,6 @@ retry:
483 * %WL_FREE_MAX_DIFF. 416 * %WL_FREE_MAX_DIFF.
484 */ 417 */
485 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 418 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
486 protect = LT_PROTECTION;
487 break; 419 break;
488 case UBI_UNKNOWN: 420 case UBI_UNKNOWN:
489 /* 421 /*
@@ -492,81 +424,63 @@ retry:
492 * eraseblock with erase counter greater or equivalent than the 424 * eraseblock with erase counter greater or equivalent than the
493 * lowest erase counter plus %WL_FREE_MAX_DIFF. 425 * lowest erase counter plus %WL_FREE_MAX_DIFF.
494 */ 426 */
495 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 427 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
496 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); 428 u.rb);
429 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);
497 430
498 if (last->ec - first->ec < WL_FREE_MAX_DIFF) 431 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
499 e = rb_entry(ubi->free.rb_node, 432 e = rb_entry(ubi->free.rb_node,
500 struct ubi_wl_entry, rb); 433 struct ubi_wl_entry, u.rb);
501 else { 434 else {
502 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 435 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
503 e = find_wl_entry(&ubi->free, medium_ec); 436 e = find_wl_entry(&ubi->free, medium_ec);
504 } 437 }
505 protect = U_PROTECTION;
506 break; 438 break;
507 case UBI_SHORTTERM: 439 case UBI_SHORTTERM:
508 /* 440 /*
509 * For short term data we pick a physical eraseblock with the 441 * For short term data we pick a physical eraseblock with the
510 * lowest erase counter as we expect it will be erased soon. 442 * lowest erase counter as we expect it will be erased soon.
511 */ 443 */
512 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 444 e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
513 protect = ST_PROTECTION;
514 break; 445 break;
515 default: 446 default:
516 protect = 0;
517 e = NULL;
518 BUG(); 447 BUG();
519 } 448 }
520 449
450 paranoid_check_in_wl_tree(e, &ubi->free);
451
521 /* 452 /*
522 * Move the physical eraseblock to the protection trees where it will 453 * Move the physical eraseblock to the protection queue where it will
523 * be protected from being moved for some time. 454 * be protected from being moved for some time.
524 */ 455 */
525 paranoid_check_in_wl_tree(e, &ubi->free); 456 rb_erase(&e->u.rb, &ubi->free);
526 rb_erase(&e->rb, &ubi->free); 457 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
527 prot_tree_add(ubi, e, pe, protect); 458 prot_queue_add(ubi, e);
528
529 dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
530 spin_unlock(&ubi->wl_lock); 459 spin_unlock(&ubi->wl_lock);
531
532 return e->pnum; 460 return e->pnum;
533} 461}
534 462
535/** 463/**
536 * prot_tree_del - remove a physical eraseblock from the protection trees 464 * prot_queue_del - remove a physical eraseblock from the protection queue.
537 * @ubi: UBI device description object 465 * @ubi: UBI device description object
538 * @pnum: the physical eraseblock to remove 466 * @pnum: the physical eraseblock to remove
539 * 467 *
540 * This function returns PEB @pnum from the protection trees and returns zero 468 * This function deletes PEB @pnum from the protection queue and returns zero
541 * in case of success and %-ENODEV if the PEB was not found in the protection 469 * in case of success and %-ENODEV if the PEB was not found.
542 * trees.
543 */ 470 */
544static int prot_tree_del(struct ubi_device *ubi, int pnum) 471static int prot_queue_del(struct ubi_device *ubi, int pnum)
545{ 472{
546 struct rb_node *p; 473 struct ubi_wl_entry *e;
547 struct ubi_wl_prot_entry *pe = NULL;
548
549 p = ubi->prot.pnum.rb_node;
550 while (p) {
551
552 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
553
554 if (pnum == pe->e->pnum)
555 goto found;
556 474
557 if (pnum < pe->e->pnum) 475 e = ubi->lookuptbl[pnum];
558 p = p->rb_left; 476 if (!e)
559 else 477 return -ENODEV;
560 p = p->rb_right;
561 }
562 478
563 return -ENODEV; 479 if (paranoid_check_in_pq(ubi, e))
480 return -ENODEV;
564 481
565found: 482 list_del(&e->u.list);
566 ubi_assert(pe->e->pnum == pnum); 483 dbg_wl("deleted PEB %d from the protection queue", e->pnum);
567 rb_erase(&pe->rb_aec, &ubi->prot.aec);
568 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
569 kfree(pe);
570 return 0; 484 return 0;
571} 485}
572 486
@@ -632,47 +546,47 @@ out_free:
632} 546}
633 547
634/** 548/**
635 * check_protection_over - check if it is time to stop protecting some PEBs. 549 * serve_prot_queue - check if it is time to stop protecting PEBs.
636 * @ubi: UBI device description object 550 * @ubi: UBI device description object
637 * 551 *
638 * This function is called after each erase operation, when the absolute erase 552 * This function is called after each erase operation and removes PEBs from the
639 * counter is incremented, to check if some physical eraseblock have not to be 553 * tail of the protection queue. These PEBs have been protected for long enough
640 * protected any longer. These physical eraseblocks are moved from the 554 * and should be moved to the used tree.
641 * protection trees to the used tree.
642 */ 555 */
643static void check_protection_over(struct ubi_device *ubi) 556static void serve_prot_queue(struct ubi_device *ubi)
644{ 557{
645 struct ubi_wl_prot_entry *pe; 558 struct ubi_wl_entry *e, *tmp;
559 int count;
646 560
647 /* 561 /*
648 * There may be several protected physical eraseblock to remove, 562 * There may be several protected physical eraseblock to remove,
649 * process them all. 563 * process them all.
650 */ 564 */
651 while (1) { 565repeat:
652 spin_lock(&ubi->wl_lock); 566 count = 0;
653 if (!ubi->prot.aec.rb_node) { 567 spin_lock(&ubi->wl_lock);
654 spin_unlock(&ubi->wl_lock); 568 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
655 break; 569 dbg_wl("PEB %d EC %d protection over, move to used tree",
656 } 570 e->pnum, e->ec);
657
658 pe = rb_entry(rb_first(&ubi->prot.aec),
659 struct ubi_wl_prot_entry, rb_aec);
660 571
661 if (pe->abs_ec > ubi->abs_ec) { 572 list_del(&e->u.list);
573 wl_tree_add(e, &ubi->used);
574 if (count++ > 32) {
575 /*
576 * Let's be nice and avoid holding the spinlock for
577 * too long.
578 */
662 spin_unlock(&ubi->wl_lock); 579 spin_unlock(&ubi->wl_lock);
663 break; 580 cond_resched();
581 goto repeat;
664 } 582 }
665
666 dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
667 pe->e->pnum, ubi->abs_ec, pe->abs_ec);
668 rb_erase(&pe->rb_aec, &ubi->prot.aec);
669 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
670 wl_tree_add(pe->e, &ubi->used);
671 spin_unlock(&ubi->wl_lock);
672
673 kfree(pe);
674 cond_resched();
675 } 583 }
584
585 ubi->pq_head += 1;
586 if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
587 ubi->pq_head = 0;
588 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
589 spin_unlock(&ubi->wl_lock);
676} 590}
677 591
678/** 592/**
@@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi)
680 * @ubi: UBI device description object 594 * @ubi: UBI device description object
681 * @wrk: the work to schedule 595 * @wrk: the work to schedule
682 * 596 *
683 * This function enqueues a work defined by @wrk to the tail of the pending 597 * This function adds a work defined by @wrk to the tail of the pending works
684 * works list. 598 * list.
685 */ 599 */
686static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) 600static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
687{ 601{
@@ -739,13 +653,11 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
739static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, 653static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
740 int cancel) 654 int cancel)
741{ 655{
742 int err, put = 0, scrubbing = 0, protect = 0; 656 int err, scrubbing = 0, torture = 0;
743 struct ubi_wl_prot_entry *uninitialized_var(pe);
744 struct ubi_wl_entry *e1, *e2; 657 struct ubi_wl_entry *e1, *e2;
745 struct ubi_vid_hdr *vid_hdr; 658 struct ubi_vid_hdr *vid_hdr;
746 659
747 kfree(wrk); 660 kfree(wrk);
748
749 if (cancel) 661 if (cancel)
750 return 0; 662 return 0;
751 663
@@ -781,7 +693,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
781 * highly worn-out free physical eraseblock. If the erase 693 * highly worn-out free physical eraseblock. If the erase
782 * counters differ much enough, start wear-leveling. 694 * counters differ much enough, start wear-leveling.
783 */ 695 */
784 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); 696 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
785 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 697 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
786 698
787 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 699 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
@@ -790,21 +702,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
790 goto out_cancel; 702 goto out_cancel;
791 } 703 }
792 paranoid_check_in_wl_tree(e1, &ubi->used); 704 paranoid_check_in_wl_tree(e1, &ubi->used);
793 rb_erase(&e1->rb, &ubi->used); 705 rb_erase(&e1->u.rb, &ubi->used);
794 dbg_wl("move PEB %d EC %d to PEB %d EC %d", 706 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
795 e1->pnum, e1->ec, e2->pnum, e2->ec); 707 e1->pnum, e1->ec, e2->pnum, e2->ec);
796 } else { 708 } else {
797 /* Perform scrubbing */ 709 /* Perform scrubbing */
798 scrubbing = 1; 710 scrubbing = 1;
799 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb); 711 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
800 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 712 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
801 paranoid_check_in_wl_tree(e1, &ubi->scrub); 713 paranoid_check_in_wl_tree(e1, &ubi->scrub);
802 rb_erase(&e1->rb, &ubi->scrub); 714 rb_erase(&e1->u.rb, &ubi->scrub);
803 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 715 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
804 } 716 }
805 717
806 paranoid_check_in_wl_tree(e2, &ubi->free); 718 paranoid_check_in_wl_tree(e2, &ubi->free);
807 rb_erase(&e2->rb, &ubi->free); 719 rb_erase(&e2->u.rb, &ubi->free);
808 ubi->move_from = e1; 720 ubi->move_from = e1;
809 ubi->move_to = e2; 721 ubi->move_to = e2;
810 spin_unlock(&ubi->wl_lock); 722 spin_unlock(&ubi->wl_lock);
@@ -844,46 +756,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
844 756
845 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); 757 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
846 if (err) { 758 if (err) {
847 759 if (err == -EAGAIN)
760 goto out_not_moved;
848 if (err < 0) 761 if (err < 0)
849 goto out_error; 762 goto out_error;
850 if (err == 1) 763 if (err == 2) {
764 /* Target PEB write error, torture it */
765 torture = 1;
851 goto out_not_moved; 766 goto out_not_moved;
767 }
852 768
853 /* 769 /*
854 * For some reason the LEB was not moved - it might be because 770 * The LEB has not been moved because the volume is being
855 * the volume is being deleted. We should prevent this PEB from 771 * deleted or the PEB has been put meanwhile. We should prevent
856 * being selected for wear-levelling movement for some "time", 772 * this PEB from being selected for wear-leveling movement
857 * so put it to the protection tree. 773 * again, so put it to the protection queue.
858 */ 774 */
859 775
860 dbg_wl("cancelled moving PEB %d", e1->pnum); 776 dbg_wl("canceled moving PEB %d", e1->pnum);
861 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); 777 ubi_assert(err == 1);
862 if (!pe) { 778
863 err = -ENOMEM; 779 ubi_free_vid_hdr(ubi, vid_hdr);
864 goto out_error; 780 vid_hdr = NULL;
865 } 781
782 spin_lock(&ubi->wl_lock);
783 prot_queue_add(ubi, e1);
784 ubi_assert(!ubi->move_to_put);
785 ubi->move_from = ubi->move_to = NULL;
786 ubi->wl_scheduled = 0;
787 spin_unlock(&ubi->wl_lock);
866 788
867 protect = 1; 789 e1 = NULL;
790 err = schedule_erase(ubi, e2, 0);
791 if (err)
792 goto out_error;
793 mutex_unlock(&ubi->move_mutex);
794 return 0;
868 } 795 }
869 796
797 /* The PEB has been successfully moved */
870 ubi_free_vid_hdr(ubi, vid_hdr); 798 ubi_free_vid_hdr(ubi, vid_hdr);
871 if (scrubbing && !protect) 799 vid_hdr = NULL;
800 if (scrubbing)
872 ubi_msg("scrubbed PEB %d, data moved to PEB %d", 801 ubi_msg("scrubbed PEB %d, data moved to PEB %d",
873 e1->pnum, e2->pnum); 802 e1->pnum, e2->pnum);
874 803
875 spin_lock(&ubi->wl_lock); 804 spin_lock(&ubi->wl_lock);
876 if (protect) 805 if (!ubi->move_to_put) {
877 prot_tree_add(ubi, e1, pe, protect);
878 if (!ubi->move_to_put)
879 wl_tree_add(e2, &ubi->used); 806 wl_tree_add(e2, &ubi->used);
880 else 807 e2 = NULL;
881 put = 1; 808 }
882 ubi->move_from = ubi->move_to = NULL; 809 ubi->move_from = ubi->move_to = NULL;
883 ubi->move_to_put = ubi->wl_scheduled = 0; 810 ubi->move_to_put = ubi->wl_scheduled = 0;
884 spin_unlock(&ubi->wl_lock); 811 spin_unlock(&ubi->wl_lock);
885 812
886 if (put) { 813 err = schedule_erase(ubi, e1, 0);
814 if (err) {
815 e1 = NULL;
816 goto out_error;
817 }
818
819 if (e2) {
887 /* 820 /*
888 * Well, the target PEB was put meanwhile, schedule it for 821 * Well, the target PEB was put meanwhile, schedule it for
889 * erasure. 822 * erasure.
@@ -894,13 +827,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
894 goto out_error; 827 goto out_error;
895 } 828 }
896 829
897 if (!protect) {
898 err = schedule_erase(ubi, e1, 0);
899 if (err)
900 goto out_error;
901 }
902
903
904 dbg_wl("done"); 830 dbg_wl("done");
905 mutex_unlock(&ubi->move_mutex); 831 mutex_unlock(&ubi->move_mutex);
906 return 0; 832 return 0;
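
The hunk above reorganizes how the worker reacts to ubi_eba_copy_leb(): -EAGAIN means the move could not be attempted now and is simply retried later, any other negative value is fatal, 2 indicates a write error on the target PEB (which is therefore scheduled for a "torture" erase), and 1 means the source LEB vanished in the meantime, in which case e1 is parked on the protection queue and e2 is erased. A rough standalone model of that dispatch follows; the enum labels are shorthand of my own for the numeric codes visible in the diff.

/* Standalone model of the copy-result dispatch; labels are shorthand only. */
#include <stdio.h>
#include <errno.h>

enum copy_result {
	COPY_OK        = 0,  /* data moved successfully */
	COPY_CANCELED  = 1,  /* source LEB vanished (volume deleted / PEB put) */
	COPY_TARGET_WR = 2,  /* write to the target PEB failed */
};

static void handle_copy_result(int err)
{
	int torture = 0;

	if (err == -EAGAIN) {            /* could not run now, retry the move later */
		printf("%d: not moved, retry later\n", err);
		return;
	}
	if (err < 0) {                   /* any other negative code is fatal */
		printf("%d: fatal error, go read-only\n", err);
		return;
	}
	if (err == COPY_TARGET_WR) {     /* suspicious target: erase-test it */
		torture = 1;
		printf("%d: not moved, erase target (torture=%d)\n", err, torture);
		return;
	}
	if (err == COPY_CANCELED) {      /* source gone: shield it from WL for a while */
		printf("%d: canceled, source joins the protection queue\n", err);
		return;
	}
	printf("%d: moved successfully\n", err);
}

int main(void)
{
	int codes[] = { 0, 1, 2, -EAGAIN, -EIO };

	for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		handle_copy_result(codes[i]);
	return 0;
}
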
@@ -908,20 +834,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
908 /* 834 /*
909 * For some reason the LEB was not moved, might be an error, might be 835 * For some reason the LEB was not moved, might be an error, might be
910 * something else. @e1 was not changed, so return it back. @e2 might 836 * something else. @e1 was not changed, so return it back. @e2 might
911 * be changed, schedule it for erasure. 837 * have been changed, schedule it for erasure.
912 */ 838 */
913out_not_moved: 839out_not_moved:
840 dbg_wl("canceled moving PEB %d", e1->pnum);
914 ubi_free_vid_hdr(ubi, vid_hdr); 841 ubi_free_vid_hdr(ubi, vid_hdr);
842 vid_hdr = NULL;
915 spin_lock(&ubi->wl_lock); 843 spin_lock(&ubi->wl_lock);
916 if (scrubbing) 844 if (scrubbing)
917 wl_tree_add(e1, &ubi->scrub); 845 wl_tree_add(e1, &ubi->scrub);
918 else 846 else
919 wl_tree_add(e1, &ubi->used); 847 wl_tree_add(e1, &ubi->used);
848 ubi_assert(!ubi->move_to_put);
920 ubi->move_from = ubi->move_to = NULL; 849 ubi->move_from = ubi->move_to = NULL;
921 ubi->move_to_put = ubi->wl_scheduled = 0; 850 ubi->wl_scheduled = 0;
922 spin_unlock(&ubi->wl_lock); 851 spin_unlock(&ubi->wl_lock);
923 852
924 err = schedule_erase(ubi, e2, 0); 853 e1 = NULL;
854 err = schedule_erase(ubi, e2, torture);
925 if (err) 855 if (err)
926 goto out_error; 856 goto out_error;
927 857
@@ -938,8 +868,10 @@ out_error:
938 ubi->move_to_put = ubi->wl_scheduled = 0; 868 ubi->move_to_put = ubi->wl_scheduled = 0;
939 spin_unlock(&ubi->wl_lock); 869 spin_unlock(&ubi->wl_lock);
940 870
941 kmem_cache_free(ubi_wl_entry_slab, e1); 871 if (e1)
942 kmem_cache_free(ubi_wl_entry_slab, e2); 872 kmem_cache_free(ubi_wl_entry_slab, e1);
873 if (e2)
874 kmem_cache_free(ubi_wl_entry_slab, e2);
943 ubi_ro_mode(ubi); 875 ubi_ro_mode(ubi);
944 876
945 mutex_unlock(&ubi->move_mutex); 877 mutex_unlock(&ubi->move_mutex);
@@ -988,7 +920,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
988 * erase counter of free physical eraseblocks is greater than 920 * erase counter of free physical eraseblocks is greater than
989 * %UBI_WL_THRESHOLD. 921 * %UBI_WL_THRESHOLD.
990 */ 922 */
991 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb); 923 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
992 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 924 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
993 925
994 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) 926 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
@@ -1050,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1050 kfree(wl_wrk); 982 kfree(wl_wrk);
1051 983
1052 spin_lock(&ubi->wl_lock); 984 spin_lock(&ubi->wl_lock);
1053 ubi->abs_ec += 1;
1054 wl_tree_add(e, &ubi->free); 985 wl_tree_add(e, &ubi->free);
1055 spin_unlock(&ubi->wl_lock); 986 spin_unlock(&ubi->wl_lock);
1056 987
@@ -1058,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1058 * One more erase operation has happened, take care about 989 * One more erase operation has happened, take care about
1059 * protected physical eraseblocks. 990 * protected physical eraseblocks.
1060 */ 991 */
1061 check_protection_over(ubi); 992 serve_prot_queue(ubi);
1062 993
1063 /* And take care about wear-leveling */ 994 /* And take care about wear-leveling */
1064 err = ensure_wear_leveling(ubi); 995 err = ensure_wear_leveling(ubi);
@@ -1190,12 +1121,12 @@ retry:
1190 } else { 1121 } else {
1191 if (in_wl_tree(e, &ubi->used)) { 1122 if (in_wl_tree(e, &ubi->used)) {
1192 paranoid_check_in_wl_tree(e, &ubi->used); 1123 paranoid_check_in_wl_tree(e, &ubi->used);
1193 rb_erase(&e->rb, &ubi->used); 1124 rb_erase(&e->u.rb, &ubi->used);
1194 } else if (in_wl_tree(e, &ubi->scrub)) { 1125 } else if (in_wl_tree(e, &ubi->scrub)) {
1195 paranoid_check_in_wl_tree(e, &ubi->scrub); 1126 paranoid_check_in_wl_tree(e, &ubi->scrub);
1196 rb_erase(&e->rb, &ubi->scrub); 1127 rb_erase(&e->u.rb, &ubi->scrub);
1197 } else { 1128 } else {
1198 err = prot_tree_del(ubi, e->pnum); 1129 err = prot_queue_del(ubi, e->pnum);
1199 if (err) { 1130 if (err) {
1200 ubi_err("PEB %d not found", pnum); 1131 ubi_err("PEB %d not found", pnum);
1201 ubi_ro_mode(ubi); 1132 ubi_ro_mode(ubi);
@@ -1255,11 +1186,11 @@ retry:
1255 1186
1256 if (in_wl_tree(e, &ubi->used)) { 1187 if (in_wl_tree(e, &ubi->used)) {
1257 paranoid_check_in_wl_tree(e, &ubi->used); 1188 paranoid_check_in_wl_tree(e, &ubi->used);
1258 rb_erase(&e->rb, &ubi->used); 1189 rb_erase(&e->u.rb, &ubi->used);
1259 } else { 1190 } else {
1260 int err; 1191 int err;
1261 1192
1262 err = prot_tree_del(ubi, e->pnum); 1193 err = prot_queue_del(ubi, e->pnum);
1263 if (err) { 1194 if (err) {
1264 ubi_err("PEB %d not found", pnum); 1195 ubi_err("PEB %d not found", pnum);
1265 ubi_ro_mode(ubi); 1196 ubi_ro_mode(ubi);
@@ -1290,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
1290 int err; 1221 int err;
1291 1222
1292 /* 1223 /*
1293 * Erase while the pending works queue is not empty, but not more then 1224 * Erase while the pending works queue is not empty, but not more than
1294 * the number of currently pending works. 1225 * the number of currently pending works.
1295 */ 1226 */
1296 dbg_wl("flush (%d pending works)", ubi->works_count); 1227 dbg_wl("flush (%d pending works)", ubi->works_count);
@@ -1308,7 +1239,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
1308 up_write(&ubi->work_sem); 1239 up_write(&ubi->work_sem);
1309 1240
1310 /* 1241 /*
1311 * And in case last was the WL worker and it cancelled the LEB 1242 * And in case last was the WL worker and it canceled the LEB
1312 * movement, flush again. 1243 * movement, flush again.
1313 */ 1244 */
1314 while (ubi->works_count) { 1245 while (ubi->works_count) {
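
The comment fixes above sit in ubi_wl_flush(), whose logic the comments spell out: flush the works pending at entry, wait (via the work_sem down_write/up_write visible above) for any work still running in parallel, and then flush again, because that parallel work may have been the WL worker canceling a LEB movement and queuing a fresh erase work. A toy standalone model of that flush-then-recheck pattern, under those assumptions:

/* Toy model of flushing a work queue that can grow while being flushed. */
#include <stdio.h>

static int works_count = 3;    /* works pending when the flush starts */

static void do_work(void)
{
	works_count--;              /* run one queued work */
}

int main(void)
{
	while (works_count)         /* first pass over the pending works */
		do_work();

	/*
	 * In the kernel, a down_write()/up_write() on work_sem follows, waiting
	 * for any work still running in parallel.  That work -- e.g. the WL
	 * worker canceling a LEB move -- may queue a new erase work; simulate it:
	 */
	works_count++;

	while (works_count)         /* hence the second flush loop */
		do_work();

	printf("work queue empty\n");
	return 0;
}
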
@@ -1337,11 +1268,11 @@ static void tree_destroy(struct rb_root *root)
1337 else if (rb->rb_right) 1268 else if (rb->rb_right)
1338 rb = rb->rb_right; 1269 rb = rb->rb_right;
1339 else { 1270 else {
1340 e = rb_entry(rb, struct ubi_wl_entry, rb); 1271 e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1341 1272
1342 rb = rb_parent(rb); 1273 rb = rb_parent(rb);
1343 if (rb) { 1274 if (rb) {
1344 if (rb->rb_left == &e->rb) 1275 if (rb->rb_left == &e->u.rb)
1345 rb->rb_left = NULL; 1276 rb->rb_left = NULL;
1346 else 1277 else
1347 rb->rb_right = NULL; 1278 rb->rb_right = NULL;
@@ -1436,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi)
1436 */ 1367 */
1437int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) 1368int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1438{ 1369{
1439 int err; 1370 int err, i;
1440 struct rb_node *rb1, *rb2; 1371 struct rb_node *rb1, *rb2;
1441 struct ubi_scan_volume *sv; 1372 struct ubi_scan_volume *sv;
1442 struct ubi_scan_leb *seb, *tmp; 1373 struct ubi_scan_leb *seb, *tmp;
1443 struct ubi_wl_entry *e; 1374 struct ubi_wl_entry *e;
1444 1375
1445
1446 ubi->used = ubi->free = ubi->scrub = RB_ROOT; 1376 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1447 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1448 spin_lock_init(&ubi->wl_lock); 1377 spin_lock_init(&ubi->wl_lock);
1449 mutex_init(&ubi->move_mutex); 1378 mutex_init(&ubi->move_mutex);
1450 init_rwsem(&ubi->work_sem); 1379 init_rwsem(&ubi->work_sem);
@@ -1458,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1458 if (!ubi->lookuptbl) 1387 if (!ubi->lookuptbl)
1459 return err; 1388 return err;
1460 1389
1390 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1391 INIT_LIST_HEAD(&ubi->pq[i]);
1392 ubi->pq_head = 0;
1393
1461 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) { 1394 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1462 cond_resched(); 1395 cond_resched();
1463 1396
@@ -1552,33 +1485,18 @@ out_free:
1552} 1485}
1553 1486
1554/** 1487/**
1555 * protection_trees_destroy - destroy the protection RB-trees. 1488 * protection_queue_destroy - destroy the protection queue.
1556 * @ubi: UBI device description object 1489 * @ubi: UBI device description object
1557 */ 1490 */
1558static void protection_trees_destroy(struct ubi_device *ubi) 1491static void protection_queue_destroy(struct ubi_device *ubi)
1559{ 1492{
1560 struct rb_node *rb; 1493 int i;
1561 struct ubi_wl_prot_entry *pe; 1494 struct ubi_wl_entry *e, *tmp;
1562
1563 rb = ubi->prot.aec.rb_node;
1564 while (rb) {
1565 if (rb->rb_left)
1566 rb = rb->rb_left;
1567 else if (rb->rb_right)
1568 rb = rb->rb_right;
1569 else {
1570 pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1571
1572 rb = rb_parent(rb);
1573 if (rb) {
1574 if (rb->rb_left == &pe->rb_aec)
1575 rb->rb_left = NULL;
1576 else
1577 rb->rb_right = NULL;
1578 }
1579 1495
1580 kmem_cache_free(ubi_wl_entry_slab, pe->e); 1496 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1581 kfree(pe); 1497 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1498 list_del(&e->u.list);
1499 kmem_cache_free(ubi_wl_entry_slab, e);
1582 } 1500 }
1583 } 1501 }
1584} 1502}
@@ -1591,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi)
1591{ 1509{
1592 dbg_wl("close the WL sub-system"); 1510 dbg_wl("close the WL sub-system");
1593 cancel_pending(ubi); 1511 cancel_pending(ubi);
1594 protection_trees_destroy(ubi); 1512 protection_queue_destroy(ubi);
1595 tree_destroy(&ubi->used); 1513 tree_destroy(&ubi->used);
1596 tree_destroy(&ubi->free); 1514 tree_destroy(&ubi->free);
1597 tree_destroy(&ubi->scrub); 1515 tree_destroy(&ubi->scrub);
@@ -1661,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1661 return 1; 1579 return 1;
1662} 1580}
1663 1581
1582/**
1583 * paranoid_check_in_pq - check if wear-leveling entry is in the protection
1584 * queue.
1585 * @ubi: UBI device description object
1586 * @e: the wear-leveling entry to check
1587 *
1588 * This function returns zero if @e is in @ubi->pq and %1 if it is not.
1589 */
1590static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
1591{
1592 struct ubi_wl_entry *p;
1593 int i;
1594
1595 for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1596 list_for_each_entry(p, &ubi->pq[i], u.list)
1597 if (p == e)
1598 return 0;
1599
1600 ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
1601 e->pnum, e->ec);
1602 ubi_dbg_dump_stack();
1603 return 1;
1604}
1664#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ 1605#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */