author     David Woodhouse <David.Woodhouse@intel.com>  2012-10-09 10:03:21 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>  2012-10-09 10:04:25 -0400
commit     ffe315012510165ce82e4dd4767f0a5dba9edbf7 (patch)
tree       f601cd980af9d0ced5ca9aedecef4fa0d2ca0e15 /drivers/mtd
parent     e2d3a35ee427aaba99b6c68a56609ce276c51270 (diff)
parent     4a8e43feeac7996b8de2d5b2823e316917493df4 (diff)
Merge tag 'disintegrate-mtd-20121009' of git://git.infradead.org/users/dhowells/linux-headers
UAPI Disintegration 2012-10-09

Conflicts:
	MAINTAINERS
	arch/arm/configs/bcmring_defconfig
	arch/arm/mach-imx/clk-imx51-imx53.c
	drivers/mtd/nand/Kconfig
	drivers/mtd/nand/bcm_umi_nand.c
	drivers/mtd/nand/nand_bcm_umi.h
	drivers/mtd/nand/orion_nand.c
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/mtdchar.c | 50
-rw-r--r--  drivers/mtd/mtdcore.c | 6
-rw-r--r--  drivers/mtd/mtdoops.c | 4
-rw-r--r--  drivers/mtd/mtdpart.c | 12
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 10
-rw-r--r--  drivers/mtd/nand/davinci_nand.c | 4
-rw-r--r--  drivers/mtd/nand/fsl_ifc_nand.c | 56
-rw-r--r--  drivers/mtd/nand/mxc_nand.c | 2
-rw-r--r--  drivers/mtd/nand/nomadik_nand.c | 2
-rw-r--r--  drivers/mtd/nand/omap2.c | 303
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 2
-rw-r--r--  drivers/mtd/nand/pxa3xx_nand.c | 89
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 2
-rw-r--r--  drivers/mtd/onenand/omap2.c | 34
-rw-r--r--  drivers/mtd/ubi/Kconfig | 61
-rw-r--r--  drivers/mtd/ubi/Makefile | 1
-rw-r--r--  drivers/mtd/ubi/attach.c | 428
-rw-r--r--  drivers/mtd/ubi/build.c | 274
-rw-r--r--  drivers/mtd/ubi/cdev.c | 18
-rw-r--r--  drivers/mtd/ubi/debug.c | 153
-rw-r--r--  drivers/mtd/ubi/debug.h | 12
-rw-r--r--  drivers/mtd/ubi/eba.c | 159
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 1537
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 30
-rw-r--r--  drivers/mtd/ubi/io.c | 80
-rw-r--r--  drivers/mtd/ubi/misc.c | 14
-rw-r--r--  drivers/mtd/ubi/ubi-media.h | 137
-rw-r--r--  drivers/mtd/ubi/ubi.h | 134
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 14
-rw-r--r--  drivers/mtd/ubi/wl.c | 647
30 files changed, 3610 insertions, 665 deletions
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f2f482bec573..73ae81a629f2 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
1123} 1123}
1124#endif 1124#endif
1125 1125
1126static inline unsigned long get_vm_size(struct vm_area_struct *vma)
1127{
1128 return vma->vm_end - vma->vm_start;
1129}
1130
1131static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
1132{
1133 return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
1134}
1135
1136/*
1137 * Set a new vm offset.
1138 *
1139 * Verify that the incoming offset really works as a page offset,
1140 * and that the offset and size fit in a resource_size_t.
1141 */
1142static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
1143{
1144 pgoff_t pgoff = off >> PAGE_SHIFT;
1145 if (off != (resource_size_t) pgoff << PAGE_SHIFT)
1146 return -EINVAL;
1147 if (off + get_vm_size(vma) - 1 < off)
1148 return -EINVAL;
1149 vma->vm_pgoff = pgoff;
1150 return 0;
1151}
1152
1126/* 1153/*
1127 * set up a mapping for shared memory segments 1154 * set up a mapping for shared memory segments
1128 */ 1155 */
@@ -1132,21 +1159,30 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1132 struct mtd_file_info *mfi = file->private_data; 1159 struct mtd_file_info *mfi = file->private_data;
1133 struct mtd_info *mtd = mfi->mtd; 1160 struct mtd_info *mtd = mfi->mtd;
1134 struct map_info *map = mtd->priv; 1161 struct map_info *map = mtd->priv;
1135 unsigned long start; 1162 resource_size_t start, off;
1136 unsigned long off; 1163 unsigned long len, vma_len;
1137 u32 len;
1138 1164
1139 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { 1165 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
1140 off = vma->vm_pgoff << PAGE_SHIFT; 1166 off = get_vm_offset(vma);
1141 start = map->phys; 1167 start = map->phys;
1142 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); 1168 len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
1143 start &= PAGE_MASK; 1169 start &= PAGE_MASK;
1144 if ((vma->vm_end - vma->vm_start + off) > len) 1170 vma_len = get_vm_size(vma);
1171
1172 /* Overflow in off+len? */
1173 if (vma_len + off < off)
1174 return -EINVAL;
1175 /* Does it fit in the mapping? */
1176 if (vma_len + off > len)
1145 return -EINVAL; 1177 return -EINVAL;
1146 1178
1147 off += start; 1179 off += start;
1148 vma->vm_pgoff = off >> PAGE_SHIFT; 1180 /* Did that overflow? */
1149 vma->vm_flags |= VM_IO | VM_RESERVED; 1181 if (off < start)
1182 return -EINVAL;
1183 if (set_vm_offset(vma, off) < 0)
1184 return -EINVAL;
1185 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
1150 1186
1151#ifdef pgprot_noncached 1187#ifdef pgprot_noncached
1152 if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) 1188 if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
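The mmap() rework above boils down to offset arithmetic that must not silently truncate or wrap. A minimal standalone sketch of the same checks, assuming uint64_t as a stand-in for resource_size_t and plain parameters in place of the vma fields (illustration only, not the kernel code):

/* Illustration of the overflow checks added to set_vm_offset()/mtdchar_mmap(). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Reject offsets that cannot be expressed as a page offset, or whose
 * end address wraps around. */
static int check_offset(uint64_t off, uint64_t vm_size)
{
	uint64_t pgoff = off >> PAGE_SHIFT;

	if (off != pgoff << PAGE_SHIFT)
		return -1;	/* low bits would be lost in vm_pgoff */
	if (off + vm_size - 1 < off)
		return -1;	/* off + size wraps past the top of the space */
	return 0;
}

int main(void)
{
	/* A 64 KiB mapping at 1 MiB passes ... */
	printf("%d\n", check_offset(1ULL << 20, 64 << 10));
	/* ... a page-aligned offset near the top of the address space wraps. */
	printf("%d\n", check_offset(UINT64_MAX - 0xFFF, 64 << 10));
	return 0;
}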
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index ec794a72975d..374c46dff7dd 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
1077 * until the request succeeds or until the allocation size falls below 1077 * until the request succeeds or until the allocation size falls below
1078 * the system page size. This attempts to make sure it does not adversely 1078 * the system page size. This attempts to make sure it does not adversely
1079 * impact system performance, so when allocating more than one page, we 1079 * impact system performance, so when allocating more than one page, we
1080 * ask the memory allocator to avoid re-trying, swapping, writing back 1080 * ask the memory allocator to avoid re-trying.
1081 * or performing I/O.
1082 * 1081 *
1083 * Note, this function also makes sure that the allocated buffer is aligned to 1082 * Note, this function also makes sure that the allocated buffer is aligned to
1084 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value. 1083 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
1092 */ 1091 */
1093void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size) 1092void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
1094{ 1093{
1095 gfp_t flags = __GFP_NOWARN | __GFP_WAIT | 1094 gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
1096 __GFP_NORETRY | __GFP_NO_KSWAPD;
1097 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE); 1095 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
1098 void *kbuf; 1096 void *kbuf;
1099 1097
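The flag change keeps mtd_kmalloc_up_to()'s basic strategy: ask for a large contiguous buffer without letting the allocator retry aggressively, and if that fails, halve the request until it succeeds or reaches one page. A userspace sketch of that back-off loop, with malloc() standing in for kmalloc() (the GFP flags and the writesize alignment done by the real function are not modelled):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Shrink-and-retry allocation: on return, *size holds what was requested last. */
static void *alloc_up_to(size_t *size, size_t min_alloc)
{
	void *buf;

	while (*size > min_alloc) {
		buf = malloc(*size);
		if (buf)
			return buf;
		*size >>= 1;
		if (*size < min_alloc)
			*size = min_alloc;
	}
	return malloc(*size);
}

int main(void)
{
	size_t want = 8UL << 20;	/* ask for 8 MiB */
	void *buf = alloc_up_to(&want, PAGE_SIZE);

	if (buf)
		printf("got %zu bytes\n", want);
	free(buf);
	return 0;
}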
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 788f00be8d07..f5b3f91fa1cc 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -385,8 +385,8 @@ static void mtdoops_notify_remove(struct mtd_info *mtd)
385 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n"); 385 printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");
386 386
387 cxt->mtd = NULL; 387 cxt->mtd = NULL;
388 flush_work_sync(&cxt->work_erase); 388 flush_work(&cxt->work_erase);
389 flush_work_sync(&cxt->work_write); 389 flush_work(&cxt->work_write);
390} 390}
391 391
392 392
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index f8c08ec65feb..70fa70a8318f 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -747,7 +747,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
747 return ret; 747 return ret;
748} 748}
749 749
750int mtd_is_partition(struct mtd_info *mtd) 750int mtd_is_partition(const struct mtd_info *mtd)
751{ 751{
752 struct mtd_part *part; 752 struct mtd_part *part;
753 int ispart = 0; 753 int ispart = 0;
@@ -763,3 +763,13 @@ int mtd_is_partition(struct mtd_info *mtd)
763 return ispart; 763 return ispart;
764} 764}
765EXPORT_SYMBOL_GPL(mtd_is_partition); 765EXPORT_SYMBOL_GPL(mtd_is_partition);
766
767/* Returns the size of the entire flash chip */
768uint64_t mtd_get_device_size(const struct mtd_info *mtd)
769{
770 if (!mtd_is_partition(mtd))
771 return mtd->size;
772
773 return PART(mtd)->master->size;
774}
775EXPORT_SYMBOL_GPL(mtd_get_device_size);
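mtd_get_device_size() reports the size of the whole chip even when the caller only holds a partition, which is what per-device accounting such as UBI's bad-PEB reserve (see the Kconfig change further down) needs. A toy userspace model of the lookup, with a cut-down struct standing in for struct mtd_info/struct mtd_part (illustration only):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in: a partition carries a pointer to its master device. */
struct mtd {
	uint64_t size;
	const struct mtd *master;	/* NULL for a whole-chip device */
};

/* Mirrors mtd_get_device_size(): a partition reports its master's size. */
static uint64_t device_size(const struct mtd *m)
{
	return m->master ? m->master->size : m->size;
}

int main(void)
{
	struct mtd chip = { .size = 256ULL << 20, .master = NULL };
	struct mtd part = { .size = 64ULL << 20, .master = &chip };

	printf("partition: %llu MiB, whole device: %llu MiB\n",
	       (unsigned long long)(part.size >> 20),
	       (unsigned long long)(device_size(&part) >> 20));
	return 0;
}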
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 2d73f2393586..9e7723aa7acc 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -23,11 +23,15 @@
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h> 24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h> 25#include <linux/mtd/partitions.h>
26#include <linux/gpio.h>
27#include <linux/platform_data/gpio-omap.h>
28
26#include <asm/io.h> 29#include <asm/io.h>
27#include <mach/hardware.h>
28#include <asm/sizes.h> 30#include <asm/sizes.h>
29#include <linux/gpio.h> 31
30#include <plat/board-ams-delta.h> 32#include <mach/board-ams-delta.h>
33
34#include <mach/hardware.h>
31 35
32/* 36/*
33 * MTD structure for E3 (Delta) 37 * MTD structure for E3 (Delta)
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index df1ab7dc3440..945047ad0952 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -35,8 +35,8 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/of_device.h> 36#include <linux/of_device.h>
37 37
38#include <mach/nand.h> 38#include <linux/platform_data/mtd-davinci.h>
39#include <mach/aemif.h> 39#include <linux/platform_data/mtd-davinci-aemif.h>
40 40
41/* 41/*
42 * This is a device driver for the NAND flash controller found on the 42 * This is a device driver for the NAND flash controller found on the
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 1be83dcc730a..3551a99076ba 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -31,6 +31,7 @@
31#include <linux/mtd/nand_ecc.h> 31#include <linux/mtd/nand_ecc.h>
32#include <asm/fsl_ifc.h> 32#include <asm/fsl_ifc.h>
33 33
34#define FSL_IFC_V1_1_0 0x01010000
34#define ERR_BYTE 0xFF /* Value returned for read 35#define ERR_BYTE 0xFF /* Value returned for read
35 bytes when read failed */ 36 bytes when read failed */
36#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait 37#define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait
@@ -735,13 +736,62 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
735 return 0; 736 return 0;
736} 737}
737 738
739static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
740{
741 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
742 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
743 uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
744 uint32_t cs = priv->bank;
745
746 /* Save CSOR and CSOR_ext */
747 csor = in_be32(&ifc->csor_cs[cs].csor);
748 csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
749
750	/* change PageSize 8K and SpareSize 1K */
751 csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
752 out_be32(&ifc->csor_cs[cs].csor, csor_8k);
753 out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
754
755 /* READID */
756 out_be32(&ifc->ifc_nand.nand_fir0,
757 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
758 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
759 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
760 out_be32(&ifc->ifc_nand.nand_fcr0,
761 NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
762 out_be32(&ifc->ifc_nand.row3, 0x0);
763
764 out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
765
766 /* Program ROW0/COL0 */
767 out_be32(&ifc->ifc_nand.row0, 0x0);
768 out_be32(&ifc->ifc_nand.col0, 0x0);
769
770 /* set the chip select for NAND Transaction */
771 out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
772
773 /* start read seq */
774 out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
775
776 /* wait for command complete flag or timeout */
777 wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
778 IFC_TIMEOUT_MSECS * HZ/1000);
779
780 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
781 printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
782
783 /* Restore CSOR and CSOR_ext */
784 out_be32(&ifc->csor_cs[cs].csor, csor);
785 out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
786}
787
738static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) 788static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
739{ 789{
740 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 790 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
741 struct fsl_ifc_regs __iomem *ifc = ctrl->regs; 791 struct fsl_ifc_regs __iomem *ifc = ctrl->regs;
742 struct nand_chip *chip = &priv->chip; 792 struct nand_chip *chip = &priv->chip;
743 struct nand_ecclayout *layout; 793 struct nand_ecclayout *layout;
744 u32 csor; 794 u32 csor, ver;
745 795
746 /* Fill in fsl_ifc_mtd structure */ 796 /* Fill in fsl_ifc_mtd structure */
747 priv->mtd.priv = chip; 797 priv->mtd.priv = chip;
@@ -834,6 +884,10 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
834 chip->ecc.mode = NAND_ECC_SOFT; 884 chip->ecc.mode = NAND_ECC_SOFT;
835 } 885 }
836 886
887 ver = in_be32(&ifc->ifc_rev);
888 if (ver == FSL_IFC_V1_1_0)
889 fsl_ifc_sram_init(priv);
890
837 return 0; 891 return 0;
838} 892}
839 893
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 8ec7cc007dee..72e31d86030d 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -36,7 +36,7 @@
36#include <linux/of_mtd.h> 36#include <linux/of_mtd.h>
37 37
38#include <asm/mach/flash.h> 38#include <asm/mach/flash.h>
39#include <mach/mxc_nand.h> 39#include <linux/platform_data/mtd-mxc_nand.h>
40#include <mach/hardware.h> 40#include <mach/hardware.h>
41 41
42#define DRIVER_NAME "mxc_nand" 42#define DRIVER_NAME "mxc_nand"
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index a86aa812ca13..9ee0c4edfacf 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -31,7 +31,7 @@
31#include <linux/mtd/partitions.h> 31#include <linux/mtd/partitions.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <mach/nand.h> 34#include <linux/platform_data/mtd-nomadik-nand.h>
35#include <mach/fsmc.h> 35#include <mach/fsmc.h>
36 36
37#include <mtd/mtd-abi.h> 37#include <mtd/mtd-abi.h>
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 9142005c3029..5b3138620646 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -29,7 +29,7 @@
29 29
30#include <plat/dma.h> 30#include <plat/dma.h>
31#include <plat/gpmc.h> 31#include <plat/gpmc.h>
32#include <plat/nand.h> 32#include <linux/platform_data/mtd-nand-omap2.h>
33 33
34#define DRIVER_NAME "omap2-nand" 34#define DRIVER_NAME "omap2-nand"
35#define OMAP_NAND_TIMEOUT_MS 5000 35#define OMAP_NAND_TIMEOUT_MS 5000
@@ -101,6 +101,16 @@
101#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0) 101#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
102#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1) 102#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)
103 103
104#define PREFETCH_CONFIG1_CS_SHIFT 24
105#define ECC_CONFIG_CS_SHIFT 1
106#define CS_MASK 0x7
107#define ENABLE_PREFETCH (0x1 << 7)
108#define DMA_MPU_MODE_SHIFT 2
109#define ECCSIZE1_SHIFT 22
110#define ECC1RESULTSIZE 0x1
111#define ECCCLEAR 0x100
112#define ECC1 0x1
113
104/* oob info generated runtime depending on ecc algorithm and layout selected */ 114/* oob info generated runtime depending on ecc algorithm and layout selected */
105static struct nand_ecclayout omap_oobinfo; 115static struct nand_ecclayout omap_oobinfo;
106/* Define some generic bad / good block scan pattern which are used 116/* Define some generic bad / good block scan pattern which are used
@@ -124,15 +134,18 @@ struct omap_nand_info {
124 134
125 int gpmc_cs; 135 int gpmc_cs;
126 unsigned long phys_base; 136 unsigned long phys_base;
137 unsigned long mem_size;
127 struct completion comp; 138 struct completion comp;
128 struct dma_chan *dma; 139 struct dma_chan *dma;
129 int gpmc_irq; 140 int gpmc_irq_fifo;
141 int gpmc_irq_count;
130 enum { 142 enum {
131 OMAP_NAND_IO_READ = 0, /* read */ 143 OMAP_NAND_IO_READ = 0, /* read */
132 OMAP_NAND_IO_WRITE, /* write */ 144 OMAP_NAND_IO_WRITE, /* write */
133 } iomode; 145 } iomode;
134 u_char *buf; 146 u_char *buf;
135 int buf_len; 147 int buf_len;
148 struct gpmc_nand_regs reg;
136 149
137#ifdef CONFIG_MTD_NAND_OMAP_BCH 150#ifdef CONFIG_MTD_NAND_OMAP_BCH
138 struct bch_control *bch; 151 struct bch_control *bch;
@@ -141,6 +154,63 @@ struct omap_nand_info {
141}; 154};
142 155
143/** 156/**
157 * omap_prefetch_enable - configures and starts prefetch transfer
158 * @cs: cs (chip select) number
159 * @fifo_th: fifo threshold to be used for read/ write
160 * @dma_mode: dma mode enable (1) or disable (0)
161 * @u32_count: number of bytes to be transferred
162 * @is_write: prefetch read(0) or write post(1) mode
163 */
164static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
165 unsigned int u32_count, int is_write, struct omap_nand_info *info)
166{
167 u32 val;
168
169 if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
170 return -1;
171
172 if (readl(info->reg.gpmc_prefetch_control))
173 return -EBUSY;
174
175 /* Set the amount of bytes to be prefetched */
176 writel(u32_count, info->reg.gpmc_prefetch_config2);
177
178 /* Set dma/mpu mode, the prefetch read / post write and
179	 * enable the engine. Set which cs has requested it.
180 */
181 val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
182 PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
183 (dma_mode << DMA_MPU_MODE_SHIFT) | (0x1 & is_write));
184 writel(val, info->reg.gpmc_prefetch_config1);
185
186 /* Start the prefetch engine */
187 writel(0x1, info->reg.gpmc_prefetch_control);
188
189 return 0;
190}
191
192/**
193 * omap_prefetch_reset - disables and stops the prefetch engine
194 */
195static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
196{
197 u32 config1;
198
199 /* check if the same module/cs is trying to reset */
200 config1 = readl(info->reg.gpmc_prefetch_config1);
201 if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
202 return -EINVAL;
203
204 /* Stop the PFPW engine */
205 writel(0x0, info->reg.gpmc_prefetch_control);
206
207 /* Reset/disable the PFPW engine */
208 writel(0x0, info->reg.gpmc_prefetch_config1);
209
210 return 0;
211}
212
213/**
144 * omap_hwcontrol - hardware specific access to control-lines 214 * omap_hwcontrol - hardware specific access to control-lines
145 * @mtd: MTD device structure 215 * @mtd: MTD device structure
146 * @cmd: command to device 216 * @cmd: command to device
@@ -158,13 +228,13 @@ static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
158 228
159 if (cmd != NAND_CMD_NONE) { 229 if (cmd != NAND_CMD_NONE) {
160 if (ctrl & NAND_CLE) 230 if (ctrl & NAND_CLE)
161 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_COMMAND, cmd); 231 writeb(cmd, info->reg.gpmc_nand_command);
162 232
163 else if (ctrl & NAND_ALE) 233 else if (ctrl & NAND_ALE)
164 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_ADDRESS, cmd); 234 writeb(cmd, info->reg.gpmc_nand_address);
165 235
166 else /* NAND_NCE */ 236 else /* NAND_NCE */
167 gpmc_nand_write(info->gpmc_cs, GPMC_NAND_DATA, cmd); 237 writeb(cmd, info->reg.gpmc_nand_data);
168 } 238 }
169} 239}
170 240
@@ -198,7 +268,8 @@ static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
198 iowrite8(*p++, info->nand.IO_ADDR_W); 268 iowrite8(*p++, info->nand.IO_ADDR_W);
199 /* wait until buffer is available for write */ 269 /* wait until buffer is available for write */
200 do { 270 do {
201 status = gpmc_read_status(GPMC_STATUS_BUFFER); 271 status = readl(info->reg.gpmc_status) &
272 GPMC_STATUS_BUFF_EMPTY;
202 } while (!status); 273 } while (!status);
203 } 274 }
204} 275}
@@ -235,7 +306,8 @@ static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
235 iowrite16(*p++, info->nand.IO_ADDR_W); 306 iowrite16(*p++, info->nand.IO_ADDR_W);
236 /* wait until buffer is available for write */ 307 /* wait until buffer is available for write */
237 do { 308 do {
238 status = gpmc_read_status(GPMC_STATUS_BUFFER); 309 status = readl(info->reg.gpmc_status) &
310 GPMC_STATUS_BUFF_EMPTY;
239 } while (!status); 311 } while (!status);
240 } 312 }
241} 313}
@@ -265,8 +337,8 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
265 } 337 }
266 338
267 /* configure and start prefetch transfer */ 339 /* configure and start prefetch transfer */
268 ret = gpmc_prefetch_enable(info->gpmc_cs, 340 ret = omap_prefetch_enable(info->gpmc_cs,
269 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0); 341 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
270 if (ret) { 342 if (ret) {
271 /* PFPW engine is busy, use cpu copy method */ 343 /* PFPW engine is busy, use cpu copy method */
272 if (info->nand.options & NAND_BUSWIDTH_16) 344 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -275,14 +347,15 @@ static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
275 omap_read_buf8(mtd, (u_char *)p, len); 347 omap_read_buf8(mtd, (u_char *)p, len);
276 } else { 348 } else {
277 do { 349 do {
278 r_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 350 r_count = readl(info->reg.gpmc_prefetch_status);
351 r_count = GPMC_PREFETCH_STATUS_FIFO_CNT(r_count);
279 r_count = r_count >> 2; 352 r_count = r_count >> 2;
280 ioread32_rep(info->nand.IO_ADDR_R, p, r_count); 353 ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
281 p += r_count; 354 p += r_count;
282 len -= r_count << 2; 355 len -= r_count << 2;
283 } while (len); 356 } while (len);
284 /* disable and stop the PFPW engine */ 357 /* disable and stop the PFPW engine */
285 gpmc_prefetch_reset(info->gpmc_cs); 358 omap_prefetch_reset(info->gpmc_cs, info);
286 } 359 }
287} 360}
288 361
@@ -301,6 +374,7 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
301 int i = 0, ret = 0; 374 int i = 0, ret = 0;
302 u16 *p = (u16 *)buf; 375 u16 *p = (u16 *)buf;
303 unsigned long tim, limit; 376 unsigned long tim, limit;
377 u32 val;
304 378
305 /* take care of subpage writes */ 379 /* take care of subpage writes */
306 if (len % 2 != 0) { 380 if (len % 2 != 0) {
@@ -310,8 +384,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
310 } 384 }
311 385
312 /* configure and start prefetch transfer */ 386 /* configure and start prefetch transfer */
313 ret = gpmc_prefetch_enable(info->gpmc_cs, 387 ret = omap_prefetch_enable(info->gpmc_cs,
314 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1); 388 PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
315 if (ret) { 389 if (ret) {
316 /* PFPW engine is busy, use cpu copy method */ 390 /* PFPW engine is busy, use cpu copy method */
317 if (info->nand.options & NAND_BUSWIDTH_16) 391 if (info->nand.options & NAND_BUSWIDTH_16)
@@ -320,7 +394,8 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
320 omap_write_buf8(mtd, (u_char *)p, len); 394 omap_write_buf8(mtd, (u_char *)p, len);
321 } else { 395 } else {
322 while (len) { 396 while (len) {
323 w_count = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 397 w_count = readl(info->reg.gpmc_prefetch_status);
398 w_count = GPMC_PREFETCH_STATUS_FIFO_CNT(w_count);
324 w_count = w_count >> 1; 399 w_count = w_count >> 1;
325 for (i = 0; (i < w_count) && len; i++, len -= 2) 400 for (i = 0; (i < w_count) && len; i++, len -= 2)
326 iowrite16(*p++, info->nand.IO_ADDR_W); 401 iowrite16(*p++, info->nand.IO_ADDR_W);
@@ -329,11 +404,14 @@ static void omap_write_buf_pref(struct mtd_info *mtd,
329 tim = 0; 404 tim = 0;
330 limit = (loops_per_jiffy * 405 limit = (loops_per_jiffy *
331 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 406 msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
332 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) 407 do {
333 cpu_relax(); 408 cpu_relax();
409 val = readl(info->reg.gpmc_prefetch_status);
410 val = GPMC_PREFETCH_STATUS_COUNT(val);
411 } while (val && (tim++ < limit));
334 412
335 /* disable and stop the PFPW engine */ 413 /* disable and stop the PFPW engine */
336 gpmc_prefetch_reset(info->gpmc_cs); 414 omap_prefetch_reset(info->gpmc_cs, info);
337 } 415 }
338} 416}
339 417
@@ -365,6 +443,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
365 unsigned long tim, limit; 443 unsigned long tim, limit;
366 unsigned n; 444 unsigned n;
367 int ret; 445 int ret;
446 u32 val;
368 447
369 if (addr >= high_memory) { 448 if (addr >= high_memory) {
370 struct page *p1; 449 struct page *p1;
@@ -396,9 +475,9 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
396 tx->callback_param = &info->comp; 475 tx->callback_param = &info->comp;
397 dmaengine_submit(tx); 476 dmaengine_submit(tx);
398 477
399 /* configure and start prefetch transfer */ 478 /* configure and start prefetch transfer */
400 ret = gpmc_prefetch_enable(info->gpmc_cs, 479 ret = omap_prefetch_enable(info->gpmc_cs,
401 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); 480 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
402 if (ret) 481 if (ret)
403 /* PFPW engine is busy, use cpu copy method */ 482 /* PFPW engine is busy, use cpu copy method */
404 goto out_copy_unmap; 483 goto out_copy_unmap;
@@ -410,11 +489,15 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
410 wait_for_completion(&info->comp); 489 wait_for_completion(&info->comp);
411 tim = 0; 490 tim = 0;
412 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 491 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
413 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) 492
493 do {
414 cpu_relax(); 494 cpu_relax();
495 val = readl(info->reg.gpmc_prefetch_status);
496 val = GPMC_PREFETCH_STATUS_COUNT(val);
497 } while (val && (tim++ < limit));
415 498
416 /* disable and stop the PFPW engine */ 499 /* disable and stop the PFPW engine */
417 gpmc_prefetch_reset(info->gpmc_cs); 500 omap_prefetch_reset(info->gpmc_cs, info);
418 501
419 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); 502 dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
420 return 0; 503 return 0;
@@ -471,13 +554,12 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
471{ 554{
472 struct omap_nand_info *info = (struct omap_nand_info *) dev; 555 struct omap_nand_info *info = (struct omap_nand_info *) dev;
473 u32 bytes; 556 u32 bytes;
474 u32 irq_stat;
475 557
476 irq_stat = gpmc_read_status(GPMC_GET_IRQ_STATUS); 558 bytes = readl(info->reg.gpmc_prefetch_status);
477 bytes = gpmc_read_status(GPMC_PREFETCH_FIFO_CNT); 559 bytes = GPMC_PREFETCH_STATUS_FIFO_CNT(bytes);
478 bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */ 560 bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
479 if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */ 561 if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
480 if (irq_stat & 0x2) 562 if (this_irq == info->gpmc_irq_count)
481 goto done; 563 goto done;
482 564
483 if (info->buf_len && (info->buf_len < bytes)) 565 if (info->buf_len && (info->buf_len < bytes))
@@ -494,20 +576,17 @@ static irqreturn_t omap_nand_irq(int this_irq, void *dev)
494 (u32 *)info->buf, bytes >> 2); 576 (u32 *)info->buf, bytes >> 2);
495 info->buf = info->buf + bytes; 577 info->buf = info->buf + bytes;
496 578
497 if (irq_stat & 0x2) 579 if (this_irq == info->gpmc_irq_count)
498 goto done; 580 goto done;
499 } 581 }
500 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat);
501 582
502 return IRQ_HANDLED; 583 return IRQ_HANDLED;
503 584
504done: 585done:
505 complete(&info->comp); 586 complete(&info->comp);
506 /* disable irq */
507 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 0);
508 587
509 /* clear status */ 588 disable_irq_nosync(info->gpmc_irq_fifo);
510 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, irq_stat); 589 disable_irq_nosync(info->gpmc_irq_count);
511 590
512 return IRQ_HANDLED; 591 return IRQ_HANDLED;
513} 592}
@@ -534,22 +613,22 @@ static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
534 init_completion(&info->comp); 613 init_completion(&info->comp);
535 614
536 /* configure and start prefetch transfer */ 615 /* configure and start prefetch transfer */
537 ret = gpmc_prefetch_enable(info->gpmc_cs, 616 ret = omap_prefetch_enable(info->gpmc_cs,
538 PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0); 617 PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
539 if (ret) 618 if (ret)
540 /* PFPW engine is busy, use cpu copy method */ 619 /* PFPW engine is busy, use cpu copy method */
541 goto out_copy; 620 goto out_copy;
542 621
543 info->buf_len = len; 622 info->buf_len = len;
544 /* enable irq */ 623
545 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 624 enable_irq(info->gpmc_irq_count);
546 (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); 625 enable_irq(info->gpmc_irq_fifo);
547 626
548 /* waiting for read to complete */ 627 /* waiting for read to complete */
549 wait_for_completion(&info->comp); 628 wait_for_completion(&info->comp);
550 629
551 /* disable and stop the PFPW engine */ 630 /* disable and stop the PFPW engine */
552 gpmc_prefetch_reset(info->gpmc_cs); 631 omap_prefetch_reset(info->gpmc_cs, info);
553 return; 632 return;
554 633
555out_copy: 634out_copy:
@@ -572,6 +651,7 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
572 struct omap_nand_info, mtd); 651 struct omap_nand_info, mtd);
573 int ret = 0; 652 int ret = 0;
574 unsigned long tim, limit; 653 unsigned long tim, limit;
654 u32 val;
575 655
576 if (len <= mtd->oobsize) { 656 if (len <= mtd->oobsize) {
577 omap_write_buf_pref(mtd, buf, len); 657 omap_write_buf_pref(mtd, buf, len);
@@ -583,27 +663,31 @@ static void omap_write_buf_irq_pref(struct mtd_info *mtd,
583 init_completion(&info->comp); 663 init_completion(&info->comp);
584 664
585 /* configure and start prefetch transfer : size=24 */ 665 /* configure and start prefetch transfer : size=24 */
586 ret = gpmc_prefetch_enable(info->gpmc_cs, 666 ret = omap_prefetch_enable(info->gpmc_cs,
587 (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1); 667 (PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
588 if (ret) 668 if (ret)
589 /* PFPW engine is busy, use cpu copy method */ 669 /* PFPW engine is busy, use cpu copy method */
590 goto out_copy; 670 goto out_copy;
591 671
592 info->buf_len = len; 672 info->buf_len = len;
593 /* enable irq */ 673
594 gpmc_cs_configure(info->gpmc_cs, GPMC_ENABLE_IRQ, 674 enable_irq(info->gpmc_irq_count);
595 (GPMC_IRQ_FIFOEVENTENABLE | GPMC_IRQ_COUNT_EVENT)); 675 enable_irq(info->gpmc_irq_fifo);
596 676
597 /* waiting for write to complete */ 677 /* waiting for write to complete */
598 wait_for_completion(&info->comp); 678 wait_for_completion(&info->comp);
679
599 /* wait for data to flushed-out before reset the prefetch */ 680 /* wait for data to flushed-out before reset the prefetch */
600 tim = 0; 681 tim = 0;
601 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS)); 682 limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
602 while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit)) 683 do {
684 val = readl(info->reg.gpmc_prefetch_status);
685 val = GPMC_PREFETCH_STATUS_COUNT(val);
603 cpu_relax(); 686 cpu_relax();
687 } while (val && (tim++ < limit));
604 688
605 /* disable and stop the PFPW engine */ 689 /* disable and stop the PFPW engine */
606 gpmc_prefetch_reset(info->gpmc_cs); 690 omap_prefetch_reset(info->gpmc_cs, info);
607 return; 691 return;
608 692
609out_copy: 693out_copy:
@@ -822,7 +906,20 @@ static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
822{ 906{
823 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 907 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
824 mtd); 908 mtd);
825 return gpmc_calculate_ecc(info->gpmc_cs, dat, ecc_code); 909 u32 val;
910
911 val = readl(info->reg.gpmc_ecc_config);
912 if (((val >> ECC_CONFIG_CS_SHIFT) & ~CS_MASK) != info->gpmc_cs)
913 return -EINVAL;
914
915 /* read ecc result */
916 val = readl(info->reg.gpmc_ecc1_result);
917 *ecc_code++ = val; /* P128e, ..., P1e */
918 *ecc_code++ = val >> 16; /* P128o, ..., P1o */
919 /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
920 *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
921
922 return 0;
826} 923}
827 924
828/** 925/**
@@ -836,8 +933,34 @@ static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
836 mtd); 933 mtd);
837 struct nand_chip *chip = mtd->priv; 934 struct nand_chip *chip = mtd->priv;
838 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0; 935 unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
936 u32 val;
937
938 /* clear ecc and enable bits */
939 val = ECCCLEAR | ECC1;
940 writel(val, info->reg.gpmc_ecc_control);
839 941
840 gpmc_enable_hwecc(info->gpmc_cs, mode, dev_width, info->nand.ecc.size); 942 /* program ecc and result sizes */
943 val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
944 ECC1RESULTSIZE);
945 writel(val, info->reg.gpmc_ecc_size_config);
946
947 switch (mode) {
948 case NAND_ECC_READ:
949 case NAND_ECC_WRITE:
950 writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
951 break;
952 case NAND_ECC_READSYN:
953 writel(ECCCLEAR, info->reg.gpmc_ecc_control);
954 break;
955 default:
956 dev_info(&info->pdev->dev,
957 "error: unrecognized Mode[%d]!\n", mode);
958 break;
959 }
960
961 /* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
962 val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
963 writel(val, info->reg.gpmc_ecc_config);
841} 964}
842 965
843/** 966/**
@@ -865,10 +988,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
865 else 988 else
866 timeo += (HZ * 20) / 1000; 989 timeo += (HZ * 20) / 1000;
867 990
868 gpmc_nand_write(info->gpmc_cs, 991 writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
869 GPMC_NAND_COMMAND, (NAND_CMD_STATUS & 0xFF));
870 while (time_before(jiffies, timeo)) { 992 while (time_before(jiffies, timeo)) {
871 status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA); 993 status = readb(info->reg.gpmc_nand_data);
872 if (status & NAND_STATUS_READY) 994 if (status & NAND_STATUS_READY)
873 break; 995 break;
874 cond_resched(); 996 cond_resched();
@@ -888,22 +1010,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
888 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1010 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
889 mtd); 1011 mtd);
890 1012
891 val = gpmc_read_status(GPMC_GET_IRQ_STATUS); 1013 val = readl(info->reg.gpmc_status);
1014
892 if ((val & 0x100) == 0x100) { 1015 if ((val & 0x100) == 0x100) {
893 /* Clear IRQ Interrupt */ 1016 return 1;
894 val |= 0x100;
895 val &= ~(0x0);
896 gpmc_cs_configure(info->gpmc_cs, GPMC_SET_IRQ_STATUS, val);
897 } else { 1017 } else {
898 unsigned int cnt = 0; 1018 return 0;
899 while (cnt++ < 0x1FF) {
900 if ((val & 0x100) == 0x100)
901 return 0;
902 val = gpmc_read_status(GPMC_GET_IRQ_STATUS);
903 }
904 } 1019 }
905
906 return 1;
907} 1020}
908 1021
909#ifdef CONFIG_MTD_NAND_OMAP_BCH 1022#ifdef CONFIG_MTD_NAND_OMAP_BCH
@@ -1134,6 +1247,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1134 int i, offset; 1247 int i, offset;
1135 dma_cap_mask_t mask; 1248 dma_cap_mask_t mask;
1136 unsigned sig; 1249 unsigned sig;
1250 struct resource *res;
1137 1251
1138 pdata = pdev->dev.platform_data; 1252 pdata = pdev->dev.platform_data;
1139 if (pdata == NULL) { 1253 if (pdata == NULL) {
@@ -1153,7 +1267,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1153 info->pdev = pdev; 1267 info->pdev = pdev;
1154 1268
1155 info->gpmc_cs = pdata->cs; 1269 info->gpmc_cs = pdata->cs;
1156 info->phys_base = pdata->phys_base; 1270 info->reg = pdata->reg;
1157 1271
1158 info->mtd.priv = &info->nand; 1272 info->mtd.priv = &info->nand;
1159 info->mtd.name = dev_name(&pdev->dev); 1273 info->mtd.name = dev_name(&pdev->dev);
@@ -1162,16 +1276,23 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1162 info->nand.options = pdata->devsize; 1276 info->nand.options = pdata->devsize;
1163 info->nand.options |= NAND_SKIP_BBTSCAN; 1277 info->nand.options |= NAND_SKIP_BBTSCAN;
1164 1278
1165 /* NAND write protect off */ 1279 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1166 gpmc_cs_configure(info->gpmc_cs, GPMC_CONFIG_WP, 0); 1280 if (res == NULL) {
1281 err = -EINVAL;
1282 dev_err(&pdev->dev, "error getting memory resource\n");
1283 goto out_free_info;
1284 }
1167 1285
1168 if (!request_mem_region(info->phys_base, NAND_IO_SIZE, 1286 info->phys_base = res->start;
1287 info->mem_size = resource_size(res);
1288
1289 if (!request_mem_region(info->phys_base, info->mem_size,
1169 pdev->dev.driver->name)) { 1290 pdev->dev.driver->name)) {
1170 err = -EBUSY; 1291 err = -EBUSY;
1171 goto out_free_info; 1292 goto out_free_info;
1172 } 1293 }
1173 1294
1174 info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE); 1295 info->nand.IO_ADDR_R = ioremap(info->phys_base, info->mem_size);
1175 if (!info->nand.IO_ADDR_R) { 1296 if (!info->nand.IO_ADDR_R) {
1176 err = -ENOMEM; 1297 err = -ENOMEM;
1177 goto out_release_mem_region; 1298 goto out_release_mem_region;
@@ -1244,17 +1365,39 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1244 break; 1365 break;
1245 1366
1246 case NAND_OMAP_PREFETCH_IRQ: 1367 case NAND_OMAP_PREFETCH_IRQ:
1247 err = request_irq(pdata->gpmc_irq, 1368 info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
1248 omap_nand_irq, IRQF_SHARED, "gpmc-nand", info); 1369 if (info->gpmc_irq_fifo <= 0) {
1370 dev_err(&pdev->dev, "error getting fifo irq\n");
1371 err = -ENODEV;
1372 goto out_release_mem_region;
1373 }
1374 err = request_irq(info->gpmc_irq_fifo, omap_nand_irq,
1375 IRQF_SHARED, "gpmc-nand-fifo", info);
1249 if (err) { 1376 if (err) {
1250 dev_err(&pdev->dev, "requesting irq(%d) error:%d", 1377 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1251 pdata->gpmc_irq, err); 1378 info->gpmc_irq_fifo, err);
1379 info->gpmc_irq_fifo = 0;
1380 goto out_release_mem_region;
1381 }
1382
1383 info->gpmc_irq_count = platform_get_irq(pdev, 1);
1384 if (info->gpmc_irq_count <= 0) {
1385 dev_err(&pdev->dev, "error getting count irq\n");
1386 err = -ENODEV;
1387 goto out_release_mem_region;
1388 }
1389 err = request_irq(info->gpmc_irq_count, omap_nand_irq,
1390 IRQF_SHARED, "gpmc-nand-count", info);
1391 if (err) {
1392 dev_err(&pdev->dev, "requesting irq(%d) error:%d",
1393 info->gpmc_irq_count, err);
1394 info->gpmc_irq_count = 0;
1252 goto out_release_mem_region; 1395 goto out_release_mem_region;
1253 } else {
1254 info->gpmc_irq = pdata->gpmc_irq;
1255 info->nand.read_buf = omap_read_buf_irq_pref;
1256 info->nand.write_buf = omap_write_buf_irq_pref;
1257 } 1396 }
1397
1398 info->nand.read_buf = omap_read_buf_irq_pref;
1399 info->nand.write_buf = omap_write_buf_irq_pref;
1400
1258 break; 1401 break;
1259 1402
1260 default: 1403 default:
@@ -1340,7 +1483,11 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1340out_release_mem_region: 1483out_release_mem_region:
1341 if (info->dma) 1484 if (info->dma)
1342 dma_release_channel(info->dma); 1485 dma_release_channel(info->dma);
1343 release_mem_region(info->phys_base, NAND_IO_SIZE); 1486 if (info->gpmc_irq_count > 0)
1487 free_irq(info->gpmc_irq_count, info);
1488 if (info->gpmc_irq_fifo > 0)
1489 free_irq(info->gpmc_irq_fifo, info);
1490 release_mem_region(info->phys_base, info->mem_size);
1344out_free_info: 1491out_free_info:
1345 kfree(info); 1492 kfree(info);
1346 1493
@@ -1358,8 +1505,10 @@ static int omap_nand_remove(struct platform_device *pdev)
1358 if (info->dma) 1505 if (info->dma)
1359 dma_release_channel(info->dma); 1506 dma_release_channel(info->dma);
1360 1507
1361 if (info->gpmc_irq) 1508 if (info->gpmc_irq_count > 0)
1362 free_irq(info->gpmc_irq, info); 1509 free_irq(info->gpmc_irq_count, info);
1510 if (info->gpmc_irq_fifo > 0)
1511 free_irq(info->gpmc_irq_fifo, info);
1363 1512
1364 /* Release NAND device, its internal structures and partitions */ 1513 /* Release NAND device, its internal structures and partitions */
1365 nand_release(&info->mtd); 1514 nand_release(&info->mtd);
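omap_prefetch_enable() above replaces the removed gpmc_prefetch_enable() helper with direct register writes, and the core of it is the config word assembled for PREFETCH_CONFIG1. A standalone sketch of that bit packing follows; the CS shift, DMA-mode shift and enable bit are the constants this patch adds to omap2.c, while the PREFETCH_FIFOTHRESHOLD() placement at bit 8 is an assumption here, since that macro lives in plat/gpmc.h and is not shown in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Constants added to omap2.c by this patch. */
#define PREFETCH_CONFIG1_CS_SHIFT	24
#define ENABLE_PREFETCH			(0x1 << 7)
#define DMA_MPU_MODE_SHIFT		2
/* Assumed field position; the real macro is defined in plat/gpmc.h. */
#define PREFETCH_FIFOTHRESHOLD(val)	((val) << 8)

/* Mirrors the value omap_prefetch_enable() writes to PREFETCH_CONFIG1. */
static uint32_t prefetch_config1(uint32_t cs, uint32_t fifo_th,
				 uint32_t dma_mode, uint32_t is_write)
{
	return (cs << PREFETCH_CONFIG1_CS_SHIFT) |
	       PREFETCH_FIFOTHRESHOLD(fifo_th) |
	       ENABLE_PREFETCH |
	       (dma_mode << DMA_MPU_MODE_SHIFT) |
	       (0x1 & is_write);
}

int main(void)
{
	/* CS0, FIFO threshold 64, MPU (non-DMA) mode, read direction. */
	printf("PREFETCH_CONFIG1 = 0x%08x\n",
	       (unsigned int)prefetch_config1(0, 64, 0, 0));
	return 0;
}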
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 9ee436d30932..aefaf8cd31ef 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,7 +21,7 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <asm/io.h> 22#include <asm/io.h>
23#include <asm/sizes.h> 23#include <asm/sizes.h>
24#include <plat/orion_nand.h> 24#include <linux/platform_data/mtd-orion_nand.h>
25 25
26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 26static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
27{ 27{
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 5df91d554dac..37ee75c7bacb 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -22,9 +22,11 @@
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/irq.h> 23#include <linux/irq.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/of.h>
26#include <linux/of_device.h>
25 27
26#include <mach/dma.h> 28#include <mach/dma.h>
27#include <plat/pxa3xx_nand.h> 29#include <linux/platform_data/mtd-nand-pxa3xx.h>
28 30
29#define CHIP_DELAY_TIMEOUT (2 * HZ/10) 31#define CHIP_DELAY_TIMEOUT (2 * HZ/10)
30#define NAND_STOP_DELAY (2 * HZ/50) 32#define NAND_STOP_DELAY (2 * HZ/50)
@@ -1027,7 +1029,7 @@ static int alloc_nand_resource(struct platform_device *pdev)
1027 struct pxa3xx_nand_platform_data *pdata; 1029 struct pxa3xx_nand_platform_data *pdata;
1028 struct pxa3xx_nand_info *info; 1030 struct pxa3xx_nand_info *info;
1029 struct pxa3xx_nand_host *host; 1031 struct pxa3xx_nand_host *host;
1030 struct nand_chip *chip; 1032 struct nand_chip *chip = NULL;
1031 struct mtd_info *mtd; 1033 struct mtd_info *mtd;
1032 struct resource *r; 1034 struct resource *r;
1033 int ret, irq, cs; 1035 int ret, irq, cs;
@@ -1075,21 +1077,31 @@ static int alloc_nand_resource(struct platform_device *pdev)
1075 } 1077 }
1076 clk_enable(info->clk); 1078 clk_enable(info->clk);
1077 1079
1078 r = platform_get_resource(pdev, IORESOURCE_DMA, 0); 1080 /*
1079 if (r == NULL) { 1081 * This is a dirty hack to make this driver work from devicetree
1080 dev_err(&pdev->dev, "no resource defined for data DMA\n"); 1082	 * bindings. It can be removed once we have a proper DMA controller
1081 ret = -ENXIO; 1083 * framework for DT.
1082 goto fail_put_clk; 1084 */
1083 } 1085 if (pdev->dev.of_node && cpu_is_pxa3xx()) {
1084 info->drcmr_dat = r->start; 1086 info->drcmr_dat = 97;
1087 info->drcmr_cmd = 99;
1088 } else {
1089 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1090 if (r == NULL) {
1091 dev_err(&pdev->dev, "no resource defined for data DMA\n");
1092 ret = -ENXIO;
1093 goto fail_put_clk;
1094 }
1095 info->drcmr_dat = r->start;
1085 1096
1086 r = platform_get_resource(pdev, IORESOURCE_DMA, 1); 1097 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1087 if (r == NULL) { 1098 if (r == NULL) {
1088 dev_err(&pdev->dev, "no resource defined for command DMA\n"); 1099 dev_err(&pdev->dev, "no resource defined for command DMA\n");
1089 ret = -ENXIO; 1100 ret = -ENXIO;
1090 goto fail_put_clk; 1101 goto fail_put_clk;
1102 }
1103 info->drcmr_cmd = r->start;
1091 } 1104 }
1092 info->drcmr_cmd = r->start;
1093 1105
1094 irq = platform_get_irq(pdev, 0); 1106 irq = platform_get_irq(pdev, 0);
1095 if (irq < 0) { 1107 if (irq < 0) {
@@ -1194,12 +1206,55 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
1194 return 0; 1206 return 0;
1195} 1207}
1196 1208
1209#ifdef CONFIG_OF
1210static struct of_device_id pxa3xx_nand_dt_ids[] = {
1211 { .compatible = "marvell,pxa3xx-nand" },
1212 {}
1213};
1214MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
1215
1216static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1217{
1218 struct pxa3xx_nand_platform_data *pdata;
1219 struct device_node *np = pdev->dev.of_node;
1220 const struct of_device_id *of_id =
1221 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1222
1223 if (!of_id)
1224 return 0;
1225
1226 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1227 if (!pdata)
1228 return -ENOMEM;
1229
1230 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1231 pdata->enable_arbiter = 1;
1232 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1233 pdata->keep_config = 1;
1234 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1235
1236 pdev->dev.platform_data = pdata;
1237
1238 return 0;
1239}
1240#else
1241static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1242{
1243 return 0;
1244}
1245#endif
1246
1197static int pxa3xx_nand_probe(struct platform_device *pdev) 1247static int pxa3xx_nand_probe(struct platform_device *pdev)
1198{ 1248{
1199 struct pxa3xx_nand_platform_data *pdata; 1249 struct pxa3xx_nand_platform_data *pdata;
1250 struct mtd_part_parser_data ppdata = {};
1200 struct pxa3xx_nand_info *info; 1251 struct pxa3xx_nand_info *info;
1201 int ret, cs, probe_success; 1252 int ret, cs, probe_success;
1202 1253
1254 ret = pxa3xx_nand_probe_dt(pdev);
1255 if (ret)
1256 return ret;
1257
1203 pdata = pdev->dev.platform_data; 1258 pdata = pdev->dev.platform_data;
1204 if (!pdata) { 1259 if (!pdata) {
1205 dev_err(&pdev->dev, "no platform data defined\n"); 1260 dev_err(&pdev->dev, "no platform data defined\n");
@@ -1223,8 +1278,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
1223 continue; 1278 continue;
1224 } 1279 }
1225 1280
1281 ppdata.of_node = pdev->dev.of_node;
1226 ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 1282 ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
1227 NULL, pdata->parts[cs], 1283 &ppdata, pdata->parts[cs],
1228 pdata->nr_parts[cs]); 1284 pdata->nr_parts[cs]);
1229 if (!ret) 1285 if (!ret)
1230 probe_success = 1; 1286 probe_success = 1;
@@ -1300,6 +1356,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
1300static struct platform_driver pxa3xx_nand_driver = { 1356static struct platform_driver pxa3xx_nand_driver = {
1301 .driver = { 1357 .driver = {
1302 .name = "pxa3xx-nand", 1358 .name = "pxa3xx-nand",
1359 .of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
1303 }, 1360 },
1304 .probe = pxa3xx_nand_probe, 1361 .probe = pxa3xx_nand_probe,
1305 .remove = pxa3xx_nand_remove, 1362 .remove = pxa3xx_nand_remove,
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 792cee846221..295e4bedad96 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -47,7 +47,7 @@
47#include <linux/mtd/partitions.h> 47#include <linux/mtd/partitions.h>
48 48
49#include <plat/regs-nand.h> 49#include <plat/regs-nand.h>
50#include <plat/nand.h> 50#include <linux/platform_data/mtd-nand-s3c2410.h>
51 51
52/* new oob placement block for use with hardware ecc generation 52/* new oob placement block for use with hardware ecc generation
53 */ 53 */
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 398a82783848..1961be985171 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -39,22 +39,21 @@
39 39
40#include <asm/mach/flash.h> 40#include <asm/mach/flash.h>
41#include <plat/gpmc.h> 41#include <plat/gpmc.h>
42#include <plat/onenand.h> 42#include <linux/platform_data/mtd-onenand-omap2.h>
43#include <asm/gpio.h> 43#include <asm/gpio.h>
44 44
45#include <plat/dma.h> 45#include <plat/dma.h>
46 46#include <plat/cpu.h>
47#include <plat/board.h>
48 47
49#define DRIVER_NAME "omap2-onenand" 48#define DRIVER_NAME "omap2-onenand"
50 49
51#define ONENAND_IO_SIZE SZ_128K
52#define ONENAND_BUFRAM_SIZE (1024 * 5) 50#define ONENAND_BUFRAM_SIZE (1024 * 5)
53 51
54struct omap2_onenand { 52struct omap2_onenand {
55 struct platform_device *pdev; 53 struct platform_device *pdev;
56 int gpmc_cs; 54 int gpmc_cs;
57 unsigned long phys_base; 55 unsigned long phys_base;
56 unsigned int mem_size;
58 int gpio_irq; 57 int gpio_irq;
59 struct mtd_info mtd; 58 struct mtd_info mtd;
60 struct onenand_chip onenand; 59 struct onenand_chip onenand;
@@ -626,6 +625,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
626 struct omap2_onenand *c; 625 struct omap2_onenand *c;
627 struct onenand_chip *this; 626 struct onenand_chip *this;
628 int r; 627 int r;
628 struct resource *res;
629 629
630 pdata = pdev->dev.platform_data; 630 pdata = pdev->dev.platform_data;
631 if (pdata == NULL) { 631 if (pdata == NULL) {
@@ -647,20 +647,24 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev)
647 c->gpio_irq = 0; 647 c->gpio_irq = 0;
648 } 648 }
649 649
650 r = gpmc_cs_request(c->gpmc_cs, ONENAND_IO_SIZE, &c->phys_base); 650 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
651 if (r < 0) { 651 if (res == NULL) {
652 dev_err(&pdev->dev, "Cannot request GPMC CS\n"); 652 r = -EINVAL;
653 dev_err(&pdev->dev, "error getting memory resource\n");
653 goto err_kfree; 654 goto err_kfree;
654 } 655 }
655 656
656 if (request_mem_region(c->phys_base, ONENAND_IO_SIZE, 657 c->phys_base = res->start;
658 c->mem_size = resource_size(res);
659
660 if (request_mem_region(c->phys_base, c->mem_size,
657 pdev->dev.driver->name) == NULL) { 661 pdev->dev.driver->name) == NULL) {
658 dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, " 662 dev_err(&pdev->dev, "Cannot reserve memory region at 0x%08lx, size: 0x%x\n",
659 "size: 0x%x\n", c->phys_base, ONENAND_IO_SIZE); 663 c->phys_base, c->mem_size);
660 r = -EBUSY; 664 r = -EBUSY;
661 goto err_free_cs; 665 goto err_kfree;
662 } 666 }
663 c->onenand.base = ioremap(c->phys_base, ONENAND_IO_SIZE); 667 c->onenand.base = ioremap(c->phys_base, c->mem_size);
664 if (c->onenand.base == NULL) { 668 if (c->onenand.base == NULL) {
665 r = -ENOMEM; 669 r = -ENOMEM;
666 goto err_release_mem_region; 670 goto err_release_mem_region;
@@ -776,9 +780,7 @@ err_release_gpio:
776err_iounmap: 780err_iounmap:
777 iounmap(c->onenand.base); 781 iounmap(c->onenand.base);
778err_release_mem_region: 782err_release_mem_region:
779 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 783 release_mem_region(c->phys_base, c->mem_size);
780err_free_cs:
781 gpmc_cs_free(c->gpmc_cs);
782err_kfree: 784err_kfree:
783 kfree(c); 785 kfree(c);
784 786
@@ -800,7 +802,7 @@ static int __devexit omap2_onenand_remove(struct platform_device *pdev)
800 gpio_free(c->gpio_irq); 802 gpio_free(c->gpio_irq);
801 } 803 }
802 iounmap(c->onenand.base); 804 iounmap(c->onenand.base);
803 release_mem_region(c->phys_base, ONENAND_IO_SIZE); 805 release_mem_region(c->phys_base, c->mem_size);
804 gpmc_cs_free(c->gpmc_cs); 806 gpmc_cs_free(c->gpmc_cs);
805 kfree(c); 807 kfree(c);
806 808
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index ea4b95b5451c..36663af56d89 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -27,20 +27,55 @@ config MTD_UBI_WL_THRESHOLD
27 life-cycle less than 10000, the threshold should be lessened (e.g., 27 life-cycle less than 10000, the threshold should be lessened (e.g.,
28 to 128 or 256, although it does not have to be power of 2). 28 to 128 or 256, although it does not have to be power of 2).
29 29
30config MTD_UBI_BEB_RESERVE 30config MTD_UBI_BEB_LIMIT
31 int "Percentage of reserved eraseblocks for bad eraseblocks handling" 31 int "Maximum expected bad eraseblock count per 1024 eraseblocks"
32 default 2 32 default 20
33 range 0 25 33 range 0 768
34 help 34 help
35 If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI 35 This option specifies the maximum bad physical eraseblocks UBI
36 reserves some amount of physical eraseblocks to handle new bad 36 expects on the MTD device (per 1024 eraseblocks). If the underlying
37 eraseblocks. For example, if a flash physical eraseblock becomes bad, 37 flash does not admit of bad eraseblocks (e.g. NOR flash), this value
38 UBI uses these reserved physical eraseblocks to relocate the bad one. 38 is ignored.
39 This option specifies how many physical eraseblocks will be reserved 39
40 for bad eraseblock handling (percents of total number of good flash 40 NAND datasheets often specify the minimum and maximum NVM (Number of
41 eraseblocks). If the underlying flash does not admit of bad 41 Valid Blocks) for the flashes' endurance lifetime. The maximum
42 eraseblocks (e.g. NOR flash), this value is ignored and nothing is 42 expected bad eraseblocks per 1024 eraseblocks then can be calculated
43 reserved. Leave the default value if unsure. 43 as "1024 * (1 - MinNVB / MaxNVB)", which gives 20 for most NANDs
44 (MaxNVB is basically the total count of eraseblocks on the chip).
45
46 To put it differently, if this value is 20, UBI will try to reserve
47 about 1.9% of physical eraseblocks for bad blocks handling. And that
48 will be 1.9% of eraseblocks on the entire NAND chip, not just the MTD
49 partition UBI attaches. This means that if you have, say, a NAND
50 flash chip admits maximum 40 bad eraseblocks, and it is split on two
51 MTD partitions of the same size, UBI will reserve 40 eraseblocks when
52 attaching a partition.
53
54 This option can be overridden by the "mtd=" UBI module parameter or
55 by the "attach" ioctl.
56
57 Leave the default value if unsure.
58
59config MTD_UBI_FASTMAP
60 bool "UBI Fastmap (Experimental feature)"
61 default n
62 help
63 Important: this feature is experimental so far and the on-flash
64	  format for fastmap may change in future kernel versions.
65
66 Fastmap is a mechanism which allows attaching an UBI device
67 in nearly constant time. Instead of scanning the whole MTD device it
68 only has to locate a checkpoint (called fastmap) on the device.
69 The on-flash fastmap contains all information needed to attach
70	  the device. Using fastmap only makes sense on large devices where
71	  attaching by scanning takes a long time. UBI will not automatically install
72	  a fastmap on old images, but you can set the UBI module parameter
73	  fm_autoconvert to 1 if you want it to. Please note that fastmap-enabled
74 images are still usable with UBI implementations without
75 fastmap support. On typical flash devices the whole fastmap fits
76 into one PEB. UBI will reserve PEBs to hold two fastmaps.
77
78 If in doubt, say "N".
44 79
45config MTD_UBI_GLUEBI 80config MTD_UBI_GLUEBI
46 tristate "MTD devices emulation driver (gluebi)" 81 tristate "MTD devices emulation driver (gluebi)"
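The MTD_UBI_BEB_LIMIT help text's rule of thumb, "1024 * (1 - MinNVB / MaxNVB)", is easy to sanity-check with integer arithmetic. A throwaway calculation with illustrative datasheet-style numbers (not taken from any particular chip):

#include <stdio.h>

int main(void)
{
	int max_nvb = 2048;	/* total eraseblocks on the chip          */
	int min_nvb = 2008;	/* minimum valid blocks over the lifetime */

	/* "1024 * (1 - MinNVB / MaxNVB)", rounded up, in integers. */
	int limit = (1024 * (max_nvb - min_nvb) + max_nvb - 1) / max_nvb;

	printf("bad PEB limit per 1024: %d (%.2f%% of the chip)\n",
	       limit, limit * 100.0 / 1024);
	return 0;
}

With these numbers the limit comes out to 20, matching the option's default and the roughly 1.9% reservation the help text describes.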
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index a0803ac74712..b46b0c978581 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -2,5 +2,6 @@ obj-$(CONFIG_MTD_UBI) += ubi.o
2 2
3ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o 3ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
4ubi-y += misc.o debug.o 4ubi-y += misc.o debug.o
5ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
5 6
6obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o 7obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index bd27cbbb4066..fec406b4553d 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -79,7 +79,7 @@
79 * NAND), it is probably a PEB which was being erased when power cut 79 * NAND), it is probably a PEB which was being erased when power cut
80 * happened, so this is corruption type 1. However, this is just a guess, 80 * happened, so this is corruption type 1. However, this is just a guess,
81 * which might be wrong. 81 * which might be wrong.
82 * o Otherwise this it corruption type 2. 82 * o Otherwise this is corruption type 2.
83 */ 83 */
84 84
85#include <linux/err.h> 85#include <linux/err.h>
@@ -300,7 +300,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
300} 300}
301 301
302/** 302/**
303 * compare_lebs - find out which logical eraseblock is newer. 303 * ubi_compare_lebs - find out which logical eraseblock is newer.
304 * @ubi: UBI device description object 304 * @ubi: UBI device description object
305 * @aeb: first logical eraseblock to compare 305 * @aeb: first logical eraseblock to compare
306 * @pnum: physical eraseblock number of the second logical eraseblock to 306 * @pnum: physical eraseblock number of the second logical eraseblock to
@@ -319,7 +319,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
319 * o bit 2 is cleared: the older LEB is not corrupted; 319 * o bit 2 is cleared: the older LEB is not corrupted;
320 * o bit 2 is set: the older LEB is corrupted. 320 * o bit 2 is set: the older LEB is corrupted.
321 */ 321 */
322static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, 322int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
323 int pnum, const struct ubi_vid_hdr *vid_hdr) 323 int pnum, const struct ubi_vid_hdr *vid_hdr)
324{ 324{
325 void *buf; 325 void *buf;
@@ -337,7 +337,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
337 * support these images anymore. Well, those images still work, 337 * support these images anymore. Well, those images still work,
338 * but only if no unclean reboots happened. 338 * but only if no unclean reboots happened.
339 */ 339 */
340 ubi_err("unsupported on-flash UBI format\n"); 340 ubi_err("unsupported on-flash UBI format");
341 return -EINVAL; 341 return -EINVAL;
342 } 342 }
343 343
@@ -378,8 +378,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
378 if (err == UBI_IO_BITFLIPS) 378 if (err == UBI_IO_BITFLIPS)
379 bitflips = 1; 379 bitflips = 1;
380 else { 380 else {
381 ubi_err("VID of PEB %d header is bad, but it " 381 ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
382 "was OK earlier, err %d", pnum, err); 382 pnum, err);
383 if (err > 0) 383 if (err > 0)
384 err = -EIO; 384 err = -EIO;
385 385
@@ -507,7 +507,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
507 * sequence numbers. We still can attach these images, unless 507 * sequence numbers. We still can attach these images, unless
508 * there is a need to distinguish between old and new 508 * there is a need to distinguish between old and new
509 * eraseblocks, in which case we'll refuse the image in 509 * eraseblocks, in which case we'll refuse the image in
510 * 'compare_lebs()'. In other words, we attach old clean 510 * 'ubi_compare_lebs()'. In other words, we attach old clean
511 * images, but refuse attaching old images with duplicated 511 * images, but refuse attaching old images with duplicated
512 * logical eraseblocks because there was an unclean reboot. 512 * logical eraseblocks because there was an unclean reboot.
513 */ 513 */
@@ -523,7 +523,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
523 * Now we have to drop the older one and preserve the newer 523 * Now we have to drop the older one and preserve the newer
524 * one. 524 * one.
525 */ 525 */
526 cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr); 526 cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
527 if (cmp_res < 0) 527 if (cmp_res < 0)
528 return cmp_res; 528 return cmp_res;
529 529
@@ -748,7 +748,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
748/** 748/**
749 * check_corruption - check the data area of PEB. 749 * check_corruption - check the data area of PEB.
750 * @ubi: UBI device description object 750 * @ubi: UBI device description object
751 * @vid_hrd: the (corrupted) VID header of this PEB 751 * @vid_hdr: the (corrupted) VID header of this PEB
752 * @pnum: the physical eraseblock number to check 752 * @pnum: the physical eraseblock number to check
753 * 753 *
754 * This is a helper function which is used to distinguish between VID header 754 * This is a helper function which is used to distinguish between VID header
@@ -790,12 +790,12 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
790 if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size)) 790 if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
791 goto out_unlock; 791 goto out_unlock;
792 792
793 ubi_err("PEB %d contains corrupted VID header, and the data does not " 793 ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
794 "contain all 0xFF, this may be a non-UBI PEB or a severe VID " 794 pnum);
795 "header corruption which requires manual inspection", pnum); 795 ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
796 ubi_dump_vid_hdr(vid_hdr); 796 ubi_dump_vid_hdr(vid_hdr);
797 dbg_msg("hexdump of PEB %d offset %d, length %d", 797 pr_err("hexdump of PEB %d offset %d, length %d",
798 pnum, ubi->leb_start, ubi->leb_size); 798 pnum, ubi->leb_start, ubi->leb_size);
799 ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 799 ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
800 ubi->peb_buf, ubi->leb_size, 1); 800 ubi->peb_buf, ubi->leb_size, 1);
801 err = 1; 801 err = 1;
@@ -810,6 +810,8 @@ out_unlock:
810 * @ubi: UBI device description object 810 * @ubi: UBI device description object
811 * @ai: attaching information 811 * @ai: attaching information
812 * @pnum: the physical eraseblock number 812 * @pnum: the physical eraseblock number
813 * @vid: The volume ID of the found volume will be stored in this pointer
814 * @sqnum: The sqnum of the found volume will be stored in this pointer
813 * 815 *
814 * This function reads UBI headers of PEB @pnum, checks them, and adds 816 * This function reads UBI headers of PEB @pnum, checks them, and adds
815 * information about this PEB to the corresponding list or RB-tree in the 817 * information about this PEB to the corresponding list or RB-tree in the
@@ -817,10 +819,10 @@ out_unlock:
817 * successfully handled and a negative error code in case of failure. 819 * successfully handled and a negative error code in case of failure.
818 */ 820 */
819static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, 821static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
820 int pnum) 822 int pnum, int *vid, unsigned long long *sqnum)
821{ 823{
822 long long uninitialized_var(ec); 824 long long uninitialized_var(ec);
823 int err, bitflips = 0, vol_id, ec_err = 0; 825 int err, bitflips = 0, vol_id = -1, ec_err = 0;
824 826
825 dbg_bld("scan PEB %d", pnum); 827 dbg_bld("scan PEB %d", pnum);
826 828
@@ -907,8 +909,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
907 ubi->image_seq = image_seq; 909 ubi->image_seq = image_seq;
908 if (ubi->image_seq && image_seq && 910 if (ubi->image_seq && image_seq &&
909 ubi->image_seq != image_seq) { 911 ubi->image_seq != image_seq) {
910 ubi_err("bad image sequence number %d in PEB %d, " 912 ubi_err("bad image sequence number %d in PEB %d, expected %d",
911 "expected %d", image_seq, pnum, ubi->image_seq); 913 image_seq, pnum, ubi->image_seq);
912 ubi_dump_ec_hdr(ech); 914 ubi_dump_ec_hdr(ech);
913 return -EINVAL; 915 return -EINVAL;
914 } 916 }
@@ -975,7 +977,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
975 return err; 977 return err;
976 goto adjust_mean_ec; 978 goto adjust_mean_ec;
977 case UBI_IO_FF: 979 case UBI_IO_FF:
978 if (ec_err) 980 if (ec_err || bitflips)
979 err = add_to_list(ai, pnum, UBI_UNKNOWN, 981 err = add_to_list(ai, pnum, UBI_UNKNOWN,
980 UBI_UNKNOWN, ec, 1, &ai->erase); 982 UBI_UNKNOWN, ec, 1, &ai->erase);
981 else 983 else
@@ -991,14 +993,21 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
991 } 993 }
992 994
993 vol_id = be32_to_cpu(vidh->vol_id); 995 vol_id = be32_to_cpu(vidh->vol_id);
996 if (vid)
997 *vid = vol_id;
998 if (sqnum)
999 *sqnum = be64_to_cpu(vidh->sqnum);
994 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) { 1000 if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
995 int lnum = be32_to_cpu(vidh->lnum); 1001 int lnum = be32_to_cpu(vidh->lnum);
996 1002
997 /* Unsupported internal volume */ 1003 /* Unsupported internal volume */
998 switch (vidh->compat) { 1004 switch (vidh->compat) {
999 case UBI_COMPAT_DELETE: 1005 case UBI_COMPAT_DELETE:
1000 ubi_msg("\"delete\" compatible internal volume %d:%d" 1006 if (vol_id != UBI_FM_SB_VOLUME_ID
1001 " found, will remove it", vol_id, lnum); 1007 && vol_id != UBI_FM_DATA_VOLUME_ID) {
1008 ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
1009 vol_id, lnum);
1010 }
1002 err = add_to_list(ai, pnum, vol_id, lnum, 1011 err = add_to_list(ai, pnum, vol_id, lnum,
1003 ec, 1, &ai->erase); 1012 ec, 1, &ai->erase);
1004 if (err) 1013 if (err)
@@ -1006,15 +1015,14 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
1006 return 0; 1015 return 0;
1007 1016
1008 case UBI_COMPAT_RO: 1017 case UBI_COMPAT_RO:
1009 ubi_msg("read-only compatible internal volume %d:%d" 1018 ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
1010 " found, switch to read-only mode",
1011 vol_id, lnum); 1019 vol_id, lnum);
1012 ubi->ro_mode = 1; 1020 ubi->ro_mode = 1;
1013 break; 1021 break;
1014 1022
1015 case UBI_COMPAT_PRESERVE: 1023 case UBI_COMPAT_PRESERVE:
1016 ubi_msg("\"preserve\" compatible internal volume %d:%d" 1024 ubi_msg("\"preserve\" compatible internal volume %d:%d found",
1017 " found", vol_id, lnum); 1025 vol_id, lnum);
1018 err = add_to_list(ai, pnum, vol_id, lnum, 1026 err = add_to_list(ai, pnum, vol_id, lnum,
1019 ec, 0, &ai->alien); 1027 ec, 0, &ai->alien);
1020 if (err) 1028 if (err)
@@ -1075,10 +1083,10 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
1075 if (ai->corr_peb_count) { 1083 if (ai->corr_peb_count) {
1076 ubi_err("%d PEBs are corrupted and preserved", 1084 ubi_err("%d PEBs are corrupted and preserved",
1077 ai->corr_peb_count); 1085 ai->corr_peb_count);
1078 printk(KERN_ERR "Corrupted PEBs are:"); 1086 pr_err("Corrupted PEBs are:");
1079 list_for_each_entry(aeb, &ai->corr, u.list) 1087 list_for_each_entry(aeb, &ai->corr, u.list)
1080 printk(KERN_CONT " %d", aeb->pnum); 1088 pr_cont(" %d", aeb->pnum);
1081 printk(KERN_CONT "\n"); 1089 pr_cont("\n");
1082 1090
1083 /* 1091 /*
1084 * If too many PEBs are corrupted, we refuse attaching, 1092 * If too many PEBs are corrupted, we refuse attaching,
@@ -1112,8 +1120,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
1112 get_random_bytes(&ubi->image_seq, 1120 get_random_bytes(&ubi->image_seq,
1113 sizeof(ubi->image_seq)); 1121 sizeof(ubi->image_seq));
1114 } else { 1122 } else {
1115 ubi_err("MTD device is not UBI-formatted and possibly " 1123 ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
1116 "contains non-UBI data - refusing it");
1117 return -EINVAL; 1124 return -EINVAL;
1118 } 1125 }
1119 1126
@@ -1123,56 +1130,131 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
1123} 1130}
1124 1131
1125/** 1132/**
1133 * destroy_av - free volume attaching information.
1134 * @av: volume attaching information
1135 * @ai: attaching information
1136 *
1137 * This function destroys the volume attaching information.
1138 */
1139static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
1140{
1141 struct ubi_ainf_peb *aeb;
1142 struct rb_node *this = av->root.rb_node;
1143
1144 while (this) {
1145 if (this->rb_left)
1146 this = this->rb_left;
1147 else if (this->rb_right)
1148 this = this->rb_right;
1149 else {
1150 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
1151 this = rb_parent(this);
1152 if (this) {
1153 if (this->rb_left == &aeb->u.rb)
1154 this->rb_left = NULL;
1155 else
1156 this->rb_right = NULL;
1157 }
1158
1159 kmem_cache_free(ai->aeb_slab_cache, aeb);
1160 }
1161 }
1162 kfree(av);
1163}
1164
1165/**
1166 * destroy_ai - destroy attaching information.
1167 * @ai: attaching information
1168 */
1169static void destroy_ai(struct ubi_attach_info *ai)
1170{
1171 struct ubi_ainf_peb *aeb, *aeb_tmp;
1172 struct ubi_ainf_volume *av;
1173 struct rb_node *rb;
1174
1175 list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
1176 list_del(&aeb->u.list);
1177 kmem_cache_free(ai->aeb_slab_cache, aeb);
1178 }
1179 list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
1180 list_del(&aeb->u.list);
1181 kmem_cache_free(ai->aeb_slab_cache, aeb);
1182 }
1183 list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
1184 list_del(&aeb->u.list);
1185 kmem_cache_free(ai->aeb_slab_cache, aeb);
1186 }
1187 list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
1188 list_del(&aeb->u.list);
1189 kmem_cache_free(ai->aeb_slab_cache, aeb);
1190 }
1191
1192 /* Destroy the volume RB-tree */
1193 rb = ai->volumes.rb_node;
1194 while (rb) {
1195 if (rb->rb_left)
1196 rb = rb->rb_left;
1197 else if (rb->rb_right)
1198 rb = rb->rb_right;
1199 else {
1200 av = rb_entry(rb, struct ubi_ainf_volume, rb);
1201
1202 rb = rb_parent(rb);
1203 if (rb) {
1204 if (rb->rb_left == &av->rb)
1205 rb->rb_left = NULL;
1206 else
1207 rb->rb_right = NULL;
1208 }
1209
1210 destroy_av(ai, av);
1211 }
1212 }
1213
1214 if (ai->aeb_slab_cache)
1215 kmem_cache_destroy(ai->aeb_slab_cache);
1216
1217 kfree(ai);
1218}
1219
1220/**
1126 * scan_all - scan entire MTD device. 1221 * scan_all - scan entire MTD device.
1127 * @ubi: UBI device description object 1222 * @ubi: UBI device description object
1223 * @ai: attach info object
1224 * @start: start scanning at this PEB
1128 * 1225 *
1129 * This function does full scanning of an MTD device and returns complete 1226 * This function does full scanning of an MTD device and returns complete
1130 * information about it in form of a "struct ubi_attach_info" object. In case 1227 * information about it in form of a "struct ubi_attach_info" object. In case
1131 * of failure, an error code is returned. 1228 * of failure, an error code is returned.
1132 */ 1229 */
1133static struct ubi_attach_info *scan_all(struct ubi_device *ubi) 1230static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
1231 int start)
1134{ 1232{
1135 int err, pnum; 1233 int err, pnum;
1136 struct rb_node *rb1, *rb2; 1234 struct rb_node *rb1, *rb2;
1137 struct ubi_ainf_volume *av; 1235 struct ubi_ainf_volume *av;
1138 struct ubi_ainf_peb *aeb; 1236 struct ubi_ainf_peb *aeb;
1139 struct ubi_attach_info *ai;
1140
1141 ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
1142 if (!ai)
1143 return ERR_PTR(-ENOMEM);
1144
1145 INIT_LIST_HEAD(&ai->corr);
1146 INIT_LIST_HEAD(&ai->free);
1147 INIT_LIST_HEAD(&ai->erase);
1148 INIT_LIST_HEAD(&ai->alien);
1149 ai->volumes = RB_ROOT;
1150 1237
1151 err = -ENOMEM; 1238 err = -ENOMEM;
1152 ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
1153 sizeof(struct ubi_ainf_peb),
1154 0, 0, NULL);
1155 if (!ai->aeb_slab_cache)
1156 goto out_ai;
1157 1239
1158 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); 1240 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1159 if (!ech) 1241 if (!ech)
1160 goto out_ai; 1242 return err;
1161 1243
1162 vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 1244 vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
1163 if (!vidh) 1245 if (!vidh)
1164 goto out_ech; 1246 goto out_ech;
1165 1247
1166 for (pnum = 0; pnum < ubi->peb_count; pnum++) { 1248 for (pnum = start; pnum < ubi->peb_count; pnum++) {
1167 cond_resched(); 1249 cond_resched();
1168 1250
1169 dbg_gen("process PEB %d", pnum); 1251 dbg_gen("process PEB %d", pnum);
1170 err = scan_peb(ubi, ai, pnum); 1252 err = scan_peb(ubi, ai, pnum, NULL, NULL);
1171 if (err < 0) 1253 if (err < 0)
1172 goto out_vidh; 1254 goto out_vidh;
1173 } 1255 }
1174 1256
1175 dbg_msg("scanning is finished"); 1257 ubi_msg("scanning is finished");
1176 1258
1177 /* Calculate mean erase counter */ 1259 /* Calculate mean erase counter */
1178 if (ai->ec_count) 1260 if (ai->ec_count)
@@ -1212,39 +1294,151 @@ static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
1212 ubi_free_vid_hdr(ubi, vidh); 1294 ubi_free_vid_hdr(ubi, vidh);
1213 kfree(ech); 1295 kfree(ech);
1214 1296
1215 return ai; 1297 return 0;
1216 1298
1217out_vidh: 1299out_vidh:
1218 ubi_free_vid_hdr(ubi, vidh); 1300 ubi_free_vid_hdr(ubi, vidh);
1219out_ech: 1301out_ech:
1220 kfree(ech); 1302 kfree(ech);
1221out_ai: 1303 return err;
1222 ubi_destroy_ai(ai); 1304}
1223 return ERR_PTR(err); 1305
1306#ifdef CONFIG_MTD_UBI_FASTMAP
1307
1308/**
1309 * scan_fast - try to find a fastmap and attach from it.
1310 * @ubi: UBI device description object
1311 * @ai: attach info object
1312 *
1313 * Returns 0 on success, negative return values indicate an internal
1314 * error.
1315 * UBI_NO_FASTMAP denotes that no fastmap was found.
1316 * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
1317 */
1318static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
1319{
1320 int err, pnum, fm_anchor = -1;
1321 unsigned long long max_sqnum = 0;
1322
1323 err = -ENOMEM;
1324
1325 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1326 if (!ech)
1327 goto out;
1328
1329 vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
1330 if (!vidh)
1331 goto out_ech;
1332
1333 for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
1334 int vol_id = -1;
1335 unsigned long long sqnum = -1;
1336 cond_resched();
1337
1338 dbg_gen("process PEB %d", pnum);
1339 err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
1340 if (err < 0)
1341 goto out_vidh;
1342
1343 if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
1344 max_sqnum = sqnum;
1345 fm_anchor = pnum;
1346 }
1347 }
1348
1349 ubi_free_vid_hdr(ubi, vidh);
1350 kfree(ech);
1351
1352 if (fm_anchor < 0)
1353 return UBI_NO_FASTMAP;
1354
1355 return ubi_scan_fastmap(ubi, ai, fm_anchor);
1356
1357out_vidh:
1358 ubi_free_vid_hdr(ubi, vidh);
1359out_ech:
1360 kfree(ech);
1361out:
1362 return err;
1363}
1364
1365#endif
1366
1367static struct ubi_attach_info *alloc_ai(const char *slab_name)
1368{
1369 struct ubi_attach_info *ai;
1370
1371 ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
1372 if (!ai)
1373 return ai;
1374
1375 INIT_LIST_HEAD(&ai->corr);
1376 INIT_LIST_HEAD(&ai->free);
1377 INIT_LIST_HEAD(&ai->erase);
1378 INIT_LIST_HEAD(&ai->alien);
1379 ai->volumes = RB_ROOT;
1380 ai->aeb_slab_cache = kmem_cache_create(slab_name,
1381 sizeof(struct ubi_ainf_peb),
1382 0, 0, NULL);
1383 if (!ai->aeb_slab_cache) {
1384 kfree(ai);
1385 ai = NULL;
1386 }
1387
1388 return ai;
1224} 1389}
1225 1390
1226/** 1391/**
1227 * ubi_attach - attach an MTD device. 1392 * ubi_attach - attach an MTD device.
1228 * @ubi: UBI device descriptor 1393 * @ubi: UBI device descriptor
1394 * @force_scan: if set to non-zero attach by scanning
1229 * 1395 *
1230 * This function returns zero in case of success and a negative error code in 1396 * This function returns zero in case of success and a negative error code in
1231 * case of failure. 1397 * case of failure.
1232 */ 1398 */
1233int ubi_attach(struct ubi_device *ubi) 1399int ubi_attach(struct ubi_device *ubi, int force_scan)
1234{ 1400{
1235 int err; 1401 int err;
1236 struct ubi_attach_info *ai; 1402 struct ubi_attach_info *ai;
1237 1403
1238 ai = scan_all(ubi); 1404 ai = alloc_ai("ubi_aeb_slab_cache");
1239 if (IS_ERR(ai)) 1405 if (!ai)
1240 return PTR_ERR(ai); 1406 return -ENOMEM;
1407
1408#ifdef CONFIG_MTD_UBI_FASTMAP
1409 /* On small flash devices we disable fastmap in any case. */
1410 if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
1411 ubi->fm_disabled = 1;
1412 force_scan = 1;
1413 }
1414
1415 if (force_scan)
1416 err = scan_all(ubi, ai, 0);
1417 else {
1418 err = scan_fast(ubi, ai);
1419 if (err > 0) {
1420 if (err != UBI_NO_FASTMAP) {
1421 destroy_ai(ai);
1422 ai = alloc_ai("ubi_aeb_slab_cache2");
1423 if (!ai)
1424 return -ENOMEM;
1425 }
1426
1427 err = scan_all(ubi, ai, UBI_FM_MAX_START);
1428 }
1429 }
1430#else
1431 err = scan_all(ubi, ai, 0);
1432#endif
1433 if (err)
1434 goto out_ai;
1241 1435
1242 ubi->bad_peb_count = ai->bad_peb_count; 1436 ubi->bad_peb_count = ai->bad_peb_count;
1243 ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; 1437 ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
1244 ubi->corr_peb_count = ai->corr_peb_count; 1438 ubi->corr_peb_count = ai->corr_peb_count;
1245 ubi->max_ec = ai->max_ec; 1439 ubi->max_ec = ai->max_ec;
1246 ubi->mean_ec = ai->mean_ec; 1440 ubi->mean_ec = ai->mean_ec;
1247 ubi_msg("max. sequence number: %llu", ai->max_sqnum); 1441 dbg_gen("max. sequence number: %llu", ai->max_sqnum);
1248 1442
1249 err = ubi_read_volume_table(ubi, ai); 1443 err = ubi_read_volume_table(ubi, ai);
1250 if (err) 1444 if (err)
@@ -1258,7 +1452,29 @@ int ubi_attach(struct ubi_device *ubi)
1258 if (err) 1452 if (err)
1259 goto out_wl; 1453 goto out_wl;
1260 1454
1261 ubi_destroy_ai(ai); 1455#ifdef CONFIG_MTD_UBI_FASTMAP
1456 if (ubi->fm && ubi->dbg->chk_gen) {
1457 struct ubi_attach_info *scan_ai;
1458
1459 scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
1460 if (!scan_ai)
1461 goto out_wl;
1462
1463 err = scan_all(ubi, scan_ai, 0);
1464 if (err) {
1465 destroy_ai(scan_ai);
1466 goto out_wl;
1467 }
1468
1469 err = self_check_eba(ubi, ai, scan_ai);
1470 destroy_ai(scan_ai);
1471
1472 if (err)
1473 goto out_wl;
1474 }
1475#endif
1476
1477 destroy_ai(ai);
1262 return 0; 1478 return 0;
1263 1479
1264out_wl: 1480out_wl:
@@ -1267,99 +1483,11 @@ out_vtbl:
1267 ubi_free_internal_volumes(ubi); 1483 ubi_free_internal_volumes(ubi);
1268 vfree(ubi->vtbl); 1484 vfree(ubi->vtbl);
1269out_ai: 1485out_ai:
1270 ubi_destroy_ai(ai); 1486 destroy_ai(ai);
1271 return err; 1487 return err;
1272} 1488}
1273 1489
1274/** 1490/**
1275 * destroy_av - free volume attaching information.
1276 * @av: volume attaching information
1277 * @ai: attaching information
1278 *
1279 * This function destroys the volume attaching information.
1280 */
1281static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
1282{
1283 struct ubi_ainf_peb *aeb;
1284 struct rb_node *this = av->root.rb_node;
1285
1286 while (this) {
1287 if (this->rb_left)
1288 this = this->rb_left;
1289 else if (this->rb_right)
1290 this = this->rb_right;
1291 else {
1292 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
1293 this = rb_parent(this);
1294 if (this) {
1295 if (this->rb_left == &aeb->u.rb)
1296 this->rb_left = NULL;
1297 else
1298 this->rb_right = NULL;
1299 }
1300
1301 kmem_cache_free(ai->aeb_slab_cache, aeb);
1302 }
1303 }
1304 kfree(av);
1305}
1306
1307/**
1308 * ubi_destroy_ai - destroy attaching information.
1309 * @ai: attaching information
1310 */
1311void ubi_destroy_ai(struct ubi_attach_info *ai)
1312{
1313 struct ubi_ainf_peb *aeb, *aeb_tmp;
1314 struct ubi_ainf_volume *av;
1315 struct rb_node *rb;
1316
1317 list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
1318 list_del(&aeb->u.list);
1319 kmem_cache_free(ai->aeb_slab_cache, aeb);
1320 }
1321 list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
1322 list_del(&aeb->u.list);
1323 kmem_cache_free(ai->aeb_slab_cache, aeb);
1324 }
1325 list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
1326 list_del(&aeb->u.list);
1327 kmem_cache_free(ai->aeb_slab_cache, aeb);
1328 }
1329 list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
1330 list_del(&aeb->u.list);
1331 kmem_cache_free(ai->aeb_slab_cache, aeb);
1332 }
1333
1334 /* Destroy the volume RB-tree */
1335 rb = ai->volumes.rb_node;
1336 while (rb) {
1337 if (rb->rb_left)
1338 rb = rb->rb_left;
1339 else if (rb->rb_right)
1340 rb = rb->rb_right;
1341 else {
1342 av = rb_entry(rb, struct ubi_ainf_volume, rb);
1343
1344 rb = rb_parent(rb);
1345 if (rb) {
1346 if (rb->rb_left == &av->rb)
1347 rb->rb_left = NULL;
1348 else
1349 rb->rb_right = NULL;
1350 }
1351
1352 destroy_av(ai, av);
1353 }
1354 }
1355
1356 if (ai->aeb_slab_cache)
1357 kmem_cache_destroy(ai->aeb_slab_cache);
1358
1359 kfree(ai);
1360}
1361
1362/**
1363 * self_check_ai - check the attaching information. 1491 * self_check_ai - check the attaching information.
1364 * @ubi: UBI device description object 1492 * @ubi: UBI device description object
1365 * @ai: attaching information 1493 * @ai: attaching information
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 2c5ed5ca9c33..344b4cb49d4e 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -36,6 +36,7 @@
36#include <linux/namei.h> 36#include <linux/namei.h>
37#include <linux/stat.h> 37#include <linux/stat.h>
38#include <linux/miscdevice.h> 38#include <linux/miscdevice.h>
39#include <linux/mtd/partitions.h>
39#include <linux/log2.h> 40#include <linux/log2.h>
40#include <linux/kthread.h> 41#include <linux/kthread.h>
41#include <linux/kernel.h> 42#include <linux/kernel.h>
@@ -45,6 +46,12 @@
45/* Maximum length of the 'mtd=' parameter */ 46/* Maximum length of the 'mtd=' parameter */
46#define MTD_PARAM_LEN_MAX 64 47#define MTD_PARAM_LEN_MAX 64
47 48
49/* Maximum number of comma-separated items in the 'mtd=' parameter */
50#define MTD_PARAM_MAX_COUNT 3
51
52/* Maximum value for the number of bad PEBs per 1024 PEBs */
53#define MAX_MTD_UBI_BEB_LIMIT 768
54
48#ifdef CONFIG_MTD_UBI_MODULE 55#ifdef CONFIG_MTD_UBI_MODULE
49#define ubi_is_module() 1 56#define ubi_is_module() 1
50#else 57#else
@@ -56,10 +63,12 @@
56 * @name: MTD character device node path, MTD device name, or MTD device number 63 * @name: MTD character device node path, MTD device name, or MTD device number
57 * string 64 * string
58 * @vid_hdr_offs: VID header offset 65 * @vid_hdr_offs: VID header offset
66 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
59 */ 67 */
60struct mtd_dev_param { 68struct mtd_dev_param {
61 char name[MTD_PARAM_LEN_MAX]; 69 char name[MTD_PARAM_LEN_MAX];
62 int vid_hdr_offs; 70 int vid_hdr_offs;
71 int max_beb_per1024;
63}; 72};
64 73
65/* Numbers of elements set in the @mtd_dev_param array */ 74/* Numbers of elements set in the @mtd_dev_param array */
@@ -67,7 +76,10 @@ static int __initdata mtd_devs;
67 76
68/* MTD devices specification parameters */ 77/* MTD devices specification parameters */
69static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES]; 78static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
70 79#ifdef CONFIG_MTD_UBI_FASTMAP
80/* UBI module parameter to enable fastmap automatically on non-fastmap images */
81static bool fm_autoconvert;
82#endif
71/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */ 83/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
72struct class *ubi_class; 84struct class *ubi_class;
73 85
@@ -144,6 +156,19 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
144 156
145 ubi_do_get_device_info(ubi, &nt.di); 157 ubi_do_get_device_info(ubi, &nt.di);
146 ubi_do_get_volume_info(ubi, vol, &nt.vi); 158 ubi_do_get_volume_info(ubi, vol, &nt.vi);
159
160#ifdef CONFIG_MTD_UBI_FASTMAP
161 switch (ntype) {
162 case UBI_VOLUME_ADDED:
163 case UBI_VOLUME_REMOVED:
164 case UBI_VOLUME_RESIZED:
165 case UBI_VOLUME_RENAMED:
166 if (ubi_update_fastmap(ubi)) {
167 ubi_err("Unable to update fastmap!");
168 ubi_ro_mode(ubi);
169 }
170 }
171#endif
147 return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); 172 return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
148} 173}
149 174
@@ -564,9 +589,38 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
564 } 589 }
565} 590}
566 591
592static int get_bad_peb_limit(const struct ubi_device *ubi, int max_beb_per1024)
593{
594 int limit, device_pebs;
595 uint64_t device_size;
596
597 if (!max_beb_per1024)
598 return 0;
599
600 /*
601 * Here we are using size of the entire flash chip and
602 * not just the MTD partition size because the maximum
603 * number of bad eraseblocks is a percentage of the
604 * whole device and bad eraseblocks are not fairly
605 * distributed over the flash chip. So the worst case
606 * is that all the bad eraseblocks of the chip are in
607 * the MTD partition we are attaching (ubi->mtd).
608 */
609 device_size = mtd_get_device_size(ubi->mtd);
610 device_pebs = mtd_div_by_eb(device_size, ubi->mtd);
611 limit = mult_frac(device_pebs, max_beb_per1024, 1024);
612
613 /* Round it up */
614 if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs)
615 limit += 1;
616
617 return limit;
618}
619
567/** 620/**
568 * io_init - initialize I/O sub-system for a given UBI device. 621 * io_init - initialize I/O sub-system for a given UBI device.
569 * @ubi: UBI device description object 622 * @ubi: UBI device description object
623 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
570 * 624 *
571 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are 625 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
572 * assumed: 626 * assumed:
@@ -579,8 +633,11 @@ void ubi_free_internal_volumes(struct ubi_device *ubi)
579 * This function returns zero in case of success and a negative error code in 633 * This function returns zero in case of success and a negative error code in
580 * case of failure. 634 * case of failure.
581 */ 635 */
582static int io_init(struct ubi_device *ubi) 636static int io_init(struct ubi_device *ubi, int max_beb_per1024)
583{ 637{
638 dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
639 dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
640
584 if (ubi->mtd->numeraseregions != 0) { 641 if (ubi->mtd->numeraseregions != 0) {
585 /* 642 /*
586 * Some flashes have several erase regions. Different regions 643 * Some flashes have several erase regions. Different regions
@@ -607,8 +664,10 @@ static int io_init(struct ubi_device *ubi)
607 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); 664 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
608 ubi->flash_size = ubi->mtd->size; 665 ubi->flash_size = ubi->mtd->size;
609 666
610 if (mtd_can_have_bb(ubi->mtd)) 667 if (mtd_can_have_bb(ubi->mtd)) {
611 ubi->bad_allowed = 1; 668 ubi->bad_allowed = 1;
669 ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
670 }
612 671
613 if (ubi->mtd->type == MTD_NORFLASH) { 672 if (ubi->mtd->type == MTD_NORFLASH) {
614 ubi_assert(ubi->mtd->writesize == 1); 673 ubi_assert(ubi->mtd->writesize == 1);
@@ -650,11 +709,11 @@ static int io_init(struct ubi_device *ubi)
650 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); 709 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
651 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); 710 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);
652 711
653 dbg_msg("min_io_size %d", ubi->min_io_size); 712 dbg_gen("min_io_size %d", ubi->min_io_size);
654 dbg_msg("max_write_size %d", ubi->max_write_size); 713 dbg_gen("max_write_size %d", ubi->max_write_size);
655 dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size); 714 dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
656 dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize); 715 dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
657 dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize); 716 dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize);
658 717
659 if (ubi->vid_hdr_offset == 0) 718 if (ubi->vid_hdr_offset == 0)
660 /* Default offset */ 719 /* Default offset */
@@ -671,10 +730,10 @@ static int io_init(struct ubi_device *ubi)
671 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; 730 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
672 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); 731 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
673 732
674 dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset); 733 dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset);
675 dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); 734 dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
676 dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift); 735 dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift);
677 dbg_msg("leb_start %d", ubi->leb_start); 736 dbg_gen("leb_start %d", ubi->leb_start);
678 737
679 /* The shift must be aligned to 32-bit boundary */ 738 /* The shift must be aligned to 32-bit boundary */
680 if (ubi->vid_hdr_shift % 4) { 739 if (ubi->vid_hdr_shift % 4) {
@@ -700,7 +759,7 @@ static int io_init(struct ubi_device *ubi)
700 ubi->max_erroneous = ubi->peb_count / 10; 759 ubi->max_erroneous = ubi->peb_count / 10;
701 if (ubi->max_erroneous < 16) 760 if (ubi->max_erroneous < 16)
702 ubi->max_erroneous = 16; 761 ubi->max_erroneous = 16;
703 dbg_msg("max_erroneous %d", ubi->max_erroneous); 762 dbg_gen("max_erroneous %d", ubi->max_erroneous);
704 763
705 /* 764 /*
706 * It may happen that EC and VID headers are situated in one minimal 765 * It may happen that EC and VID headers are situated in one minimal
@@ -708,30 +767,18 @@ static int io_init(struct ubi_device *ubi)
708 * read-only mode. 767 * read-only mode.
709 */ 768 */
710 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { 769 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
711 ubi_warn("EC and VID headers are in the same minimal I/O unit, " 770 ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
712 "switch to read-only mode");
713 ubi->ro_mode = 1; 771 ubi->ro_mode = 1;
714 } 772 }
715 773
716 ubi->leb_size = ubi->peb_size - ubi->leb_start; 774 ubi->leb_size = ubi->peb_size - ubi->leb_start;
717 775
718 if (!(ubi->mtd->flags & MTD_WRITEABLE)) { 776 if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
719 ubi_msg("MTD device %d is write-protected, attach in " 777 ubi_msg("MTD device %d is write-protected, attach in read-only mode",
720 "read-only mode", ubi->mtd->index); 778 ubi->mtd->index);
721 ubi->ro_mode = 1; 779 ubi->ro_mode = 1;
722 } 780 }
723 781
724 ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
725 ubi->peb_size, ubi->peb_size >> 10);
726 ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
727 ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
728 if (ubi->hdrs_min_io_size != ubi->min_io_size)
729 ubi_msg("sub-page size: %d",
730 ubi->hdrs_min_io_size);
731 ubi_msg("VID header offset: %d (aligned %d)",
732 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
733 ubi_msg("data offset: %d", ubi->leb_start);
734
735 /* 782 /*
736 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But 783 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
737 * unfortunately, MTD does not provide this information. We should loop 784 * unfortunately, MTD does not provide this information. We should loop
@@ -759,6 +806,11 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
759 struct ubi_volume *vol = ubi->volumes[vol_id]; 806 struct ubi_volume *vol = ubi->volumes[vol_id];
760 int err, old_reserved_pebs = vol->reserved_pebs; 807 int err, old_reserved_pebs = vol->reserved_pebs;
761 808
809 if (ubi->ro_mode) {
810 ubi_warn("skip auto-resize because of R/O mode");
811 return 0;
812 }
813
762 /* 814 /*
763 * Clear the auto-resize flag in the volume in-memory copy of the 815 * Clear the auto-resize flag in the volume in-memory copy of the
764 * volume table, and 'ubi_resize_volume()' will propagate this change 816 * volume table, and 'ubi_resize_volume()' will propagate this change
@@ -800,6 +852,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
800 * @mtd: MTD device description object 852 * @mtd: MTD device description object
801 * @ubi_num: number to assign to the new UBI device 853 * @ubi_num: number to assign to the new UBI device
802 * @vid_hdr_offset: VID header offset 854 * @vid_hdr_offset: VID header offset
855 * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
803 * 856 *
804 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number 857 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
805 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in 858 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -810,11 +863,18 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
810 * Note, the invocations of this function has to be serialized by the 863 * Note, the invocations of this function has to be serialized by the
811 * @ubi_devices_mutex. 864 * @ubi_devices_mutex.
812 */ 865 */
813int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) 866int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
867 int vid_hdr_offset, int max_beb_per1024)
814{ 868{
815 struct ubi_device *ubi; 869 struct ubi_device *ubi;
816 int i, err, ref = 0; 870 int i, err, ref = 0;
817 871
872 if (max_beb_per1024 < 0 || max_beb_per1024 > MAX_MTD_UBI_BEB_LIMIT)
873 return -EINVAL;
874
875 if (!max_beb_per1024)
876 max_beb_per1024 = CONFIG_MTD_UBI_BEB_LIMIT;
877
818 /* 878 /*
819 * Check if we already have the same MTD device attached. 879 * Check if we already have the same MTD device attached.
820 * 880 *
@@ -839,8 +899,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
839 * no sense to attach emulated MTD devices, so we prohibit this. 899 * no sense to attach emulated MTD devices, so we prohibit this.
840 */ 900 */
841 if (mtd->type == MTD_UBIVOLUME) { 901 if (mtd->type == MTD_UBIVOLUME) {
842 ubi_err("refuse attaching mtd%d - it is already emulated on " 902 ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI",
843 "top of UBI", mtd->index); 903 mtd->index);
844 return -EINVAL; 904 return -EINVAL;
845 } 905 }
846 906
@@ -874,16 +934,44 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
874 ubi->vid_hdr_offset = vid_hdr_offset; 934 ubi->vid_hdr_offset = vid_hdr_offset;
875 ubi->autoresize_vol_id = -1; 935 ubi->autoresize_vol_id = -1;
876 936
937#ifdef CONFIG_MTD_UBI_FASTMAP
938 ubi->fm_pool.used = ubi->fm_pool.size = 0;
939 ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
940
941 /*
942 * fm_pool.max_size is 5% of the total number of PEBs but it's also
943 * between UBI_FM_MAX_POOL_SIZE and UBI_FM_MIN_POOL_SIZE.
944 */
945 ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
946 ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
947 if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
948 ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
949
950 ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
951 ubi->fm_disabled = !fm_autoconvert;
952
953 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
954 <= UBI_FM_MAX_START) {
955 ubi_err("More than %i PEBs are needed for fastmap, sorry.",
956 UBI_FM_MAX_START);
957 ubi->fm_disabled = 1;
958 }
959
960 ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
961 ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
962#else
963 ubi->fm_disabled = 1;
964#endif
877 mutex_init(&ubi->buf_mutex); 965 mutex_init(&ubi->buf_mutex);
878 mutex_init(&ubi->ckvol_mutex); 966 mutex_init(&ubi->ckvol_mutex);
879 mutex_init(&ubi->device_mutex); 967 mutex_init(&ubi->device_mutex);
880 spin_lock_init(&ubi->volumes_lock); 968 spin_lock_init(&ubi->volumes_lock);
969 mutex_init(&ubi->fm_mutex);
970 init_rwsem(&ubi->fm_sem);
881 971
882 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); 972 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
883 dbg_msg("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
884 dbg_msg("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));
885 973
886 err = io_init(ubi); 974 err = io_init(ubi, max_beb_per1024);
887 if (err) 975 if (err)
888 goto out_free; 976 goto out_free;
889 977
@@ -892,11 +980,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
892 if (!ubi->peb_buf) 980 if (!ubi->peb_buf)
893 goto out_free; 981 goto out_free;
894 982
983#ifdef CONFIG_MTD_UBI_FASTMAP
984 ubi->fm_size = ubi_calc_fm_size(ubi);
985 ubi->fm_buf = vzalloc(ubi->fm_size);
986 if (!ubi->fm_buf)
987 goto out_free;
988#endif
895 err = ubi_debugging_init_dev(ubi); 989 err = ubi_debugging_init_dev(ubi);
896 if (err) 990 if (err)
897 goto out_free; 991 goto out_free;
898 992
899 err = ubi_attach(ubi); 993 err = ubi_attach(ubi, 0);
900 if (err) { 994 if (err) {
901 ubi_err("failed to attach mtd%d, error %d", mtd->index, err); 995 ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
902 goto out_debugging; 996 goto out_debugging;
@@ -924,23 +1018,24 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
924 goto out_debugfs; 1018 goto out_debugfs;
925 } 1019 }
926 1020
927 ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num); 1021 ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d",
928 ubi_msg("MTD device name: \"%s\"", mtd->name); 1022 mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num);
929 ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); 1023 ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes",
930 ubi_msg("number of good PEBs: %d", ubi->good_peb_count); 1024 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
931 ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); 1025 ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d",
932 ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count); 1026 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
933 ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); 1027 ubi_msg("VID header offset: %d (aligned %d), data offset: %d",
934 ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); 1028 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
935 ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); 1029 ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
936 ubi_msg("number of user volumes: %d", 1030 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
937 ubi->vol_count - UBI_INT_VOL_COUNT); 1031 ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d",
938 ubi_msg("available PEBs: %d", ubi->avail_pebs); 1032 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
939 ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs); 1033 ubi->vtbl_slots);
940 ubi_msg("number of PEBs reserved for bad PEB handling: %d", 1034 ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
941 ubi->beb_rsvd_pebs); 1035 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
942 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); 1036 ubi->image_seq);
943 ubi_msg("image sequence number: %d", ubi->image_seq); 1037 ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
1038 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
944 1039
945 /* 1040 /*
946 * The below lock makes sure we do not race with 'ubi_thread()' which 1041 * The below lock makes sure we do not race with 'ubi_thread()' which
@@ -969,6 +1064,7 @@ out_debugging:
969 ubi_debugging_exit_dev(ubi); 1064 ubi_debugging_exit_dev(ubi);
970out_free: 1065out_free:
971 vfree(ubi->peb_buf); 1066 vfree(ubi->peb_buf);
1067 vfree(ubi->fm_buf);
972 if (ref) 1068 if (ref)
973 put_device(&ubi->dev); 1069 put_device(&ubi->dev);
974 else 1070 else
@@ -1017,8 +1113,12 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1017 1113
1018 ubi_assert(ubi_num == ubi->ubi_num); 1114 ubi_assert(ubi_num == ubi->ubi_num);
1019 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); 1115 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
1020 dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 1116 ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
1021 1117#ifdef CONFIG_MTD_UBI_FASTMAP
1118 /* If we don't write a new fastmap at detach time we lose all
1119 * EC updates that have been made since the last written fastmap. */
1120 ubi_update_fastmap(ubi);
1121#endif
1022 /* 1122 /*
1023 * Before freeing anything, we have to stop the background thread to 1123 * Before freeing anything, we have to stop the background thread to
1024 * prevent it from doing anything on this device while we are freeing. 1124 * prevent it from doing anything on this device while we are freeing.
@@ -1034,12 +1134,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1034 1134
1035 ubi_debugfs_exit_dev(ubi); 1135 ubi_debugfs_exit_dev(ubi);
1036 uif_close(ubi); 1136 uif_close(ubi);
1137
1037 ubi_wl_close(ubi); 1138 ubi_wl_close(ubi);
1038 ubi_free_internal_volumes(ubi); 1139 ubi_free_internal_volumes(ubi);
1039 vfree(ubi->vtbl); 1140 vfree(ubi->vtbl);
1040 put_mtd_device(ubi->mtd); 1141 put_mtd_device(ubi->mtd);
1041 ubi_debugging_exit_dev(ubi); 1142 ubi_debugging_exit_dev(ubi);
1042 vfree(ubi->peb_buf); 1143 vfree(ubi->peb_buf);
1144 vfree(ubi->fm_buf);
1043 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); 1145 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
1044 put_device(&ubi->dev); 1146 put_device(&ubi->dev);
1045 return 0; 1147 return 0;
@@ -1172,7 +1274,7 @@ static int __init ubi_init(void)
1172 1274
1173 mutex_lock(&ubi_devices_mutex); 1275 mutex_lock(&ubi_devices_mutex);
1174 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 1276 err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
1175 p->vid_hdr_offs); 1277 p->vid_hdr_offs, p->max_beb_per1024);
1176 mutex_unlock(&ubi_devices_mutex); 1278 mutex_unlock(&ubi_devices_mutex);
1177 if (err < 0) { 1279 if (err < 0) {
1178 ubi_err("cannot attach mtd%d", mtd->index); 1280 ubi_err("cannot attach mtd%d", mtd->index);
@@ -1218,7 +1320,7 @@ out:
1218 ubi_err("UBI error: cannot initialize UBI, error %d", err); 1320 ubi_err("UBI error: cannot initialize UBI, error %d", err);
1219 return err; 1321 return err;
1220} 1322}
1221module_init(ubi_init); 1323late_initcall(ubi_init);
1222 1324
1223static void __exit ubi_exit(void) 1325static void __exit ubi_exit(void)
1224{ 1326{
@@ -1252,8 +1354,7 @@ static int __init bytes_str_to_int(const char *str)
1252 1354
1253 result = simple_strtoul(str, &endp, 0); 1355 result = simple_strtoul(str, &endp, 0);
1254 if (str == endp || result >= INT_MAX) { 1356 if (str == endp || result >= INT_MAX) {
1255 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", 1357 ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
1256 str);
1257 return -EINVAL; 1358 return -EINVAL;
1258 } 1359 }
1259 1360
@@ -1269,8 +1370,7 @@ static int __init bytes_str_to_int(const char *str)
1269 case '\0': 1370 case '\0':
1270 break; 1371 break;
1271 default: 1372 default:
1272 printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", 1373 ubi_err("UBI error: incorrect bytes count: \"%s\"\n", str);
1273 str);
1274 return -EINVAL; 1374 return -EINVAL;
1275 } 1375 }
1276 1376
@@ -1291,27 +1391,26 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1291 struct mtd_dev_param *p; 1391 struct mtd_dev_param *p;
1292 char buf[MTD_PARAM_LEN_MAX]; 1392 char buf[MTD_PARAM_LEN_MAX];
1293 char *pbuf = &buf[0]; 1393 char *pbuf = &buf[0];
1294 char *tokens[2] = {NULL, NULL}; 1394 char *tokens[MTD_PARAM_MAX_COUNT];
1295 1395
1296 if (!val) 1396 if (!val)
1297 return -EINVAL; 1397 return -EINVAL;
1298 1398
1299 if (mtd_devs == UBI_MAX_DEVICES) { 1399 if (mtd_devs == UBI_MAX_DEVICES) {
1300 printk(KERN_ERR "UBI error: too many parameters, max. is %d\n", 1400 ubi_err("UBI error: too many parameters, max. is %d\n",
1301 UBI_MAX_DEVICES); 1401 UBI_MAX_DEVICES);
1302 return -EINVAL; 1402 return -EINVAL;
1303 } 1403 }
1304 1404
1305 len = strnlen(val, MTD_PARAM_LEN_MAX); 1405 len = strnlen(val, MTD_PARAM_LEN_MAX);
1306 if (len == MTD_PARAM_LEN_MAX) { 1406 if (len == MTD_PARAM_LEN_MAX) {
1307 printk(KERN_ERR "UBI error: parameter \"%s\" is too long, " 1407 ubi_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
1308 "max. is %d\n", val, MTD_PARAM_LEN_MAX); 1408 val, MTD_PARAM_LEN_MAX);
1309 return -EINVAL; 1409 return -EINVAL;
1310 } 1410 }
1311 1411
1312 if (len == 0) { 1412 if (len == 0) {
1313 printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - " 1413 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n");
1314 "ignored\n");
1315 return 0; 1414 return 0;
1316 } 1415 }
1317 1416
@@ -1321,12 +1420,11 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1321 if (buf[len - 1] == '\n') 1420 if (buf[len - 1] == '\n')
1322 buf[len - 1] = '\0'; 1421 buf[len - 1] = '\0';
1323 1422
1324 for (i = 0; i < 2; i++) 1423 for (i = 0; i < MTD_PARAM_MAX_COUNT; i++)
1325 tokens[i] = strsep(&pbuf, ","); 1424 tokens[i] = strsep(&pbuf, ",");
1326 1425
1327 if (pbuf) { 1426 if (pbuf) {
1328 printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n", 1427 ubi_err("UBI error: too many arguments at \"%s\"\n", val);
1329 val);
1330 return -EINVAL; 1428 return -EINVAL;
1331 } 1429 }
1332 1430
@@ -1339,24 +1437,36 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1339 if (p->vid_hdr_offs < 0) 1437 if (p->vid_hdr_offs < 0)
1340 return p->vid_hdr_offs; 1438 return p->vid_hdr_offs;
1341 1439
1440 if (tokens[2]) {
1441 int err = kstrtoint(tokens[2], 10, &p->max_beb_per1024);
1442
1443 if (err) {
1444 ubi_err("UBI error: bad value for max_beb_per1024 parameter: %s",
1445 tokens[2]);
1446 return -EINVAL;
1447 }
1448 }
1449
1342 mtd_devs += 1; 1450 mtd_devs += 1;
1343 return 0; 1451 return 0;
1344} 1452}
1345 1453
1346module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000); 1454module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
1347MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: " 1455MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|path>[,<vid_hdr_offs>[,max_beb_per1024]].\n"
1348 "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
1349 "Multiple \"mtd\" parameters may be specified.\n" 1456 "Multiple \"mtd\" parameters may be specified.\n"
1350 "MTD devices may be specified by their number, name, or " 1457 "MTD devices may be specified by their number, name, or path to the MTD character device node.\n"
1351 "path to the MTD character device node.\n" 1458 "Optional \"vid_hdr_offs\" parameter specifies UBI VID header position to be used by UBI. (default value if 0)\n"
1352 "Optional \"vid_hdr_offs\" parameter specifies UBI VID " 1459 "Optional \"max_beb_per1024\" parameter specifies the maximum expected bad eraseblock per 1024 eraseblocks. (default value ("
1353 "header position to be used by UBI.\n" 1460 __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
1354 "Example 1: mtd=/dev/mtd0 - attach MTD device " 1461 "\n"
1355 "/dev/mtd0.\n" 1462 "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
1356 "Example 2: mtd=content,1984 mtd=4 - attach MTD device " 1463 "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
1357 "with name \"content\" using VID header offset 1984, and " 1464 "Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
1358 "MTD device number 4 with default VID header offset."); 1465 "\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
1359 1466#ifdef CONFIG_MTD_UBI_FASTMAP
1467module_param(fm_autoconvert, bool, 0644);
1468MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
1469#endif
1360MODULE_VERSION(__stringify(UBI_VERSION)); 1470MODULE_VERSION(__stringify(UBI_VERSION));
1361MODULE_DESCRIPTION("UBI - Unsorted Block Images"); 1471MODULE_DESCRIPTION("UBI - Unsorted Block Images");
1362MODULE_AUTHOR("Artem Bityutskiy"); 1472MODULE_AUTHOR("Artem Bityutskiy");
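To make the get_bad_peb_limit() arithmetic above concrete, here is a minimal user-space sketch (plain C, not kernel code; the helper name and sample values are illustrative) of the rounded-up "bad PEBs per 1024" computation:

    /*
     * Standalone illustration of the limit computed by get_bad_peb_limit()
     * above; it mirrors mult_frac(device_pebs, max_beb_per1024, 1024) plus
     * the round-up step. Values below are examples only.
     */
    #include <stdio.h>

    static int bad_peb_limit(long long device_pebs, int max_beb_per1024)
    {
            long long limit;

            if (!max_beb_per1024)
                    return 0;

            /* mult_frac(): avoids overflowing device_pebs * max_beb_per1024 */
            limit = (device_pebs / 1024) * max_beb_per1024 +
                    ((device_pebs % 1024) * max_beb_per1024) / 1024;

            /* Round up if the integer division dropped a remainder */
            if (limit * 1024 / max_beb_per1024 < device_pebs)
                    limit += 1;

            return (int)limit;
    }

    int main(void)
    {
            /* e.g. a 4096-PEB chip allowing 20 bad PEBs per 1024 -> 80 */
            printf("%d\n", bad_peb_limit(4096, 20));
            /* the "mtd=/dev/mtd1,0,25" example above: 4096 PEBs, 25/1024 -> 100 */
            printf("%d\n", bad_peb_limit(4096, 25));
            return 0;
    }

For 4096 PEBs with max_beb_per1024 = 25 this yields 100 reserved PEBs, matching Example 3 in the module parameter description above.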
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fb5567878181..dfcc65b33e99 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -140,9 +140,9 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
140 vol->updating = 0; 140 vol->updating = 0;
141 vfree(vol->upd_buf); 141 vfree(vol->upd_buf);
142 } else if (vol->changing_leb) { 142 } else if (vol->changing_leb) {
143 dbg_gen("only %lld of %lld bytes received for atomic LEB change" 143 dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
144 " for volume %d:%d, cancel", vol->upd_received, 144 vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
145 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); 145 vol->vol_id);
146 vol->changing_leb = 0; 146 vol->changing_leb = 0;
147 vfree(vol->upd_buf); 147 vfree(vol->upd_buf);
148 } 148 }
@@ -189,7 +189,8 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
189 return new_offset; 189 return new_offset;
190} 190}
191 191
192static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end, int datasync) 192static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
193 int datasync)
193{ 194{
194 struct ubi_volume_desc *desc = file->private_data; 195 struct ubi_volume_desc *desc = file->private_data;
195 struct ubi_device *ubi = desc->vol->ubi; 196 struct ubi_device *ubi = desc->vol->ubi;
@@ -753,7 +754,7 @@ static int rename_volumes(struct ubi_device *ubi,
753 re->new_name_len = name_len; 754 re->new_name_len = name_len;
754 memcpy(re->new_name, name, name_len); 755 memcpy(re->new_name, name, name_len);
755 list_add_tail(&re->list, &rename_list); 756 list_add_tail(&re->list, &rename_list);
756 dbg_msg("will rename volume %d from \"%s\" to \"%s\"", 757 dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
757 vol_id, re->desc->vol->name, name); 758 vol_id, re->desc->vol->name, name);
758 } 759 }
759 760
@@ -811,7 +812,7 @@ static int rename_volumes(struct ubi_device *ubi,
811 re1->remove = 1; 812 re1->remove = 1;
812 re1->desc = desc; 813 re1->desc = desc;
813 list_add(&re1->list, &rename_list); 814 list_add(&re1->list, &rename_list);
814 dbg_msg("will remove volume %d, name \"%s\"", 815 dbg_gen("will remove volume %d, name \"%s\"",
815 re1->desc->vol->vol_id, re1->desc->vol->name); 816 re1->desc->vol->vol_id, re1->desc->vol->name);
816 } 817 }
817 818
@@ -942,7 +943,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
942 { 943 {
943 struct ubi_rnvol_req *req; 944 struct ubi_rnvol_req *req;
944 945
945 dbg_msg("re-name volumes"); 946 dbg_gen("re-name volumes");
946 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); 947 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
947 if (!req) { 948 if (!req) {
948 err = -ENOMEM; 949 err = -ENOMEM;
@@ -1010,7 +1011,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
1010 * 'ubi_attach_mtd_dev()'. 1011 * 'ubi_attach_mtd_dev()'.
1011 */ 1012 */
1012 mutex_lock(&ubi_devices_mutex); 1013 mutex_lock(&ubi_devices_mutex);
1013 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset); 1014 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
1015 req.max_beb_per1024);
1014 mutex_unlock(&ubi_devices_mutex); 1016 mutex_unlock(&ubi_devices_mutex);
1015 if (err < 0) 1017 if (err < 0)
1016 put_mtd_device(mtd); 1018 put_mtd_device(mtd);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 7c1380305219..26908a59506b 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -43,8 +43,8 @@ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
43 return; 43 return;
44 err = mtd_read(ubi->mtd, addr, len, &read, buf); 44 err = mtd_read(ubi->mtd, addr, len, &read, buf);
45 if (err && err != -EUCLEAN) { 45 if (err && err != -EUCLEAN) {
46 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 46 ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
47 "read %zd bytes", err, len, pnum, offset, read); 47 err, len, pnum, offset, read);
48 goto out; 48 goto out;
49 } 49 }
50 50
@@ -62,21 +62,15 @@ out:
62 */ 62 */
63void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) 63void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
64{ 64{
65 printk(KERN_DEBUG "Erase counter header dump:\n"); 65 pr_err("Erase counter header dump:\n");
66 printk(KERN_DEBUG "\tmagic %#08x\n", 66 pr_err("\tmagic %#08x\n", be32_to_cpu(ec_hdr->magic));
67 be32_to_cpu(ec_hdr->magic)); 67 pr_err("\tversion %d\n", (int)ec_hdr->version);
68 printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version); 68 pr_err("\tec %llu\n", (long long)be64_to_cpu(ec_hdr->ec));
69 printk(KERN_DEBUG "\tec %llu\n", 69 pr_err("\tvid_hdr_offset %d\n", be32_to_cpu(ec_hdr->vid_hdr_offset));
70 (long long)be64_to_cpu(ec_hdr->ec)); 70 pr_err("\tdata_offset %d\n", be32_to_cpu(ec_hdr->data_offset));
71 printk(KERN_DEBUG "\tvid_hdr_offset %d\n", 71 pr_err("\timage_seq %d\n", be32_to_cpu(ec_hdr->image_seq));
72 be32_to_cpu(ec_hdr->vid_hdr_offset)); 72 pr_err("\thdr_crc %#08x\n", be32_to_cpu(ec_hdr->hdr_crc));
73 printk(KERN_DEBUG "\tdata_offset %d\n", 73 pr_err("erase counter header hexdump:\n");
74 be32_to_cpu(ec_hdr->data_offset));
75 printk(KERN_DEBUG "\timage_seq %d\n",
76 be32_to_cpu(ec_hdr->image_seq));
77 printk(KERN_DEBUG "\thdr_crc %#08x\n",
78 be32_to_cpu(ec_hdr->hdr_crc));
79 printk(KERN_DEBUG "erase counter header hexdump:\n");
80 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 74 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
81 ec_hdr, UBI_EC_HDR_SIZE, 1); 75 ec_hdr, UBI_EC_HDR_SIZE, 1);
82} 76}
@@ -87,21 +81,21 @@ void ubi_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
87 */ 81 */
88void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) 82void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
89{ 83{
90 printk(KERN_DEBUG "Volume identifier header dump:\n"); 84 pr_err("Volume identifier header dump:\n");
91 printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic)); 85 pr_err("\tmagic %08x\n", be32_to_cpu(vid_hdr->magic));
92 printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version); 86 pr_err("\tversion %d\n", (int)vid_hdr->version);
93 printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type); 87 pr_err("\tvol_type %d\n", (int)vid_hdr->vol_type);
94 printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag); 88 pr_err("\tcopy_flag %d\n", (int)vid_hdr->copy_flag);
95 printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat); 89 pr_err("\tcompat %d\n", (int)vid_hdr->compat);
96 printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id)); 90 pr_err("\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id));
97 printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum)); 91 pr_err("\tlnum %d\n", be32_to_cpu(vid_hdr->lnum));
98 printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size)); 92 pr_err("\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size));
99 printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs)); 93 pr_err("\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs));
100 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad)); 94 pr_err("\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad));
101 printk(KERN_DEBUG "\tsqnum %llu\n", 95 pr_err("\tsqnum %llu\n",
102 (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); 96 (unsigned long long)be64_to_cpu(vid_hdr->sqnum));
103 printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc)); 97 pr_err("\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc));
104 printk(KERN_DEBUG "Volume identifier header hexdump:\n"); 98 pr_err("Volume identifier header hexdump:\n");
105 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 99 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
106 vid_hdr, UBI_VID_HDR_SIZE, 1); 100 vid_hdr, UBI_VID_HDR_SIZE, 1);
107} 101}
@@ -112,25 +106,25 @@ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr)
112 */ 106 */
113void ubi_dump_vol_info(const struct ubi_volume *vol) 107void ubi_dump_vol_info(const struct ubi_volume *vol)
114{ 108{
115 printk(KERN_DEBUG "Volume information dump:\n"); 109 pr_err("Volume information dump:\n");
116 printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id); 110 pr_err("\tvol_id %d\n", vol->vol_id);
117 printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs); 111 pr_err("\treserved_pebs %d\n", vol->reserved_pebs);
118 printk(KERN_DEBUG "\talignment %d\n", vol->alignment); 112 pr_err("\talignment %d\n", vol->alignment);
119 printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad); 113 pr_err("\tdata_pad %d\n", vol->data_pad);
120 printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type); 114 pr_err("\tvol_type %d\n", vol->vol_type);
121 printk(KERN_DEBUG "\tname_len %d\n", vol->name_len); 115 pr_err("\tname_len %d\n", vol->name_len);
122 printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size); 116 pr_err("\tusable_leb_size %d\n", vol->usable_leb_size);
123 printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs); 117 pr_err("\tused_ebs %d\n", vol->used_ebs);
124 printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes); 118 pr_err("\tused_bytes %lld\n", vol->used_bytes);
125 printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes); 119 pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
126 printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted); 120 pr_err("\tcorrupted %d\n", vol->corrupted);
127 printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker); 121 pr_err("\tupd_marker %d\n", vol->upd_marker);
128 122
129 if (vol->name_len <= UBI_VOL_NAME_MAX && 123 if (vol->name_len <= UBI_VOL_NAME_MAX &&
130 strnlen(vol->name, vol->name_len + 1) == vol->name_len) { 124 strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
131 printk(KERN_DEBUG "\tname %s\n", vol->name); 125 pr_err("\tname %s\n", vol->name);
132 } else { 126 } else {
133 printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", 127 pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
134 vol->name[0], vol->name[1], vol->name[2], 128 vol->name[0], vol->name[1], vol->name[2],
135 vol->name[3], vol->name[4]); 129 vol->name[3], vol->name[4]);
136 } 130 }
@@ -145,29 +139,28 @@ void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
145{ 139{
146 int name_len = be16_to_cpu(r->name_len); 140 int name_len = be16_to_cpu(r->name_len);
147 141
148 printk(KERN_DEBUG "Volume table record %d dump:\n", idx); 142 pr_err("Volume table record %d dump:\n", idx);
149 printk(KERN_DEBUG "\treserved_pebs %d\n", 143 pr_err("\treserved_pebs %d\n", be32_to_cpu(r->reserved_pebs));
150 be32_to_cpu(r->reserved_pebs)); 144 pr_err("\talignment %d\n", be32_to_cpu(r->alignment));
151 printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment)); 145 pr_err("\tdata_pad %d\n", be32_to_cpu(r->data_pad));
152 printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad)); 146 pr_err("\tvol_type %d\n", (int)r->vol_type);
153 printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type); 147 pr_err("\tupd_marker %d\n", (int)r->upd_marker);
154 printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker); 148 pr_err("\tname_len %d\n", name_len);
155 printk(KERN_DEBUG "\tname_len %d\n", name_len);
156 149
157 if (r->name[0] == '\0') { 150 if (r->name[0] == '\0') {
158 printk(KERN_DEBUG "\tname NULL\n"); 151 pr_err("\tname NULL\n");
159 return; 152 return;
160 } 153 }
161 154
162 if (name_len <= UBI_VOL_NAME_MAX && 155 if (name_len <= UBI_VOL_NAME_MAX &&
163 strnlen(&r->name[0], name_len + 1) == name_len) { 156 strnlen(&r->name[0], name_len + 1) == name_len) {
164 printk(KERN_DEBUG "\tname %s\n", &r->name[0]); 157 pr_err("\tname %s\n", &r->name[0]);
165 } else { 158 } else {
166 printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", 159 pr_err("\t1st 5 characters of name: %c%c%c%c%c\n",
167 r->name[0], r->name[1], r->name[2], r->name[3], 160 r->name[0], r->name[1], r->name[2], r->name[3],
168 r->name[4]); 161 r->name[4]);
169 } 162 }
170 printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc)); 163 pr_err("\tcrc %#08x\n", be32_to_cpu(r->crc));
171} 164}
172 165
173/** 166/**
@@ -176,15 +169,15 @@ void ubi_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx)
176 */ 169 */
177void ubi_dump_av(const struct ubi_ainf_volume *av) 170void ubi_dump_av(const struct ubi_ainf_volume *av)
178{ 171{
179 printk(KERN_DEBUG "Volume attaching information dump:\n"); 172 pr_err("Volume attaching information dump:\n");
180 printk(KERN_DEBUG "\tvol_id %d\n", av->vol_id); 173 pr_err("\tvol_id %d\n", av->vol_id);
181 printk(KERN_DEBUG "\thighest_lnum %d\n", av->highest_lnum); 174 pr_err("\thighest_lnum %d\n", av->highest_lnum);
182 printk(KERN_DEBUG "\tleb_count %d\n", av->leb_count); 175 pr_err("\tleb_count %d\n", av->leb_count);
183 printk(KERN_DEBUG "\tcompat %d\n", av->compat); 176 pr_err("\tcompat %d\n", av->compat);
184 printk(KERN_DEBUG "\tvol_type %d\n", av->vol_type); 177 pr_err("\tvol_type %d\n", av->vol_type);
185 printk(KERN_DEBUG "\tused_ebs %d\n", av->used_ebs); 178 pr_err("\tused_ebs %d\n", av->used_ebs);
186 printk(KERN_DEBUG "\tlast_data_size %d\n", av->last_data_size); 179 pr_err("\tlast_data_size %d\n", av->last_data_size);
187 printk(KERN_DEBUG "\tdata_pad %d\n", av->data_pad); 180 pr_err("\tdata_pad %d\n", av->data_pad);
188} 181}
189 182
190/** 183/**
@@ -194,13 +187,13 @@ void ubi_dump_av(const struct ubi_ainf_volume *av)
194 */ 187 */
195void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type) 188void ubi_dump_aeb(const struct ubi_ainf_peb *aeb, int type)
196{ 189{
197 printk(KERN_DEBUG "eraseblock attaching information dump:\n"); 190 pr_err("eraseblock attaching information dump:\n");
198 printk(KERN_DEBUG "\tec %d\n", aeb->ec); 191 pr_err("\tec %d\n", aeb->ec);
199 printk(KERN_DEBUG "\tpnum %d\n", aeb->pnum); 192 pr_err("\tpnum %d\n", aeb->pnum);
200 if (type == 0) { 193 if (type == 0) {
201 printk(KERN_DEBUG "\tlnum %d\n", aeb->lnum); 194 pr_err("\tlnum %d\n", aeb->lnum);
202 printk(KERN_DEBUG "\tscrub %d\n", aeb->scrub); 195 pr_err("\tscrub %d\n", aeb->scrub);
203 printk(KERN_DEBUG "\tsqnum %llu\n", aeb->sqnum); 196 pr_err("\tsqnum %llu\n", aeb->sqnum);
204 } 197 }
205} 198}
206 199
@@ -212,16 +205,16 @@ void ubi_dump_mkvol_req(const struct ubi_mkvol_req *req)
212{ 205{
213 char nm[17]; 206 char nm[17];
214 207
215 printk(KERN_DEBUG "Volume creation request dump:\n"); 208 pr_err("Volume creation request dump:\n");
216 printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id); 209 pr_err("\tvol_id %d\n", req->vol_id);
217 printk(KERN_DEBUG "\talignment %d\n", req->alignment); 210 pr_err("\talignment %d\n", req->alignment);
218 printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes); 211 pr_err("\tbytes %lld\n", (long long)req->bytes);
219 printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type); 212 pr_err("\tvol_type %d\n", req->vol_type);
220 printk(KERN_DEBUG "\tname_len %d\n", req->name_len); 213 pr_err("\tname_len %d\n", req->name_len);
221 214
222 memcpy(nm, req->name, 16); 215 memcpy(nm, req->name, 16);
223 nm[16] = 0; 216 nm[16] = 0;
224 printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm); 217 pr_err("\t1st 16 characters of name: %s\n", nm);
225} 218}
226 219
227/** 220/**
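The debug.c hunks above do two things: they join the wrapped printk() format strings into single lines so the full message is greppable, and they switch the dump helpers from printk(KERN_DEBUG ...) to pr_err(), presumably because these dumps are only emitted on error paths and must stay visible without DEBUG. pr_err() is the standard printk.h wrapper, roughly:

	pr_err("\tec %llu\n", ec);
	/* expands to printk(KERN_ERR pr_fmt("\tec %llu\n"), ec); pr_fmt() defaults
	 * to the bare format unless a file redefines it before the includes */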
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index d5d2645b51a7..3dbc877d9663 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -29,22 +29,18 @@ void ubi_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr);
29 29
30#define ubi_assert(expr) do { \ 30#define ubi_assert(expr) do { \
31 if (unlikely(!(expr))) { \ 31 if (unlikely(!(expr))) { \
32 printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ 32 pr_crit("UBI assert failed in %s at %u (pid %d)\n", \
33 __func__, __LINE__, current->pid); \ 33 __func__, __LINE__, current->pid); \
34 dump_stack(); \ 34 dump_stack(); \
35 } \ 35 } \
36} while (0) 36} while (0)
37 37
38#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \ 38#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \
39 print_hex_dump(l, ps, pt, r, g, b, len, a) 39 print_hex_dump(l, ps, pt, r, g, b, len, a)
40 40
41#define ubi_dbg_msg(type, fmt, ...) \ 41#define ubi_dbg_msg(type, fmt, ...) \
42 pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__) 42 pr_debug("UBI DBG " type " (pid %d): " fmt "\n", current->pid, \
43 43 ##__VA_ARGS__)
44/* Just a debugging messages not related to any specific UBI subsystem */
45#define dbg_msg(fmt, ...) \
46 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
47 current->pid, __func__, ##__VA_ARGS__)
48 44
49/* General debugging messages */ 45/* General debugging messages */
50#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__) 46#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
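With the pid folded into ubi_dbg_msg(), the stand-alone dbg_msg() helper that duplicated that prefix by hand becomes redundant and is removed. After this change a call such as:

	dbg_gen("PEB %d", pnum);

prints, via pr_debug(), roughly "UBI DBG gen (pid 1234): PEB 5", the pid coming from current->pid as shown in the new macro.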
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index b703ac7729cf..0e11671dadc4 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -57,7 +57,7 @@
57 * global sequence counter value. It also increases the global sequence 57 * global sequence counter value. It also increases the global sequence
58 * counter. 58 * counter.
59 */ 59 */
60static unsigned long long next_sqnum(struct ubi_device *ubi) 60unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
61{ 61{
62 unsigned long long sqnum; 62 unsigned long long sqnum;
63 63
@@ -340,7 +340,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
340 340
341 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum); 341 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
342 342
343 down_read(&ubi->fm_sem);
343 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED; 344 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
345 up_read(&ubi->fm_sem);
344 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0); 346 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
345 347
346out_unlock: 348out_unlock:
@@ -420,9 +422,8 @@ retry:
420 */ 422 */
421 if (err == UBI_IO_BAD_HDR_EBADMSG || 423 if (err == UBI_IO_BAD_HDR_EBADMSG ||
422 err == UBI_IO_BAD_HDR) { 424 err == UBI_IO_BAD_HDR) {
423 ubi_warn("corrupted VID header at PEB " 425 ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
424 "%d, LEB %d:%d", pnum, vol_id, 426 pnum, vol_id, lnum);
425 lnum);
426 err = -EBADMSG; 427 err = -EBADMSG;
427 } else 428 } else
428 ubi_ro_mode(ubi); 429 ubi_ro_mode(ubi);
@@ -522,7 +523,7 @@ retry:
522 goto out_put; 523 goto out_put;
523 } 524 }
524 525
525 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 526 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
526 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); 527 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
527 if (err) 528 if (err)
528 goto write_error; 529 goto write_error;
@@ -549,7 +550,9 @@ retry:
549 mutex_unlock(&ubi->buf_mutex); 550 mutex_unlock(&ubi->buf_mutex);
550 ubi_free_vid_hdr(ubi, vid_hdr); 551 ubi_free_vid_hdr(ubi, vid_hdr);
551 552
553 down_read(&ubi->fm_sem);
552 vol->eba_tbl[lnum] = new_pnum; 554 vol->eba_tbl[lnum] = new_pnum;
555 up_read(&ubi->fm_sem);
553 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 556 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
554 557
555 ubi_msg("data was successfully recovered"); 558 ubi_msg("data was successfully recovered");
@@ -633,7 +636,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
633 } 636 }
634 637
635 vid_hdr->vol_type = UBI_VID_DYNAMIC; 638 vid_hdr->vol_type = UBI_VID_DYNAMIC;
636 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 639 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
637 vid_hdr->vol_id = cpu_to_be32(vol_id); 640 vid_hdr->vol_id = cpu_to_be32(vol_id);
638 vid_hdr->lnum = cpu_to_be32(lnum); 641 vid_hdr->lnum = cpu_to_be32(lnum);
639 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 642 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -660,14 +663,15 @@ retry:
660 if (len) { 663 if (len) {
661 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 664 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
662 if (err) { 665 if (err) {
663 ubi_warn("failed to write %d bytes at offset %d of " 666 ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
664 "LEB %d:%d, PEB %d", len, offset, vol_id, 667 len, offset, vol_id, lnum, pnum);
665 lnum, pnum);
666 goto write_error; 668 goto write_error;
667 } 669 }
668 } 670 }
669 671
672 down_read(&ubi->fm_sem);
670 vol->eba_tbl[lnum] = pnum; 673 vol->eba_tbl[lnum] = pnum;
674 up_read(&ubi->fm_sem);
671 675
672 leb_write_unlock(ubi, vol_id, lnum); 676 leb_write_unlock(ubi, vol_id, lnum);
673 ubi_free_vid_hdr(ubi, vid_hdr); 677 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -694,7 +698,7 @@ write_error:
694 return err; 698 return err;
695 } 699 }
696 700
697 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 701 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
698 ubi_msg("try another PEB"); 702 ubi_msg("try another PEB");
699 goto retry; 703 goto retry;
700} 704}
@@ -747,7 +751,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
747 return err; 751 return err;
748 } 752 }
749 753
750 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 754 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
751 vid_hdr->vol_id = cpu_to_be32(vol_id); 755 vid_hdr->vol_id = cpu_to_be32(vol_id);
752 vid_hdr->lnum = cpu_to_be32(lnum); 756 vid_hdr->lnum = cpu_to_be32(lnum);
753 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 757 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -785,7 +789,9 @@ retry:
785 } 789 }
786 790
787 ubi_assert(vol->eba_tbl[lnum] < 0); 791 ubi_assert(vol->eba_tbl[lnum] < 0);
792 down_read(&ubi->fm_sem);
788 vol->eba_tbl[lnum] = pnum; 793 vol->eba_tbl[lnum] = pnum;
794 up_read(&ubi->fm_sem);
789 795
790 leb_write_unlock(ubi, vol_id, lnum); 796 leb_write_unlock(ubi, vol_id, lnum);
791 ubi_free_vid_hdr(ubi, vid_hdr); 797 ubi_free_vid_hdr(ubi, vid_hdr);
@@ -812,7 +818,7 @@ write_error:
812 return err; 818 return err;
813 } 819 }
814 820
815 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 821 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
816 ubi_msg("try another PEB"); 822 ubi_msg("try another PEB");
817 goto retry; 823 goto retry;
818} 824}
@@ -864,7 +870,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
864 if (err) 870 if (err)
865 goto out_mutex; 871 goto out_mutex;
866 872
867 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 873 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
868 vid_hdr->vol_id = cpu_to_be32(vol_id); 874 vid_hdr->vol_id = cpu_to_be32(vol_id);
869 vid_hdr->lnum = cpu_to_be32(lnum); 875 vid_hdr->lnum = cpu_to_be32(lnum);
870 vid_hdr->compat = ubi_get_compat(ubi, vol_id); 876 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
@@ -906,7 +912,9 @@ retry:
906 goto out_leb_unlock; 912 goto out_leb_unlock;
907 } 913 }
908 914
915 down_read(&ubi->fm_sem);
909 vol->eba_tbl[lnum] = pnum; 916 vol->eba_tbl[lnum] = pnum;
917 up_read(&ubi->fm_sem);
910 918
911out_leb_unlock: 919out_leb_unlock:
912 leb_write_unlock(ubi, vol_id, lnum); 920 leb_write_unlock(ubi, vol_id, lnum);
@@ -932,7 +940,7 @@ write_error:
932 goto out_leb_unlock; 940 goto out_leb_unlock;
933 } 941 }
934 942
935 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 943 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
936 ubi_msg("try another PEB"); 944 ubi_msg("try another PEB");
937 goto retry; 945 goto retry;
938} 946}
@@ -1040,9 +1048,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1040 * cancel it. 1048 * cancel it.
1041 */ 1049 */
1042 if (vol->eba_tbl[lnum] != from) { 1050 if (vol->eba_tbl[lnum] != from) {
1043 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to " 1051 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
1044 "PEB %d, cancel", vol_id, lnum, from, 1052 vol_id, lnum, from, vol->eba_tbl[lnum]);
1045 vol->eba_tbl[lnum]);
1046 err = MOVE_CANCEL_RACE; 1053 err = MOVE_CANCEL_RACE;
1047 goto out_unlock_leb; 1054 goto out_unlock_leb;
1048 } 1055 }
@@ -1092,7 +1099,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1092 vid_hdr->data_size = cpu_to_be32(data_size); 1099 vid_hdr->data_size = cpu_to_be32(data_size);
1093 vid_hdr->data_crc = cpu_to_be32(crc); 1100 vid_hdr->data_crc = cpu_to_be32(crc);
1094 } 1101 }
1095 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi)); 1102 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1096 1103
1097 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); 1104 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1098 if (err) { 1105 if (err) {
@@ -1107,8 +1114,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1107 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); 1114 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1108 if (err) { 1115 if (err) {
1109 if (err != UBI_IO_BITFLIPS) { 1116 if (err != UBI_IO_BITFLIPS) {
1110 ubi_warn("error %d while reading VID header back from " 1117 ubi_warn("error %d while reading VID header back from PEB %d",
1111 "PEB %d", err, to); 1118 err, to);
1112 if (is_error_sane(err)) 1119 if (is_error_sane(err))
1113 err = MOVE_TARGET_RD_ERR; 1120 err = MOVE_TARGET_RD_ERR;
1114 } else 1121 } else
@@ -1134,8 +1141,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1134 err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); 1141 err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1135 if (err) { 1142 if (err) {
1136 if (err != UBI_IO_BITFLIPS) { 1143 if (err != UBI_IO_BITFLIPS) {
1137 ubi_warn("error %d while reading data back " 1144 ubi_warn("error %d while reading data back from PEB %d",
1138 "from PEB %d", err, to); 1145 err, to);
1139 if (is_error_sane(err)) 1146 if (is_error_sane(err))
1140 err = MOVE_TARGET_RD_ERR; 1147 err = MOVE_TARGET_RD_ERR;
1141 } else 1148 } else
@@ -1146,15 +1153,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1146 cond_resched(); 1153 cond_resched();
1147 1154
1148 if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { 1155 if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
1149 ubi_warn("read data back from PEB %d and it is " 1156 ubi_warn("read data back from PEB %d and it is different",
1150 "different", to); 1157 to);
1151 err = -EINVAL; 1158 err = -EINVAL;
1152 goto out_unlock_buf; 1159 goto out_unlock_buf;
1153 } 1160 }
1154 } 1161 }
1155 1162
1156 ubi_assert(vol->eba_tbl[lnum] == from); 1163 ubi_assert(vol->eba_tbl[lnum] == from);
1164 down_read(&ubi->fm_sem);
1157 vol->eba_tbl[lnum] = to; 1165 vol->eba_tbl[lnum] = to;
1166 up_read(&ubi->fm_sem);
1158 1167
1159out_unlock_buf: 1168out_unlock_buf:
1160 mutex_unlock(&ubi->buf_mutex); 1169 mutex_unlock(&ubi->buf_mutex);
@@ -1197,11 +1206,107 @@ static void print_rsvd_warning(struct ubi_device *ubi,
1197 return; 1206 return;
1198 } 1207 }
1199 1208
1200 ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d," 1209 ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1201 " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); 1210 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1202 if (ubi->corr_peb_count) 1211 if (ubi->corr_peb_count)
1203 ubi_warn("%d PEBs are corrupted and not used", 1212 ubi_warn("%d PEBs are corrupted and not used",
1204 ubi->corr_peb_count); 1213 ubi->corr_peb_count);
1214}
1215
1216/**
1217 * self_check_eba - run a self check on the EBA table constructed by fastmap.
1218 * @ubi: UBI device description object
1219 * @ai_fastmap: UBI attach info object created by fastmap
1220 * @ai_scan: UBI attach info object created by scanning
1221 *
1222 * Returns < 0 in case of an internal error, 0 otherwise.
1223 * If a bad EBA table entry was found it will be printed out and
1224 * ubi_assert() triggers.
1225 */
1226int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1227 struct ubi_attach_info *ai_scan)
1228{
1229 int i, j, num_volumes, ret = 0;
1230 int **scan_eba, **fm_eba;
1231 struct ubi_ainf_volume *av;
1232 struct ubi_volume *vol;
1233 struct ubi_ainf_peb *aeb;
1234 struct rb_node *rb;
1235
1236 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1237
1238 scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
1239 if (!scan_eba)
1240 return -ENOMEM;
1241
1242 fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
1243 if (!fm_eba) {
1244 kfree(scan_eba);
1245 return -ENOMEM;
1246 }
1247
1248 for (i = 0; i < num_volumes; i++) {
1249 vol = ubi->volumes[i];
1250 if (!vol)
1251 continue;
1252
1253 scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
1254 GFP_KERNEL);
1255 if (!scan_eba[i]) {
1256 ret = -ENOMEM;
1257 goto out_free;
1258 }
1259
1260 fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
1261 GFP_KERNEL);
1262 if (!fm_eba[i]) {
1263 ret = -ENOMEM;
1264 goto out_free;
1265 }
1266
1267 for (j = 0; j < vol->reserved_pebs; j++)
1268 scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1269
1270 av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1271 if (!av)
1272 continue;
1273
1274 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1275 scan_eba[i][aeb->lnum] = aeb->pnum;
1276
1277 av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1278 if (!av)
1279 continue;
1280
1281 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1282 fm_eba[i][aeb->lnum] = aeb->pnum;
1283
1284 for (j = 0; j < vol->reserved_pebs; j++) {
1285 if (scan_eba[i][j] != fm_eba[i][j]) {
1286 if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1287 fm_eba[i][j] == UBI_LEB_UNMAPPED)
1288 continue;
1289
1290 ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
1291 vol->vol_id, i, fm_eba[i][j],
1292 scan_eba[i][j]);
1293 ubi_assert(0);
1294 }
1295 }
1296 }
1297
1298out_free:
1299 for (i = 0; i < num_volumes; i++) {
1300 if (!ubi->volumes[i])
1301 continue;
1302
1303 kfree(scan_eba[i]);
1304 kfree(fm_eba[i]);
1305 }
1306
1307 kfree(scan_eba);
1308 kfree(fm_eba);
1309 return ret;
1205} 1310}
1206 1311
1207/** 1312/**
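Every assignment to vol->eba_tbl[] in eba.c is now bracketed by down_read(&ubi->fm_sem)/up_read(&ubi->fm_sem). The write side is not in this hunk; the presumed intent is that the fastmap writer takes fm_sem for writing so it sees a stable EBA table while serialising it:

	/* EBA update paths (readers, may run concurrently with each other) */
	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = pnum;
	up_read(&ubi->fm_sem);

	/* fastmap writer (assumed, lives elsewhere in the series) */
	down_write(&ubi->fm_sem);
	/* ... copy vol->eba_tbl[] into the fastmap buffer ... */
	up_write(&ubi->fm_sem);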
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
new file mode 100644
index 000000000000..1a5f53c090d4
--- /dev/null
+++ b/drivers/mtd/ubi/fastmap.c
@@ -0,0 +1,1537 @@
1/*
2 * Copyright (c) 2012 Linutronix GmbH
3 * Author: Richard Weinberger <richard@nod.at>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 */
15
16#include <linux/crc32.h>
17#include "ubi.h"
18
19/**
20 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
21 * @ubi: UBI device description object
22 */
23size_t ubi_calc_fm_size(struct ubi_device *ubi)
24{
25 size_t size;
26
27 size = sizeof(struct ubi_fm_hdr) + \
28 sizeof(struct ubi_fm_scan_pool) + \
29 sizeof(struct ubi_fm_scan_pool) + \
30 (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
31 (sizeof(struct ubi_fm_eba) + \
32 (ubi->peb_count * sizeof(__be32))) + \
33 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
34 return roundup(size, ubi->leb_size);
35}
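A rough reading of the formula above (sizes are schematic, not the real struct sizes): the fastmap needs a fixed overhead for the header, the two pools and up to UBI_MAX_VOLUMES volume headers, plus one ubi_fm_ec record and one __be32 EBA slot per PEB, and the total is padded to a whole number of LEBs:

	raw     = fixed_overhead + ubi->peb_count * (sizeof(struct ubi_fm_ec) + sizeof(__be32));
	fm_size = roundup(raw, ubi->leb_size);
	/* fm_size / ubi->leb_size is the number of PEBs the fastmap will occupy */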
36
37
38/**
39 * new_fm_vhdr - allocate a new volume header for fastmap usage.
40 * @ubi: UBI device description object
41 * @vol_id: the VID of the new header
42 *
43 * Returns a new struct ubi_vid_hdr on success.
44 * NULL indicates out of memory.
45 */
46static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
47{
48 struct ubi_vid_hdr *new;
49
50 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
51 if (!new)
52 goto out;
53
54 new->vol_type = UBI_VID_DYNAMIC;
55 new->vol_id = cpu_to_be32(vol_id);
56
57 /* UBI implementations without fastmap support have to delete the
58 * fastmap.
59 */
60 new->compat = UBI_COMPAT_DELETE;
61
62out:
63 return new;
64}
65
66/**
 67 * add_aeb - create and add an attach erase block to a given list.
68 * @ai: UBI attach info object
69 * @list: the target list
70 * @pnum: PEB number of the new attach erase block
 71 * @ec: erase counter of the new LEB
72 * @scrub: scrub this PEB after attaching
73 *
74 * Returns 0 on success, < 0 indicates an internal error.
75 */
76static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
77 int pnum, int ec, int scrub)
78{
79 struct ubi_ainf_peb *aeb;
80
81 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
82 if (!aeb)
83 return -ENOMEM;
84
85 aeb->pnum = pnum;
86 aeb->ec = ec;
87 aeb->lnum = -1;
88 aeb->scrub = scrub;
89 aeb->copy_flag = aeb->sqnum = 0;
90
91 ai->ec_sum += aeb->ec;
92 ai->ec_count++;
93
94 if (ai->max_ec < aeb->ec)
95 ai->max_ec = aeb->ec;
96
97 if (ai->min_ec > aeb->ec)
98 ai->min_ec = aeb->ec;
99
100 list_add_tail(&aeb->u.list, list);
101
102 return 0;
103}
104
105/**
106 * add_vol - create and add a new volume to ubi_attach_info.
107 * @ai: ubi_attach_info object
108 * @vol_id: VID of the new volume
109 * @used_ebs: number of used EBS
110 * @data_pad: data padding value of the new volume
111 * @vol_type: volume type
112 * @last_eb_bytes: number of bytes in the last LEB
113 *
114 * Returns the new struct ubi_ainf_volume on success.
115 * NULL indicates an error.
116 */
117static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
118 int used_ebs, int data_pad, u8 vol_type,
119 int last_eb_bytes)
120{
121 struct ubi_ainf_volume *av;
122 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
123
124 while (*p) {
125 parent = *p;
126 av = rb_entry(parent, struct ubi_ainf_volume, rb);
127
128 if (vol_id > av->vol_id)
129 p = &(*p)->rb_left;
 130 else if (vol_id < av->vol_id)
131 p = &(*p)->rb_right;
132 }
133
134 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
135 if (!av)
136 goto out;
137
138 av->highest_lnum = av->leb_count = 0;
139 av->vol_id = vol_id;
140 av->used_ebs = used_ebs;
141 av->data_pad = data_pad;
142 av->last_data_size = last_eb_bytes;
143 av->compat = 0;
144 av->vol_type = vol_type;
145 av->root = RB_ROOT;
146
147 dbg_bld("found volume (ID %i)", vol_id);
148
149 rb_link_node(&av->rb, parent, p);
150 rb_insert_color(&av->rb, &ai->volumes);
151
152out:
153 return av;
154}
155
156/**
157 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 157 * from its original list.
159 * @ai: ubi_attach_info object
160 * @aeb: the to be assigned SEB
161 * @av: target scan volume
162 */
163static void assign_aeb_to_av(struct ubi_attach_info *ai,
164 struct ubi_ainf_peb *aeb,
165 struct ubi_ainf_volume *av)
166{
167 struct ubi_ainf_peb *tmp_aeb;
168 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
169
170 p = &av->root.rb_node;
171 while (*p) {
172 parent = *p;
173
174 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
175 if (aeb->lnum != tmp_aeb->lnum) {
176 if (aeb->lnum < tmp_aeb->lnum)
177 p = &(*p)->rb_left;
178 else
179 p = &(*p)->rb_right;
180
181 continue;
182 } else
183 break;
184 }
185
186 list_del(&aeb->u.list);
187 av->leb_count++;
188
189 rb_link_node(&aeb->u.rb, parent, p);
190 rb_insert_color(&aeb->u.rb, &av->root);
191}
192
193/**
 194 * update_vol - inserts or updates a LEB which was found in a pool.
195 * @ubi: the UBI device object
196 * @ai: attach info object
197 * @av: the volume this LEB belongs to
198 * @new_vh: the volume header derived from new_aeb
199 * @new_aeb: the AEB to be examined
200 *
201 * Returns 0 on success, < 0 indicates an internal error.
202 */
203static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
204 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
205 struct ubi_ainf_peb *new_aeb)
206{
207 struct rb_node **p = &av->root.rb_node, *parent = NULL;
208 struct ubi_ainf_peb *aeb, *victim;
209 int cmp_res;
210
211 while (*p) {
212 parent = *p;
213 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
214
215 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
216 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
217 p = &(*p)->rb_left;
218 else
219 p = &(*p)->rb_right;
220
221 continue;
222 }
223
224 /* This case can happen if the fastmap gets written
225 * because of a volume change (creation, deletion, ..).
226 * Then a PEB can be within the persistent EBA and the pool.
227 */
228 if (aeb->pnum == new_aeb->pnum) {
229 ubi_assert(aeb->lnum == new_aeb->lnum);
230 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
231
232 return 0;
233 }
234
235 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
236 if (cmp_res < 0)
237 return cmp_res;
238
239 /* new_aeb is newer */
240 if (cmp_res & 1) {
241 victim = kmem_cache_alloc(ai->aeb_slab_cache,
242 GFP_KERNEL);
243 if (!victim)
244 return -ENOMEM;
245
246 victim->ec = aeb->ec;
247 victim->pnum = aeb->pnum;
248 list_add_tail(&victim->u.list, &ai->erase);
249
250 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
251 av->last_data_size = \
252 be32_to_cpu(new_vh->data_size);
253
254 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
255 av->vol_id, aeb->lnum, new_aeb->pnum);
256
257 aeb->ec = new_aeb->ec;
258 aeb->pnum = new_aeb->pnum;
259 aeb->copy_flag = new_vh->copy_flag;
260 aeb->scrub = new_aeb->scrub;
261 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
262
263 /* new_aeb is older */
264 } else {
265 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
266 av->vol_id, aeb->lnum, new_aeb->pnum);
267 list_add_tail(&new_aeb->u.list, &ai->erase);
268 }
269
270 return 0;
271 }
272 /* This LEB is new, let's add it to the volume */
273
274 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
275 av->highest_lnum = be32_to_cpu(new_vh->lnum);
276 av->last_data_size = be32_to_cpu(new_vh->data_size);
277 }
278
279 if (av->vol_type == UBI_STATIC_VOLUME)
280 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
281
282 av->leb_count++;
283
284 rb_link_node(&new_aeb->u.rb, parent, p);
285 rb_insert_color(&new_aeb->u.rb, &av->root);
286
287 return 0;
288}
289
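Note on the in-place update above: the rb-tree under av->root is keyed by lnum, and the lnum of the colliding entry does not change, so when the pool copy wins (cmp_res & 1) the existing node is simply repointed to the new PEB instead of being removed and re-inserted; only the stale physical eraseblock is queued for erasure:

	aeb->pnum = new_aeb->pnum;			/* tree node keeps its position */
	list_add_tail(&victim->u.list, &ai->erase);	/* old PEB will be erased */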
290/**
291 * process_pool_aeb - we found a non-empty PEB in a pool.
292 * @ubi: UBI device object
293 * @ai: attach info object
294 * @new_vh: the volume header derived from new_aeb
295 * @new_aeb: the AEB to be examined
296 *
297 * Returns 0 on success, < 0 indicates an internal error.
298 */
299static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
300 struct ubi_vid_hdr *new_vh,
301 struct ubi_ainf_peb *new_aeb)
302{
303 struct ubi_ainf_volume *av, *tmp_av = NULL;
304 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
305 int found = 0;
306
307 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
308 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
309 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
310
311 return 0;
312 }
313
314 /* Find the volume this SEB belongs to */
315 while (*p) {
316 parent = *p;
317 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
318
319 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
320 p = &(*p)->rb_left;
321 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
322 p = &(*p)->rb_right;
323 else {
324 found = 1;
325 break;
326 }
327 }
328
329 if (found)
330 av = tmp_av;
331 else {
332 ubi_err("orphaned volume in fastmap pool!");
333 return UBI_BAD_FASTMAP;
334 }
335
336 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
337
338 return update_vol(ubi, ai, av, new_vh, new_aeb);
339}
340
341/**
342 * unmap_peb - unmap a PEB.
343 * If fastmap detects a free PEB in the pool it has to check whether
344 * this PEB has been unmapped after writing the fastmap.
345 *
346 * @ai: UBI attach info object
347 * @pnum: The PEB to be unmapped
348 */
349static void unmap_peb(struct ubi_attach_info *ai, int pnum)
350{
351 struct ubi_ainf_volume *av;
352 struct rb_node *node, *node2;
353 struct ubi_ainf_peb *aeb;
354
355 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
356 av = rb_entry(node, struct ubi_ainf_volume, rb);
357
358 for (node2 = rb_first(&av->root); node2;
359 node2 = rb_next(node2)) {
360 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
361 if (aeb->pnum == pnum) {
362 rb_erase(&aeb->u.rb, &av->root);
363 kmem_cache_free(ai->aeb_slab_cache, aeb);
364 return;
365 }
366 }
367 }
368}
369
370/**
 371 * scan_pool - scans a pool for changed (no longer empty) PEBs.
372 * @ubi: UBI device object
373 * @ai: attach info object
374 * @pebs: an array of all PEB numbers in the to be scanned pool
375 * @pool_size: size of the pool (number of entries in @pebs)
376 * @max_sqnum: pointer to the maximal sequence number
377 * @eba_orphans: list of PEBs which need to be scanned
378 * @free: list of PEBs which are most likely free (and go into @ai->free)
379 *
380 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
381 * < 0 indicates an internal error.
382 */
383static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
384 int *pebs, int pool_size, unsigned long long *max_sqnum,
385 struct list_head *eba_orphans, struct list_head *free)
386{
387 struct ubi_vid_hdr *vh;
388 struct ubi_ec_hdr *ech;
389 struct ubi_ainf_peb *new_aeb, *tmp_aeb;
390 int i, pnum, err, found_orphan, ret = 0;
391
392 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
393 if (!ech)
394 return -ENOMEM;
395
396 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
397 if (!vh) {
398 kfree(ech);
399 return -ENOMEM;
400 }
401
402 dbg_bld("scanning fastmap pool: size = %i", pool_size);
403
404 /*
405 * Now scan all PEBs in the pool to find changes which have been made
406 * after the creation of the fastmap
407 */
408 for (i = 0; i < pool_size; i++) {
409 int scrub = 0;
410
411 pnum = be32_to_cpu(pebs[i]);
412
413 if (ubi_io_is_bad(ubi, pnum)) {
414 ubi_err("bad PEB in fastmap pool!");
415 ret = UBI_BAD_FASTMAP;
416 goto out;
417 }
418
419 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
420 if (err && err != UBI_IO_BITFLIPS) {
421 ubi_err("unable to read EC header! PEB:%i err:%i",
422 pnum, err);
423 ret = err > 0 ? UBI_BAD_FASTMAP : err;
424 goto out;
 425 } else if (err == UBI_IO_BITFLIPS)
426 scrub = 1;
427
428 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
429 ubi_err("bad image seq: 0x%x, expected: 0x%x",
430 be32_to_cpu(ech->image_seq), ubi->image_seq);
 431 ret = UBI_BAD_FASTMAP;
432 goto out;
433 }
434
435 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
436 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
437 unsigned long long ec = be64_to_cpu(ech->ec);
438 unmap_peb(ai, pnum);
439 dbg_bld("Adding PEB to free: %i", pnum);
440 if (err == UBI_IO_FF_BITFLIPS)
441 add_aeb(ai, free, pnum, ec, 1);
442 else
443 add_aeb(ai, free, pnum, ec, 0);
444 continue;
445 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
446 dbg_bld("Found non empty PEB:%i in pool", pnum);
447
448 if (err == UBI_IO_BITFLIPS)
449 scrub = 1;
450
451 found_orphan = 0;
452 list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
453 if (tmp_aeb->pnum == pnum) {
454 found_orphan = 1;
455 break;
456 }
457 }
458 if (found_orphan) {
459 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
460 list_del(&tmp_aeb->u.list);
461 }
462
463 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
464 GFP_KERNEL);
465 if (!new_aeb) {
466 ret = -ENOMEM;
467 goto out;
468 }
469
470 new_aeb->ec = be64_to_cpu(ech->ec);
471 new_aeb->pnum = pnum;
472 new_aeb->lnum = be32_to_cpu(vh->lnum);
473 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
474 new_aeb->copy_flag = vh->copy_flag;
475 new_aeb->scrub = scrub;
476
477 if (*max_sqnum < new_aeb->sqnum)
478 *max_sqnum = new_aeb->sqnum;
479
480 err = process_pool_aeb(ubi, ai, vh, new_aeb);
481 if (err) {
482 ret = err > 0 ? UBI_BAD_FASTMAP : err;
483 goto out;
484 }
485 } else {
486 /* We are paranoid and fall back to scanning mode */
487 ubi_err("fastmap pool PEBs contains damaged PEBs!");
488 ret = err > 0 ? UBI_BAD_FASTMAP : err;
489 goto out;
490 }
491
492 }
493
494out:
495 ubi_free_vid_hdr(ubi, vh);
496 kfree(ech);
497 return ret;
498}
499
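Each pool PEB thus falls into one of three buckets, based on what ubi_io_read_vid_hdr() reports:

	UBI_IO_FF / UBI_IO_FF_BITFLIPS  ->  still empty, goes to the free list (add_aeb)
	0 / UBI_IO_BITFLIPS             ->  written after the fastmap, fed to process_pool_aeb()
	anything else                   ->  treated as damage, UBI_BAD_FASTMAP, caller
	                                    falls back to a full scan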
500/**
501 * count_fastmap_pebs - Counts the PEBs found by fastmap.
502 * @ai: The UBI attach info object
503 */
504static int count_fastmap_pebs(struct ubi_attach_info *ai)
505{
506 struct ubi_ainf_peb *aeb;
507 struct ubi_ainf_volume *av;
508 struct rb_node *rb1, *rb2;
509 int n = 0;
510
511 list_for_each_entry(aeb, &ai->erase, u.list)
512 n++;
513
514 list_for_each_entry(aeb, &ai->free, u.list)
515 n++;
516
517 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
518 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
519 n++;
520
521 return n;
522}
523
524/**
525 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
526 * @ubi: UBI device object
527 * @ai: UBI attach info object
528 * @fm: the fastmap to be attached
529 *
530 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
531 * < 0 indicates an internal error.
532 */
533static int ubi_attach_fastmap(struct ubi_device *ubi,
534 struct ubi_attach_info *ai,
535 struct ubi_fastmap_layout *fm)
536{
537 struct list_head used, eba_orphans, free;
538 struct ubi_ainf_volume *av;
539 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
540 struct ubi_ec_hdr *ech;
541 struct ubi_fm_sb *fmsb;
542 struct ubi_fm_hdr *fmhdr;
543 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
544 struct ubi_fm_ec *fmec;
545 struct ubi_fm_volhdr *fmvhdr;
546 struct ubi_fm_eba *fm_eba;
547 int ret, i, j, pool_size, wl_pool_size;
548 size_t fm_pos = 0, fm_size = ubi->fm_size;
549 unsigned long long max_sqnum = 0;
550 void *fm_raw = ubi->fm_buf;
551
552 INIT_LIST_HEAD(&used);
553 INIT_LIST_HEAD(&free);
554 INIT_LIST_HEAD(&eba_orphans);
555 INIT_LIST_HEAD(&ai->corr);
556 INIT_LIST_HEAD(&ai->free);
557 INIT_LIST_HEAD(&ai->erase);
558 INIT_LIST_HEAD(&ai->alien);
559 ai->volumes = RB_ROOT;
560 ai->min_ec = UBI_MAX_ERASECOUNTER;
561
562 ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
563 sizeof(struct ubi_ainf_peb),
564 0, 0, NULL);
565 if (!ai->aeb_slab_cache) {
566 ret = -ENOMEM;
567 goto fail;
568 }
569
570 fmsb = (struct ubi_fm_sb *)(fm_raw);
571 ai->max_sqnum = fmsb->sqnum;
572 fm_pos += sizeof(struct ubi_fm_sb);
573 if (fm_pos >= fm_size)
574 goto fail_bad;
575
576 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
577 fm_pos += sizeof(*fmhdr);
578 if (fm_pos >= fm_size)
579 goto fail_bad;
580
581 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
582 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
583 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
584 goto fail_bad;
585 }
586
587 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
588 fm_pos += sizeof(*fmpl1);
589 if (fm_pos >= fm_size)
590 goto fail_bad;
591 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
592 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
593 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
594 goto fail_bad;
595 }
596
597 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
598 fm_pos += sizeof(*fmpl2);
599 if (fm_pos >= fm_size)
600 goto fail_bad;
601 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
602 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
603 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
604 goto fail_bad;
605 }
606
607 pool_size = be16_to_cpu(fmpl1->size);
608 wl_pool_size = be16_to_cpu(fmpl2->size);
609 fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
610 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
611
612 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
613 ubi_err("bad pool size: %i", pool_size);
614 goto fail_bad;
615 }
616
617 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
618 ubi_err("bad WL pool size: %i", wl_pool_size);
619 goto fail_bad;
620 }
621
622
623 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
624 fm->max_pool_size < 0) {
625 ubi_err("bad maximal pool size: %i", fm->max_pool_size);
626 goto fail_bad;
627 }
628
629 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
630 fm->max_wl_pool_size < 0) {
631 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
632 goto fail_bad;
633 }
634
635 /* read EC values from free list */
636 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
637 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
638 fm_pos += sizeof(*fmec);
639 if (fm_pos >= fm_size)
640 goto fail_bad;
641
642 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
643 be32_to_cpu(fmec->ec), 0);
644 }
645
646 /* read EC values from used list */
647 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
648 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
649 fm_pos += sizeof(*fmec);
650 if (fm_pos >= fm_size)
651 goto fail_bad;
652
653 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
654 be32_to_cpu(fmec->ec), 0);
655 }
656
657 /* read EC values from scrub list */
658 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
659 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
660 fm_pos += sizeof(*fmec);
661 if (fm_pos >= fm_size)
662 goto fail_bad;
663
664 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
665 be32_to_cpu(fmec->ec), 1);
666 }
667
668 /* read EC values from erase list */
669 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
670 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
671 fm_pos += sizeof(*fmec);
672 if (fm_pos >= fm_size)
673 goto fail_bad;
674
675 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
676 be32_to_cpu(fmec->ec), 1);
677 }
678
679 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
680 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
681
682 /* Iterate over all volumes and read their EBA table */
683 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
684 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
685 fm_pos += sizeof(*fmvhdr);
686 if (fm_pos >= fm_size)
687 goto fail_bad;
688
689 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
690 ubi_err("bad fastmap vol header magic: 0x%x, " \
691 "expected: 0x%x",
692 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
693 goto fail_bad;
694 }
695
696 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
697 be32_to_cpu(fmvhdr->used_ebs),
698 be32_to_cpu(fmvhdr->data_pad),
699 fmvhdr->vol_type,
700 be32_to_cpu(fmvhdr->last_eb_bytes));
701
702 if (!av)
703 goto fail_bad;
704
705 ai->vols_found++;
706 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
707 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
708
709 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
710 fm_pos += sizeof(*fm_eba);
711 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
712 if (fm_pos >= fm_size)
713 goto fail_bad;
714
715 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
716 ubi_err("bad fastmap EBA header magic: 0x%x, " \
717 "expected: 0x%x",
718 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
719 goto fail_bad;
720 }
721
722 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
723 int pnum = be32_to_cpu(fm_eba->pnum[j]);
724
725 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
726 continue;
727
728 aeb = NULL;
729 list_for_each_entry(tmp_aeb, &used, u.list) {
730 if (tmp_aeb->pnum == pnum)
731 aeb = tmp_aeb;
732 }
733
734 /* This can happen if a PEB is already in an EBA known
735 * by this fastmap but the PEB itself is not in the used
736 * list.
737 * In this case the PEB can be within the fastmap pool
738 * or while writing the fastmap it was in the protection
739 * queue.
740 */
741 if (!aeb) {
742 aeb = kmem_cache_alloc(ai->aeb_slab_cache,
743 GFP_KERNEL);
744 if (!aeb) {
745 ret = -ENOMEM;
746
747 goto fail;
748 }
749
750 aeb->lnum = j;
751 aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
752 aeb->ec = -1;
753 aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
754 list_add_tail(&aeb->u.list, &eba_orphans);
755 continue;
756 }
757
758 aeb->lnum = j;
759
760 if (av->highest_lnum <= aeb->lnum)
761 av->highest_lnum = aeb->lnum;
762
763 assign_aeb_to_av(ai, aeb, av);
764
765 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
766 aeb->pnum, aeb->lnum, av->vol_id);
767 }
768
769 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
770 if (!ech) {
771 ret = -ENOMEM;
772 goto fail;
773 }
774
775 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
776 u.list) {
777 int err;
778
779 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
780 ubi_err("bad PEB in fastmap EBA orphan list");
781 ret = UBI_BAD_FASTMAP;
782 kfree(ech);
783 goto fail;
784 }
785
786 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
787 if (err && err != UBI_IO_BITFLIPS) {
788 ubi_err("unable to read EC header! PEB:%i " \
789 "err:%i", tmp_aeb->pnum, err);
790 ret = err > 0 ? UBI_BAD_FASTMAP : err;
791 kfree(ech);
792
793 goto fail;
794 } else if (err == UBI_IO_BITFLIPS)
795 tmp_aeb->scrub = 1;
796
797 tmp_aeb->ec = be64_to_cpu(ech->ec);
798 assign_aeb_to_av(ai, tmp_aeb, av);
799 }
800
801 kfree(ech);
802 }
803
804 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
805 &eba_orphans, &free);
806 if (ret)
807 goto fail;
808
809 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
810 &eba_orphans, &free);
811 if (ret)
812 goto fail;
813
814 if (max_sqnum > ai->max_sqnum)
815 ai->max_sqnum = max_sqnum;
816
817 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
818 list_del(&tmp_aeb->u.list);
819 list_add_tail(&tmp_aeb->u.list, &ai->free);
820 }
821
822 /*
823 * If fastmap is leaking PEBs (must not happen), raise a
824 * fat warning and fall back to scanning mode.
825 * We do this here because in ubi_wl_init() it's too late
826 * and we cannot fall back to scanning.
827 */
828 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
829 ai->bad_peb_count - fm->used_blocks))
830 goto fail_bad;
831
832 return 0;
833
834fail_bad:
835 ret = UBI_BAD_FASTMAP;
836fail:
837 return ret;
838}
839
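The order in which ubi_attach_fastmap() advances fm_pos doubles as a description of the serialised fastmap layout inside ubi->fm_buf:

	struct ubi_fm_sb                              superblock (version, CRC, block list)
	struct ubi_fm_hdr                             free/used/scrub/erase/bad/vol counts
	struct ubi_fm_scan_pool  x2                   pool and WL pool
	struct ubi_fm_ec  x (free+used+scrub+erase)   per-PEB erase counters
	per volume: struct ubi_fm_volhdr
	            struct ubi_fm_eba + __be32 pnum[reserved_pebs]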
840/**
841 * ubi_scan_fastmap - scan the fastmap.
842 * @ubi: UBI device object
843 * @ai: UBI attach info to be filled
844 * @fm_anchor: The fastmap starts at this PEB
845 *
846 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
847 * UBI_BAD_FASTMAP if one was found but is not usable.
848 * < 0 indicates an internal error.
849 */
850int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
851 int fm_anchor)
852{
853 struct ubi_fm_sb *fmsb, *fmsb2;
854 struct ubi_vid_hdr *vh;
855 struct ubi_ec_hdr *ech;
856 struct ubi_fastmap_layout *fm;
857 int i, used_blocks, pnum, ret = 0;
858 size_t fm_size;
859 __be32 crc, tmp_crc;
860 unsigned long long sqnum = 0;
861
862 mutex_lock(&ubi->fm_mutex);
863 memset(ubi->fm_buf, 0, ubi->fm_size);
864
865 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
866 if (!fmsb) {
867 ret = -ENOMEM;
868 goto out;
869 }
870
871 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
872 if (!fm) {
873 ret = -ENOMEM;
874 kfree(fmsb);
875 goto out;
876 }
877
878 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
879 if (ret && ret != UBI_IO_BITFLIPS)
880 goto free_fm_sb;
881 else if (ret == UBI_IO_BITFLIPS)
882 fm->to_be_tortured[0] = 1;
883
884 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
885 ubi_err("bad super block magic: 0x%x, expected: 0x%x",
886 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
887 ret = UBI_BAD_FASTMAP;
888 goto free_fm_sb;
889 }
890
891 if (fmsb->version != UBI_FM_FMT_VERSION) {
892 ubi_err("bad fastmap version: %i, expected: %i",
893 fmsb->version, UBI_FM_FMT_VERSION);
894 ret = UBI_BAD_FASTMAP;
895 goto free_fm_sb;
896 }
897
898 used_blocks = be32_to_cpu(fmsb->used_blocks);
899 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
900 ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
901 ret = UBI_BAD_FASTMAP;
902 goto free_fm_sb;
903 }
904
905 fm_size = ubi->leb_size * used_blocks;
906 if (fm_size != ubi->fm_size) {
907 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
908 ubi->fm_size);
909 ret = UBI_BAD_FASTMAP;
910 goto free_fm_sb;
911 }
912
913 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
914 if (!ech) {
915 ret = -ENOMEM;
916 goto free_fm_sb;
917 }
918
919 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
920 if (!vh) {
921 ret = -ENOMEM;
922 goto free_hdr;
923 }
924
925 for (i = 0; i < used_blocks; i++) {
926 pnum = be32_to_cpu(fmsb->block_loc[i]);
927
928 if (ubi_io_is_bad(ubi, pnum)) {
929 ret = UBI_BAD_FASTMAP;
930 goto free_hdr;
931 }
932
933 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
934 if (ret && ret != UBI_IO_BITFLIPS) {
935 ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
936 i, pnum);
937 if (ret > 0)
938 ret = UBI_BAD_FASTMAP;
939 goto free_hdr;
940 } else if (ret == UBI_IO_BITFLIPS)
941 fm->to_be_tortured[i] = 1;
942
943 if (!ubi->image_seq)
944 ubi->image_seq = be32_to_cpu(ech->image_seq);
945
946 if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
947 ret = UBI_BAD_FASTMAP;
948 goto free_hdr;
949 }
950
951 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
952 if (ret && ret != UBI_IO_BITFLIPS) {
953 ubi_err("unable to read fastmap block# %i (PEB: %i)",
954 i, pnum);
955 goto free_hdr;
956 }
957
958 if (i == 0) {
959 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
960 ubi_err("bad fastmap anchor vol_id: 0x%x," \
961 " expected: 0x%x",
962 be32_to_cpu(vh->vol_id),
963 UBI_FM_SB_VOLUME_ID);
964 ret = UBI_BAD_FASTMAP;
965 goto free_hdr;
966 }
967 } else {
968 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
969 ubi_err("bad fastmap data vol_id: 0x%x," \
970 " expected: 0x%x",
971 be32_to_cpu(vh->vol_id),
972 UBI_FM_DATA_VOLUME_ID);
973 ret = UBI_BAD_FASTMAP;
974 goto free_hdr;
975 }
976 }
977
978 if (sqnum < be64_to_cpu(vh->sqnum))
979 sqnum = be64_to_cpu(vh->sqnum);
980
981 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
982 ubi->leb_start, ubi->leb_size);
983 if (ret && ret != UBI_IO_BITFLIPS) {
984 ubi_err("unable to read fastmap block# %i (PEB: %i, " \
985 "err: %i)", i, pnum, ret);
986 goto free_hdr;
987 }
988 }
989
990 kfree(fmsb);
991 fmsb = NULL;
992
993 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
994 tmp_crc = be32_to_cpu(fmsb2->data_crc);
995 fmsb2->data_crc = 0;
996 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
997 if (crc != tmp_crc) {
998 ubi_err("fastmap data CRC is invalid");
999 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
1000 ret = UBI_BAD_FASTMAP;
1001 goto free_hdr;
1002 }
1003
1004 fmsb2->sqnum = sqnum;
1005
1006 fm->used_blocks = used_blocks;
1007
1008 ret = ubi_attach_fastmap(ubi, ai, fm);
1009 if (ret) {
1010 if (ret > 0)
1011 ret = UBI_BAD_FASTMAP;
1012 goto free_hdr;
1013 }
1014
1015 for (i = 0; i < used_blocks; i++) {
1016 struct ubi_wl_entry *e;
1017
1018 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1019 if (!e) {
1020 while (i--)
1021 kfree(fm->e[i]);
1022
1023 ret = -ENOMEM;
1024 goto free_hdr;
1025 }
1026
1027 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
1028 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
1029 fm->e[i] = e;
1030 }
1031
1032 ubi->fm = fm;
1033 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1034 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1035 ubi_msg("attached by fastmap");
1036 ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
1037 ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
1038 ubi->fm_disabled = 0;
1039
1040 ubi_free_vid_hdr(ubi, vh);
1041 kfree(ech);
1042out:
1043 mutex_unlock(&ubi->fm_mutex);
1044 if (ret == UBI_BAD_FASTMAP)
1045 ubi_err("Attach by fastmap failed, doing a full scan!");
1046 return ret;
1047
1048free_hdr:
1049 ubi_free_vid_hdr(ubi, vh);
1050 kfree(ech);
1051free_fm_sb:
1052 kfree(fmsb);
1053 kfree(fm);
1054 goto out;
1055}
1056
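A hedged sketch of how the attach path is expected to consume this return convention (the real call site is in attach.c, outside this diff; the fallback name is a placeholder):

	err = ubi_scan_fastmap(ubi, ai, fm_anchor);
	if (err > 0)	/* UBI_NO_FASTMAP or UBI_BAD_FASTMAP */
		err = full_scan(ubi, ai);	/* scan every PEB instead */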
1057/**
1058 * ubi_write_fastmap - writes a fastmap.
1059 * @ubi: UBI device object
1060 * @new_fm: the to be written fastmap
1061 *
1062 * Returns 0 on success, < 0 indicates an internal error.
1063 */
1064static int ubi_write_fastmap(struct ubi_device *ubi,
1065 struct ubi_fastmap_layout *new_fm)
1066{
1067 size_t fm_pos = 0;
1068 void *fm_raw;
1069 struct ubi_fm_sb *fmsb;
1070 struct ubi_fm_hdr *fmh;
1071 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1072 struct ubi_fm_ec *fec;
1073 struct ubi_fm_volhdr *fvh;
1074 struct ubi_fm_eba *feba;
1075 struct rb_node *node;
1076 struct ubi_wl_entry *wl_e;
1077 struct ubi_volume *vol;
1078 struct ubi_vid_hdr *avhdr, *dvhdr;
1079 struct ubi_work *ubi_wrk;
1080 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1081 int scrub_peb_count, erase_peb_count;
1082
1083 fm_raw = ubi->fm_buf;
1084 memset(ubi->fm_buf, 0, ubi->fm_size);
1085
1086 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1087 if (!avhdr) {
1088 ret = -ENOMEM;
1089 goto out;
1090 }
1091
1092 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1093 if (!dvhdr) {
1094 ret = -ENOMEM;
1095 goto out_kfree;
1096 }
1097
1098 spin_lock(&ubi->volumes_lock);
1099 spin_lock(&ubi->wl_lock);
1100
1101 fmsb = (struct ubi_fm_sb *)fm_raw;
1102 fm_pos += sizeof(*fmsb);
1103 ubi_assert(fm_pos <= ubi->fm_size);
1104
1105 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1106 fm_pos += sizeof(*fmh);
1107 ubi_assert(fm_pos <= ubi->fm_size);
1108
1109 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1110 fmsb->version = UBI_FM_FMT_VERSION;
1111 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1112 /* the max sqnum will be filled in while *reading* the fastmap */
1113 fmsb->sqnum = 0;
1114
1115 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1116 free_peb_count = 0;
1117 used_peb_count = 0;
1118 scrub_peb_count = 0;
1119 erase_peb_count = 0;
1120 vol_count = 0;
1121
1122 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1123 fm_pos += sizeof(*fmpl1);
1124 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1125 fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1126 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1127
1128 for (i = 0; i < ubi->fm_pool.size; i++)
1129 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1130
1131 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1132 fm_pos += sizeof(*fmpl2);
1133 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1134 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1135 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1136
1137 for (i = 0; i < ubi->fm_wl_pool.size; i++)
1138 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1139
1140 for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1141 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1142 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1143
1144 fec->pnum = cpu_to_be32(wl_e->pnum);
1145 fec->ec = cpu_to_be32(wl_e->ec);
1146
1147 free_peb_count++;
1148 fm_pos += sizeof(*fec);
1149 ubi_assert(fm_pos <= ubi->fm_size);
1150 }
1151 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1152
1153 for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1154 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1155 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1156
1157 fec->pnum = cpu_to_be32(wl_e->pnum);
1158 fec->ec = cpu_to_be32(wl_e->ec);
1159
1160 used_peb_count++;
1161 fm_pos += sizeof(*fec);
1162 ubi_assert(fm_pos <= ubi->fm_size);
1163 }
1164 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1165
1166 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1167 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1168 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1169
1170 fec->pnum = cpu_to_be32(wl_e->pnum);
1171 fec->ec = cpu_to_be32(wl_e->ec);
1172
1173 scrub_peb_count++;
1174 fm_pos += sizeof(*fec);
1175 ubi_assert(fm_pos <= ubi->fm_size);
1176 }
1177 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1178
1179
1180 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1181 if (ubi_is_erase_work(ubi_wrk)) {
1182 wl_e = ubi_wrk->e;
1183 ubi_assert(wl_e);
1184
1185 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1186
1187 fec->pnum = cpu_to_be32(wl_e->pnum);
1188 fec->ec = cpu_to_be32(wl_e->ec);
1189
1190 erase_peb_count++;
1191 fm_pos += sizeof(*fec);
1192 ubi_assert(fm_pos <= ubi->fm_size);
1193 }
1194 }
1195 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1196
1197 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1198 vol = ubi->volumes[i];
1199
1200 if (!vol)
1201 continue;
1202
1203 vol_count++;
1204
1205 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1206 fm_pos += sizeof(*fvh);
1207 ubi_assert(fm_pos <= ubi->fm_size);
1208
1209 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1210 fvh->vol_id = cpu_to_be32(vol->vol_id);
1211 fvh->vol_type = vol->vol_type;
1212 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1213 fvh->data_pad = cpu_to_be32(vol->data_pad);
1214 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1215
1216 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1217 vol->vol_type == UBI_STATIC_VOLUME);
1218
1219 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1220 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1221 ubi_assert(fm_pos <= ubi->fm_size);
1222
1223 for (j = 0; j < vol->reserved_pebs; j++)
1224 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1225
1226 feba->reserved_pebs = cpu_to_be32(j);
1227 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1228 }
1229 fmh->vol_count = cpu_to_be32(vol_count);
1230 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1231
1232 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1233 avhdr->lnum = 0;
1234
1235 spin_unlock(&ubi->wl_lock);
1236 spin_unlock(&ubi->volumes_lock);
1237
1238 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1239 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1240 if (ret) {
1241 ubi_err("unable to write vid_hdr to fastmap SB!");
1242 goto out_kfree;
1243 }
1244
1245 for (i = 0; i < new_fm->used_blocks; i++) {
1246 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1247 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1248 }
1249
1250 fmsb->data_crc = 0;
1251 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1252 ubi->fm_size));
1253
1254 for (i = 1; i < new_fm->used_blocks; i++) {
1255 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1256 dvhdr->lnum = cpu_to_be32(i);
1257 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1258 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1259 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1260 if (ret) {
1261 ubi_err("unable to write vid_hdr to PEB %i!",
1262 new_fm->e[i]->pnum);
1263 goto out_kfree;
1264 }
1265 }
1266
1267 for (i = 0; i < new_fm->used_blocks; i++) {
1268 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1269 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1270 if (ret) {
1271 ubi_err("unable to write fastmap to PEB %i!",
1272 new_fm->e[i]->pnum);
1273 goto out_kfree;
1274 }
1275 }
1276
1277 ubi_assert(new_fm);
1278 ubi->fm = new_fm;
1279
1280 dbg_bld("fastmap written!");
1281
1282out_kfree:
1283 ubi_free_vid_hdr(ubi, avhdr);
1284 ubi_free_vid_hdr(ubi, dvhdr);
1285out:
1286 return ret;
1287}
1288
1289/**
1290 * erase_block - Manually erase a PEB.
1291 * @ubi: UBI device object
1292 * @pnum: PEB to be erased
1293 *
1294 * Returns the new EC value on success, < 0 indicates an internal error.
1295 */
1296static int erase_block(struct ubi_device *ubi, int pnum)
1297{
1298 int ret;
1299 struct ubi_ec_hdr *ec_hdr;
1300 long long ec;
1301
1302 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1303 if (!ec_hdr)
1304 return -ENOMEM;
1305
1306 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1307 if (ret < 0)
1308 goto out;
1309 else if (ret && ret != UBI_IO_BITFLIPS) {
1310 ret = -EINVAL;
1311 goto out;
1312 }
1313
1314 ret = ubi_io_sync_erase(ubi, pnum, 0);
1315 if (ret < 0)
1316 goto out;
1317
1318 ec = be64_to_cpu(ec_hdr->ec);
1319 ec += ret;
1320 if (ec > UBI_MAX_ERASECOUNTER) {
1321 ret = -EINVAL;
1322 goto out;
1323 }
1324
1325 ec_hdr->ec = cpu_to_be64(ec);
1326 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1327 if (ret < 0)
1328 goto out;
1329
1330 ret = ec;
1331out:
1332 kfree(ec_hdr);
1333 return ret;
1334}
1335
1336/**
1337 * invalidate_fastmap - destroys a fastmap.
1338 * @ubi: UBI device object
1339 * @fm: the fastmap to be destroyed
1340 *
1341 * Returns 0 on success, < 0 indicates an internal error.
1342 */
1343static int invalidate_fastmap(struct ubi_device *ubi,
1344 struct ubi_fastmap_layout *fm)
1345{
1346 int ret, i;
1347 struct ubi_vid_hdr *vh;
1348
1349 ret = erase_block(ubi, fm->e[0]->pnum);
1350 if (ret < 0)
1351 return ret;
1352
1353 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1354 if (!vh)
1355 return -ENOMEM;
1356
1357 /* deleting the current fastmap SB is not enough, an old SB may exist,
1358 * so create a (corrupted) SB such that fastmap will find it and fall
1359 * back to scanning mode in any case */
1360 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1361 ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
1362
1363 for (i = 0; i < fm->used_blocks; i++)
1364 ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
1365
1366 return ret;
1367}
1368
1369/**
1370 * ubi_update_fastmap - will be called by UBI if a volume changes or
1371 * a fastmap pool becomes full.
1372 * @ubi: UBI device object
1373 *
1374 * Returns 0 on success, < 0 indicates an internal error.
1375 */
1376int ubi_update_fastmap(struct ubi_device *ubi)
1377{
1378 int ret, i;
1379 struct ubi_fastmap_layout *new_fm, *old_fm;
1380 struct ubi_wl_entry *tmp_e;
1381
1382 mutex_lock(&ubi->fm_mutex);
1383
1384 ubi_refill_pools(ubi);
1385
1386 if (ubi->ro_mode || ubi->fm_disabled) {
1387 mutex_unlock(&ubi->fm_mutex);
1388 return 0;
1389 }
1390
1391 ret = ubi_ensure_anchor_pebs(ubi);
1392 if (ret) {
1393 mutex_unlock(&ubi->fm_mutex);
1394 return ret;
1395 }
1396
1397 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1398 if (!new_fm) {
1399 mutex_unlock(&ubi->fm_mutex);
1400 return -ENOMEM;
1401 }
1402
1403 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1404
1405 for (i = 0; i < new_fm->used_blocks; i++) {
1406 new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1407 if (!new_fm->e[i]) {
1408 while (i--)
1409 kfree(new_fm->e[i]);
1410
1411 kfree(new_fm);
1412 mutex_unlock(&ubi->fm_mutex);
1413 return -ENOMEM;
1414 }
1415 }
1416
1417 old_fm = ubi->fm;
1418 ubi->fm = NULL;
1419
1420 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1421 ubi_err("fastmap too large");
1422 ret = -ENOSPC;
1423 goto err;
1424 }
1425
1426 for (i = 1; i < new_fm->used_blocks; i++) {
1427 spin_lock(&ubi->wl_lock);
1428 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1429 spin_unlock(&ubi->wl_lock);
1430
1431 if (!tmp_e && !old_fm) {
1432 int j;
1433 ubi_err("could not get any free erase block");
1434
1435 for (j = 1; j < i; j++)
1436 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1437
1438 ret = -ENOSPC;
1439 goto err;
1440 } else if (!tmp_e && old_fm) {
1441 ret = erase_block(ubi, old_fm->e[i]->pnum);
1442 if (ret < 0) {
1443 int j;
1444
1445 for (j = 1; j < i; j++)
1446 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1447 j, 0);
1448
1449 ubi_err("could not erase old fastmap PEB");
1450 goto err;
1451 }
1452
1453 new_fm->e[i]->pnum = old_fm->e[i]->pnum;
1454 new_fm->e[i]->ec = old_fm->e[i]->ec;
1455 } else {
1456 new_fm->e[i]->pnum = tmp_e->pnum;
1457 new_fm->e[i]->ec = tmp_e->ec;
1458
1459 if (old_fm)
1460 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1461 old_fm->to_be_tortured[i]);
1462 }
1463 }
1464
1465 spin_lock(&ubi->wl_lock);
1466 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1467 spin_unlock(&ubi->wl_lock);
1468
1469 if (old_fm) {
1470 /* no fresh anchor PEB was found, reuse the old one */
1471 if (!tmp_e) {
1472 ret = erase_block(ubi, old_fm->e[0]->pnum);
1473 if (ret < 0) {
1474 int i;
1475 ubi_err("could not erase old anchor PEB");
1476
1477 for (i = 1; i < new_fm->used_blocks; i++)
1478 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1479 i, 0);
1480 goto err;
1481 }
1482
1483 new_fm->e[0]->pnum = old_fm->e[0]->pnum;
1484 new_fm->e[0]->ec = ret;
1485 } else {
1486 /* we've got a new anchor PEB, return the old one */
1487 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1488 old_fm->to_be_tortured[0]);
1489
1490 new_fm->e[0]->pnum = tmp_e->pnum;
1491 new_fm->e[0]->ec = tmp_e->ec;
1492 }
1493 } else {
1494 if (!tmp_e) {
1495 int i;
1496 ubi_err("could not find any anchor PEB");
1497
1498 for (i = 1; i < new_fm->used_blocks; i++)
1499 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1500
1501 ret = -ENOSPC;
1502 goto err;
1503 }
1504
1505 new_fm->e[0]->pnum = tmp_e->pnum;
1506 new_fm->e[0]->ec = tmp_e->ec;
1507 }
1508
1509 down_write(&ubi->work_sem);
1510 down_write(&ubi->fm_sem);
1511 ret = ubi_write_fastmap(ubi, new_fm);
1512 up_write(&ubi->fm_sem);
1513 up_write(&ubi->work_sem);
1514
1515 if (ret)
1516 goto err;
1517
1518out_unlock:
1519 mutex_unlock(&ubi->fm_mutex);
1520 kfree(old_fm);
1521 return ret;
1522
1523err:
1524 kfree(new_fm);
1525
1526 ubi_warn("Unable to write new fastmap, err=%i", ret);
1527
1528 ret = 0;
1529 if (old_fm) {
1530 ret = invalidate_fastmap(ubi, old_fm);
1531 if (ret < 0)
1532 ubi_err("Unable to invalidiate current fastmap!");
1533 else if (ret)
1534 ret = 0;
1535 }
1536 goto out_unlock;
1537}
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 4e44bee4c564..4bd4db8c84c9 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -41,7 +41,7 @@
41#include "ubi-media.h" 41#include "ubi-media.h"
42 42
43#define err_msg(fmt, ...) \ 43#define err_msg(fmt, ...) \
44 printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \ 44 pr_err("gluebi (pid %d): %s: " fmt "\n", \
45 current->pid, __func__, ##__VA_ARGS__) 45 current->pid, __func__, ##__VA_ARGS__)
46 46
47/** 47/**
@@ -341,9 +341,8 @@ static int gluebi_create(struct ubi_device_info *di,
341 mutex_lock(&devices_mutex); 341 mutex_lock(&devices_mutex);
342 g = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 342 g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
343 if (g) 343 if (g)
344 err_msg("gluebi MTD device %d form UBI device %d volume %d " 344 err_msg("gluebi MTD device %d form UBI device %d volume %d already exists",
345 "already exists", g->mtd.index, vi->ubi_num, 345 g->mtd.index, vi->ubi_num, vi->vol_id);
346 vi->vol_id);
347 mutex_unlock(&devices_mutex); 346 mutex_unlock(&devices_mutex);
348 347
349 if (mtd_device_register(mtd, NULL, 0)) { 348 if (mtd_device_register(mtd, NULL, 0)) {
@@ -376,8 +375,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
376 mutex_lock(&devices_mutex); 375 mutex_lock(&devices_mutex);
377 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 376 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
378 if (!gluebi) { 377 if (!gluebi) {
379 err_msg("got remove notification for unknown UBI device %d " 378 err_msg("got remove notification for unknown UBI device %d volume %d",
380 "volume %d", vi->ubi_num, vi->vol_id); 379 vi->ubi_num, vi->vol_id);
381 err = -ENOENT; 380 err = -ENOENT;
382 } else if (gluebi->refcnt) 381 } else if (gluebi->refcnt)
383 err = -EBUSY; 382 err = -EBUSY;
@@ -390,9 +389,8 @@ static int gluebi_remove(struct ubi_volume_info *vi)
390 mtd = &gluebi->mtd; 389 mtd = &gluebi->mtd;
391 err = mtd_device_unregister(mtd); 390 err = mtd_device_unregister(mtd);
392 if (err) { 391 if (err) {
393 err_msg("cannot remove fake MTD device %d, UBI device %d, " 392 err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
394 "volume %d, error %d", mtd->index, gluebi->ubi_num, 393 mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
395 gluebi->vol_id, err);
396 mutex_lock(&devices_mutex); 394 mutex_lock(&devices_mutex);
397 list_add_tail(&gluebi->list, &gluebi_devices); 395 list_add_tail(&gluebi->list, &gluebi_devices);
398 mutex_unlock(&devices_mutex); 396 mutex_unlock(&devices_mutex);
@@ -422,8 +420,8 @@ static int gluebi_updated(struct ubi_volume_info *vi)
422 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 420 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
423 if (!gluebi) { 421 if (!gluebi) {
424 mutex_unlock(&devices_mutex); 422 mutex_unlock(&devices_mutex);
425 err_msg("got update notification for unknown UBI device %d " 423 err_msg("got update notification for unknown UBI device %d volume %d",
426 "volume %d", vi->ubi_num, vi->vol_id); 424 vi->ubi_num, vi->vol_id);
427 return -ENOENT; 425 return -ENOENT;
428 } 426 }
429 427
@@ -449,8 +447,8 @@ static int gluebi_resized(struct ubi_volume_info *vi)
449 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); 447 gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
450 if (!gluebi) { 448 if (!gluebi) {
451 mutex_unlock(&devices_mutex); 449 mutex_unlock(&devices_mutex);
452 err_msg("got update notification for unknown UBI device %d " 450 err_msg("got update notification for unknown UBI device %d volume %d",
453 "volume %d", vi->ubi_num, vi->vol_id); 451 vi->ubi_num, vi->vol_id);
454 return -ENOENT; 452 return -ENOENT;
455 } 453 }
456 gluebi->mtd.size = vi->used_bytes; 454 gluebi->mtd.size = vi->used_bytes;
@@ -507,9 +505,9 @@ static void __exit ubi_gluebi_exit(void)
507 505
508 err = mtd_device_unregister(mtd); 506 err = mtd_device_unregister(mtd);
509 if (err) 507 if (err)
510 err_msg("error %d while removing gluebi MTD device %d, " 508 err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring",
511 "UBI device %d, volume %d - ignoring", err, 509 err, mtd->index, gluebi->ubi_num,
512 mtd->index, gluebi->ubi_num, gluebi->vol_id); 510 gluebi->vol_id);
513 kfree(mtd->name); 511 kfree(mtd->name);
514 kfree(gluebi); 512 kfree(gluebi);
515 } 513 }
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index a8d523794b52..78a1dcbf2107 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -177,21 +177,20 @@ retry:
177 * enabled. A corresponding message will be printed 177 * enabled. A corresponding message will be printed
178 * later, when it has been scrubbed. 178 * later, when it has been scrubbed.
179 */ 179 */
180 dbg_msg("fixable bit-flip detected at PEB %d", pnum); 180 ubi_msg("fixable bit-flip detected at PEB %d", pnum);
181 ubi_assert(len == read); 181 ubi_assert(len == read);
182 return UBI_IO_BITFLIPS; 182 return UBI_IO_BITFLIPS;
183 } 183 }
184 184
185 if (retries++ < UBI_IO_RETRIES) { 185 if (retries++ < UBI_IO_RETRIES) {
186 ubi_warn("error %d%s while reading %d bytes from PEB " 186 ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
187 "%d:%d, read only %zd bytes, retry",
188 err, errstr, len, pnum, offset, read); 187 err, errstr, len, pnum, offset, read);
189 yield(); 188 yield();
190 goto retry; 189 goto retry;
191 } 190 }
192 191
193 ubi_err("error %d%s while reading %d bytes from PEB %d:%d, " 192 ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
194 "read %zd bytes", err, errstr, len, pnum, offset, read); 193 err, errstr, len, pnum, offset, read);
195 dump_stack(); 194 dump_stack();
196 195
197 /* 196 /*
@@ -274,8 +273,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
274 } 273 }
275 274
276 if (ubi_dbg_is_write_failure(ubi)) { 275 if (ubi_dbg_is_write_failure(ubi)) {
277 ubi_err("cannot write %d bytes to PEB %d:%d " 276 ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
278 "(emulated)", len, pnum, offset); 277 len, pnum, offset);
279 dump_stack(); 278 dump_stack();
280 return -EIO; 279 return -EIO;
281 } 280 }
@@ -283,8 +282,8 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
283 addr = (loff_t)pnum * ubi->peb_size + offset; 282 addr = (loff_t)pnum * ubi->peb_size + offset;
284 err = mtd_write(ubi->mtd, addr, len, &written, buf); 283 err = mtd_write(ubi->mtd, addr, len, &written, buf);
285 if (err) { 284 if (err) {
286 ubi_err("error %d while writing %d bytes to PEB %d:%d, written " 285 ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
287 "%zd bytes", err, len, pnum, offset, written); 286 err, len, pnum, offset, written);
288 dump_stack(); 287 dump_stack();
289 ubi_dump_flash(ubi, pnum, offset, len); 288 ubi_dump_flash(ubi, pnum, offset, len);
290 } else 289 } else
@@ -685,8 +684,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
685 leb_start = be32_to_cpu(ec_hdr->data_offset); 684 leb_start = be32_to_cpu(ec_hdr->data_offset);
686 685
687 if (ec_hdr->version != UBI_VERSION) { 686 if (ec_hdr->version != UBI_VERSION) {
688 ubi_err("node with incompatible UBI version found: " 687 ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
689 "this UBI version is %d, image version is %d",
690 UBI_VERSION, (int)ec_hdr->version); 688 UBI_VERSION, (int)ec_hdr->version);
691 goto bad; 689 goto bad;
692 } 690 }
@@ -777,10 +775,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
777 if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { 775 if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
778 /* The physical eraseblock is supposedly empty */ 776 /* The physical eraseblock is supposedly empty */
779 if (verbose) 777 if (verbose)
780 ubi_warn("no EC header found at PEB %d, " 778 ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
781 "only 0xFF bytes", pnum); 779 pnum);
782 dbg_bld("no EC header found at PEB %d, " 780 dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
783 "only 0xFF bytes", pnum); 781 pnum);
784 if (!read_err) 782 if (!read_err)
785 return UBI_IO_FF; 783 return UBI_IO_FF;
786 else 784 else
@@ -792,12 +790,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
792 * 0xFF bytes. Report that the header is corrupted. 790 * 0xFF bytes. Report that the header is corrupted.
793 */ 791 */
794 if (verbose) { 792 if (verbose) {
795 ubi_warn("bad magic number at PEB %d: %08x instead of " 793 ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
796 "%08x", pnum, magic, UBI_EC_HDR_MAGIC); 794 pnum, magic, UBI_EC_HDR_MAGIC);
797 ubi_dump_ec_hdr(ec_hdr); 795 ubi_dump_ec_hdr(ec_hdr);
798 } 796 }
799 dbg_bld("bad magic number at PEB %d: %08x instead of " 797 dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
800 "%08x", pnum, magic, UBI_EC_HDR_MAGIC); 798 pnum, magic, UBI_EC_HDR_MAGIC);
801 return UBI_IO_BAD_HDR; 799 return UBI_IO_BAD_HDR;
802 } 800 }
803 801
@@ -806,12 +804,12 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
806 804
807 if (hdr_crc != crc) { 805 if (hdr_crc != crc) {
808 if (verbose) { 806 if (verbose) {
809 ubi_warn("bad EC header CRC at PEB %d, calculated " 807 ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
810 "%#08x, read %#08x", pnum, crc, hdr_crc); 808 pnum, crc, hdr_crc);
811 ubi_dump_ec_hdr(ec_hdr); 809 ubi_dump_ec_hdr(ec_hdr);
812 } 810 }
813 dbg_bld("bad EC header CRC at PEB %d, calculated " 811 dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
814 "%#08x, read %#08x", pnum, crc, hdr_crc); 812 pnum, crc, hdr_crc);
815 813
816 if (!read_err) 814 if (!read_err)
817 return UBI_IO_BAD_HDR; 815 return UBI_IO_BAD_HDR;
@@ -1032,10 +1030,10 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1032 1030
1033 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { 1031 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
1034 if (verbose) 1032 if (verbose)
1035 ubi_warn("no VID header found at PEB %d, " 1033 ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
1036 "only 0xFF bytes", pnum); 1034 pnum);
1037 dbg_bld("no VID header found at PEB %d, " 1035 dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
1038 "only 0xFF bytes", pnum); 1036 pnum);
1039 if (!read_err) 1037 if (!read_err)
1040 return UBI_IO_FF; 1038 return UBI_IO_FF;
1041 else 1039 else
@@ -1043,12 +1041,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1043 } 1041 }
1044 1042
1045 if (verbose) { 1043 if (verbose) {
1046 ubi_warn("bad magic number at PEB %d: %08x instead of " 1044 ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
1047 "%08x", pnum, magic, UBI_VID_HDR_MAGIC); 1045 pnum, magic, UBI_VID_HDR_MAGIC);
1048 ubi_dump_vid_hdr(vid_hdr); 1046 ubi_dump_vid_hdr(vid_hdr);
1049 } 1047 }
1050 dbg_bld("bad magic number at PEB %d: %08x instead of " 1048 dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
1051 "%08x", pnum, magic, UBI_VID_HDR_MAGIC); 1049 pnum, magic, UBI_VID_HDR_MAGIC);
1052 return UBI_IO_BAD_HDR; 1050 return UBI_IO_BAD_HDR;
1053 } 1051 }
1054 1052
@@ -1057,12 +1055,12 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1057 1055
1058 if (hdr_crc != crc) { 1056 if (hdr_crc != crc) {
1059 if (verbose) { 1057 if (verbose) {
1060 ubi_warn("bad CRC at PEB %d, calculated %#08x, " 1058 ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
1061 "read %#08x", pnum, crc, hdr_crc); 1059 pnum, crc, hdr_crc);
1062 ubi_dump_vid_hdr(vid_hdr); 1060 ubi_dump_vid_hdr(vid_hdr);
1063 } 1061 }
1064 dbg_bld("bad CRC at PEB %d, calculated %#08x, " 1062 dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
1065 "read %#08x", pnum, crc, hdr_crc); 1063 pnum, crc, hdr_crc);
1066 if (!read_err) 1064 if (!read_err)
1067 return UBI_IO_BAD_HDR; 1065 return UBI_IO_BAD_HDR;
1068 else 1066 else
@@ -1300,8 +1298,8 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1300 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); 1298 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
1301 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); 1299 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1302 if (hdr_crc != crc) { 1300 if (hdr_crc != crc) {
1303 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, " 1301 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
1304 "read %#08x", pnum, crc, hdr_crc); 1302 pnum, crc, hdr_crc);
1305 ubi_err("self-check failed for PEB %d", pnum); 1303 ubi_err("self-check failed for PEB %d", pnum);
1306 ubi_dump_vid_hdr(vid_hdr); 1304 ubi_dump_vid_hdr(vid_hdr);
1307 dump_stack(); 1305 dump_stack();
@@ -1411,15 +1409,15 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1411 1409
1412 err = mtd_read(ubi->mtd, addr, len, &read, buf); 1410 err = mtd_read(ubi->mtd, addr, len, &read, buf);
1413 if (err && !mtd_is_bitflip(err)) { 1411 if (err && !mtd_is_bitflip(err)) {
1414 ubi_err("error %d while reading %d bytes from PEB %d:%d, " 1412 ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
1415 "read %zd bytes", err, len, pnum, offset, read); 1413 err, len, pnum, offset, read);
1416 goto error; 1414 goto error;
1417 } 1415 }
1418 1416
1419 err = ubi_check_pattern(buf, 0xFF, len); 1417 err = ubi_check_pattern(buf, 0xFF, len);
1420 if (err == 0) { 1418 if (err == 0) {
1421 ubi_err("flash region at PEB %d:%d, length %d does not " 1419 ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
1422 "contain all 0xFF bytes", pnum, offset, len); 1420 pnum, offset, len);
1423 goto fail; 1421 goto fail;
1424 } 1422 }
1425 1423
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 8bbfb444b895..f913d701a5b3 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -121,10 +121,16 @@ void ubi_update_reserved(struct ubi_device *ubi)
121 */ 121 */
122void ubi_calculate_reserved(struct ubi_device *ubi) 122void ubi_calculate_reserved(struct ubi_device *ubi)
123{ 123{
124 ubi->beb_rsvd_level = ubi->good_peb_count/100; 124 /*
125 ubi->beb_rsvd_level *= CONFIG_MTD_UBI_BEB_RESERVE; 125 * Calculate the actual number of PEBs currently needed to be reserved
126 if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS) 126 * for future bad eraseblock handling.
127 ubi->beb_rsvd_level = MIN_RESEVED_PEBS; 127 */
128 ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
129 if (ubi->beb_rsvd_level < 0) {
130 ubi->beb_rsvd_level = 0;
131 ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
132 ubi->bad_peb_count, ubi->bad_peb_limit);
133 }
128} 134}
129 135
130/** 136/**
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index 468ffbc0eabd..ac2b24d1783d 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -375,4 +375,141 @@ struct ubi_vtbl_record {
375 __be32 crc; 375 __be32 crc;
376} __packed; 376} __packed;
377 377
378/* UBI fastmap on-flash data structures */
379
380#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
381#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
382
383/* fastmap on-flash data structure format version */
384#define UBI_FM_FMT_VERSION 1
385
386#define UBI_FM_SB_MAGIC 0x7B11D69F
387#define UBI_FM_HDR_MAGIC 0xD4B82EF7
388#define UBI_FM_VHDR_MAGIC 0xFA370ED1
389#define UBI_FM_POOL_MAGIC 0x67AF4D08
390#define UBI_FM_EBA_MAGIC 0xf0c040a8
391
392/* A fastmap super block can be located between PEB 0 and
393 * UBI_FM_MAX_START */
394#define UBI_FM_MAX_START 64
395
396/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
397#define UBI_FM_MAX_BLOCKS 32
398
399/* 5% of the total number of PEBs have to be scanned while attaching
400 * from a fastmap.
401 * But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
402 * UBI_FM_MAX_POOL_SIZE */
403#define UBI_FM_MIN_POOL_SIZE 8
404#define UBI_FM_MAX_POOL_SIZE 256
405
406#define UBI_FM_WL_POOL_SIZE 25
407
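As a rough illustration of how these limits interact, the attach-time scan pool can be thought of as 5% of all PEBs, clamped to the two bounds above. The exact computation lives in build.c (not part of this hunk), so the helper below is only a sketch; its name and the peb_count parameter are made up for illustration.

static int fm_pool_size_sketch(int peb_count)
{
	int size = peb_count / 20;	/* 5% of the total number of PEBs */

	if (size < UBI_FM_MIN_POOL_SIZE)
		size = UBI_FM_MIN_POOL_SIZE;
	if (size > UBI_FM_MAX_POOL_SIZE)
		size = UBI_FM_MAX_POOL_SIZE;

	return size;
}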
408/**
409 * struct ubi_fm_sb - UBI fastmap super block
410 * @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
411 * @version: format version of this fastmap
412 * @data_crc: CRC over the fastmap data
413 * @used_blocks: number of PEBs used by this fastmap
414 * @block_loc: an array containing the location of all PEBs of the fastmap
415 * @block_ec: the erase counter of each used PEB
416 * @sqnum: highest sequence number value at the time the fastmap was taken
417 *
418 */
419struct ubi_fm_sb {
420 __be32 magic;
421 __u8 version;
422 __u8 padding1[3];
423 __be32 data_crc;
424 __be32 used_blocks;
425 __be32 block_loc[UBI_FM_MAX_BLOCKS];
426 __be32 block_ec[UBI_FM_MAX_BLOCKS];
427 __be64 sqnum;
428 __u8 padding2[32];
429} __packed;
430
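The @data_crc field mirrors the write side shown in the fastmap.c hunk above: the CRC is taken over the whole raw fastmap with data_crc itself zeroed. A minimal read-side sketch, assuming fm_raw/fm_size describe the buffer read back from flash (the helper name is hypothetical):

static int fm_data_crc_ok(struct ubi_fm_sb *fmsb, void *fm_raw, size_t fm_size)
{
	__be32 stored = fmsb->data_crc;
	u32 calc;

	fmsb->data_crc = 0;
	calc = crc32(UBI_CRC32_INIT, fm_raw, fm_size);
	fmsb->data_crc = stored;	/* restore the on-flash value */

	return calc == be32_to_cpu(stored);
}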
431/**
432 * struct ubi_fm_hdr - header of the fastmap data set
433 * @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
434 * @free_peb_count: number of free PEBs known by this fastmap
435 * @used_peb_count: number of used PEBs known by this fastmap
436 * @scrub_peb_count: number of to be scrubbed PEBs known by this fastmap
437 * @bad_peb_count: number of bad PEBs known by this fastmap
438 * @erase_peb_count: number of bad PEBs which have to be erased
439 * @vol_count: number of UBI volumes known by this fastmap
440 */
441struct ubi_fm_hdr {
442 __be32 magic;
443 __be32 free_peb_count;
444 __be32 used_peb_count;
445 __be32 scrub_peb_count;
446 __be32 bad_peb_count;
447 __be32 erase_peb_count;
448 __be32 vol_count;
449 __u8 padding[4];
450} __packed;
451
452/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
453
454/**
455 * struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
456 * @magic: pool magic number (%UBI_FM_POOL_MAGIC)
457 * @size: current pool size
458 * @max_size: maximal pool size
459 * @pebs: an array containing the location of all PEBs in this pool
460 */
461struct ubi_fm_scan_pool {
462 __be32 magic;
463 __be16 size;
464 __be16 max_size;
465 __be32 pebs[UBI_FM_MAX_POOL_SIZE];
466 __be32 padding[4];
467} __packed;
468
469/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
470
471/**
472 * struct ubi_fm_ec - stores the erase counter of a PEB
473 * @pnum: PEB number
474 * @ec: ec of this PEB
475 */
476struct ubi_fm_ec {
477 __be32 pnum;
478 __be32 ec;
479} __packed;
480
481/**
482 * struct ubi_fm_volhdr - Fastmap volume header
483 * it identifies the start of an eba table
484 * @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
485 * @vol_id: volume id of the fastmapped volume
486 * @vol_type: type of the fastmapped volume
487 * @data_pad: data_pad value of the fastmapped volume
488 * @used_ebs: number of used LEBs within this volume
489 * @last_eb_bytes: number of bytes used in the last LEB
490 */
491struct ubi_fm_volhdr {
492 __be32 magic;
493 __be32 vol_id;
494 __u8 vol_type;
495 __u8 padding1[3];
496 __be32 data_pad;
497 __be32 used_ebs;
498 __be32 last_eb_bytes;
499 __u8 padding2[8];
500} __packed;
501
502/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
503
504/**
505 * struct ubi_fm_eba - denotes an association between a PEB and LEB
506 * @magic: EBA table magic number
507 * @reserved_pebs: number of table entries
508 * @pnum: PEB number of LEB (LEB is the index)
509 */
510struct ubi_fm_eba {
511 __be32 magic;
512 __be32 reserved_pebs;
513 __be32 pnum[0];
514} __packed;
378#endif /* !__UBI_MEDIA_H__ */ 515#endif /* !__UBI_MEDIA_H__ */
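Taken together, these structures bound the worst-case fastmap size: one super block, one header, two scan pools, one ubi_fm_ec per PEB, plus per-volume headers and EBA entries, rounded up to whole LEBs. The sketch below only illustrates that relationship; the authoritative expression is ubi_calc_fm_size() in fastmap.c and may differ in detail.

static size_t fm_size_sketch(int peb_count, int vol_count, int leb_size)
{
	size_t size = sizeof(struct ubi_fm_sb) + sizeof(struct ubi_fm_hdr) +
		      2 * sizeof(struct ubi_fm_scan_pool) +
		      peb_count * sizeof(struct ubi_fm_ec) +
		      vol_count * sizeof(struct ubi_fm_volhdr) +
		      peb_count * sizeof(__be32);	/* EBA entries */

	return roundup(size, leb_size);
}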
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 84f66e3fa05d..7d57469723cf 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -51,17 +51,14 @@
51#define UBI_NAME_STR "ubi" 51#define UBI_NAME_STR "ubi"
52 52
53/* Normal UBI messages */ 53/* Normal UBI messages */
54#define ubi_msg(fmt, ...) printk(KERN_NOTICE "UBI: " fmt "\n", ##__VA_ARGS__) 54#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__)
55/* UBI warning messages */ 55/* UBI warning messages */
56#define ubi_warn(fmt, ...) printk(KERN_WARNING "UBI warning: %s: " fmt "\n", \ 56#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \
57 __func__, ##__VA_ARGS__) 57 __func__, ##__VA_ARGS__)
58/* UBI error messages */ 58/* UBI error messages */
59#define ubi_err(fmt, ...) printk(KERN_ERR "UBI error: %s: " fmt "\n", \ 59#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \
60 __func__, ##__VA_ARGS__) 60 __func__, ##__VA_ARGS__)
61 61
62/* Lowest number PEBs reserved for bad PEB handling */
63#define MIN_RESEVED_PEBS 2
64
65/* Background thread name pattern */ 62/* Background thread name pattern */
66#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" 63#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
67 64
@@ -136,6 +133,17 @@ enum {
136 MOVE_RETRY, 133 MOVE_RETRY,
137}; 134};
138 135
136/*
137 * Return codes of the fastmap sub-system
138 *
139 * UBI_NO_FASTMAP: No fastmap super block was found
140 * UBI_BAD_FASTMAP: A fastmap was found but it's unusable
141 */
142enum {
143 UBI_NO_FASTMAP = 1,
144 UBI_BAD_FASTMAP,
145};
146
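A positive return code from ubi_scan_fastmap() is therefore not fatal; it simply tells the attach path to fall back to a full media scan. A sketch of that branch, assuming a hypothetical scan_all() helper (the real control flow is in attach.c, which is not part of this hunk):

static int attach_with_fastmap_sketch(struct ubi_device *ubi,
				      struct ubi_attach_info *ai, int fm_anchor)
{
	int err = ubi_scan_fastmap(ubi, ai, fm_anchor);

	if (err > 0)	/* UBI_NO_FASTMAP or UBI_BAD_FASTMAP */
		err = scan_all(ubi, ai);	/* hypothetical full-scan helper */

	return err;
}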
139/** 147/**
140 * struct ubi_wl_entry - wear-leveling entry. 148 * struct ubi_wl_entry - wear-leveling entry.
141 * @u.rb: link in the corresponding (free/used) RB-tree 149 * @u.rb: link in the corresponding (free/used) RB-tree
@@ -202,6 +210,41 @@ struct ubi_rename_entry {
202struct ubi_volume_desc; 210struct ubi_volume_desc;
203 211
204/** 212/**
213 * struct ubi_fastmap_layout - in-memory fastmap data structure.
214 * @e: PEBs used by the current fastmap
215 * @to_be_tortured: non-zero if this PEB has to be tortured
216 * @used_blocks: number of used PEBs
217 * @max_pool_size: maximal size of the user pool
218 * @max_wl_pool_size: maximal size of the pool used by the WL sub-system
219 */
220struct ubi_fastmap_layout {
221 struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
222 int to_be_tortured[UBI_FM_MAX_BLOCKS];
223 int used_blocks;
224 int max_pool_size;
225 int max_wl_pool_size;
226};
227
228/**
229 * struct ubi_fm_pool - in-memory fastmap pool
230 * @pebs: PEBs in this pool
231 * @used: number of used PEBs
232 * @size: total number of PEBs in this pool
233 * @max_size: maximal size of the pool
234 *
235 * A pool gets filled with up to @max_size PEBs.
236 * If all PEBs within the pool are used, a new fastmap will be written
237 * to the flash and the pool gets refilled with empty PEBs.
238 *
239 */
240struct ubi_fm_pool {
241 int pebs[UBI_FM_MAX_POOL_SIZE];
242 int used;
243 int size;
244 int max_size;
245};
246
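The consumer side of this description, condensed below as a sketch; the complete logic, including locking and the separate wear-leveling pool, is in ubi_wl_get_peb() in the wl.c hunk further down.

static int pool_get_peb_sketch(struct ubi_device *ubi, struct ubi_fm_pool *pool)
{
	if (pool->used == pool->size)
		ubi_update_fastmap(ubi);	/* writes a new fastmap and refills the pool */

	if (!pool->size)
		return -ENOSPC;			/* refill found no free PEBs */

	return pool->pebs[pool->used++];
}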
247/**
205 * struct ubi_volume - UBI volume description data structure. 248 * struct ubi_volume - UBI volume description data structure.
206 * @dev: device object to make use of the the Linux device model 249 * @dev: device object to make use of the the Linux device model
207 * @cdev: character device object to create character device 250 * @cdev: character device object to create character device
@@ -336,9 +379,21 @@ struct ubi_wl_entry;
336 * @ltree: the lock tree 379 * @ltree: the lock tree
337 * @alc_mutex: serializes "atomic LEB change" operations 380 * @alc_mutex: serializes "atomic LEB change" operations
338 * 381 *
382 * @fm_disabled: non-zero if fastmap is disabled (default)
383 * @fm: in-memory data structure of the currently used fastmap
384 * @fm_pool: in-memory data structure of the fastmap pool
385 * @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
386 * sub-system
387 * @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
388 * @fm_buf: vmalloc()'d buffer which holds the raw fastmap
389 * @fm_size: fastmap size in bytes
390 * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
391 * @fm_work: fastmap work queue
392 *
339 * @used: RB-tree of used physical eraseblocks 393 * @used: RB-tree of used physical eraseblocks
340 * @erroneous: RB-tree of erroneous used physical eraseblocks 394 * @erroneous: RB-tree of erroneous used physical eraseblocks
341 * @free: RB-tree of free physical eraseblocks 395 * @free: RB-tree of free physical eraseblocks
396 * @free_count: Contains the number of elements in @free
342 * @scrub: RB-tree of physical eraseblocks which need scrubbing 397 * @scrub: RB-tree of physical eraseblocks which need scrubbing
343 * @pq: protection queue (contain physical eraseblocks which are temporarily 398 * @pq: protection queue (contain physical eraseblocks which are temporarily
344 * protected from the wear-leveling worker) 399 * protected from the wear-leveling worker)
@@ -363,6 +418,7 @@ struct ubi_wl_entry;
363 * @flash_size: underlying MTD device size (in bytes) 418 * @flash_size: underlying MTD device size (in bytes)
364 * @peb_count: count of physical eraseblocks on the MTD device 419 * @peb_count: count of physical eraseblocks on the MTD device
365 * @peb_size: physical eraseblock size 420 * @peb_size: physical eraseblock size
421 * @bad_peb_limit: top limit of expected bad physical eraseblocks
366 * @bad_peb_count: count of bad physical eraseblocks 422 * @bad_peb_count: count of bad physical eraseblocks
367 * @good_peb_count: count of good physical eraseblocks 423 * @good_peb_count: count of good physical eraseblocks
368 * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not 424 * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not
@@ -410,6 +466,7 @@ struct ubi_device {
410 int avail_pebs; 466 int avail_pebs;
411 int beb_rsvd_pebs; 467 int beb_rsvd_pebs;
412 int beb_rsvd_level; 468 int beb_rsvd_level;
469 int bad_peb_limit;
413 470
414 int autoresize_vol_id; 471 int autoresize_vol_id;
415 int vtbl_slots; 472 int vtbl_slots;
@@ -427,10 +484,22 @@ struct ubi_device {
427 struct rb_root ltree; 484 struct rb_root ltree;
428 struct mutex alc_mutex; 485 struct mutex alc_mutex;
429 486
487 /* Fastmap stuff */
488 int fm_disabled;
489 struct ubi_fastmap_layout *fm;
490 struct ubi_fm_pool fm_pool;
491 struct ubi_fm_pool fm_wl_pool;
492 struct rw_semaphore fm_sem;
493 struct mutex fm_mutex;
494 void *fm_buf;
495 size_t fm_size;
496 struct work_struct fm_work;
497
430 /* Wear-leveling sub-system's stuff */ 498 /* Wear-leveling sub-system's stuff */
431 struct rb_root used; 499 struct rb_root used;
432 struct rb_root erroneous; 500 struct rb_root erroneous;
433 struct rb_root free; 501 struct rb_root free;
502 int free_count;
434 struct rb_root scrub; 503 struct rb_root scrub;
435 struct list_head pq[UBI_PROT_QUEUE_LEN]; 504 struct list_head pq[UBI_PROT_QUEUE_LEN];
436 int pq_head; 505 int pq_head;
@@ -597,6 +666,32 @@ struct ubi_attach_info {
597 struct kmem_cache *aeb_slab_cache; 666 struct kmem_cache *aeb_slab_cache;
598}; 667};
599 668
669/**
670 * struct ubi_work - UBI work description data structure.
671 * @list: a link in the list of pending works
672 * @func: worker function
673 * @e: physical eraseblock to erase
674 * @vol_id: the volume ID on which this erasure is being performed
675 * @lnum: the logical eraseblock number
676 * @torture: if the physical eraseblock has to be tortured
677 * @anchor: produce an anchor PEB to be used by fastmap
678 *
679 * The @func pointer points to the worker function. If the @cancel argument is
680 * not zero, the worker has to free the resources and exit immediately. The
681 * worker has to return zero in case of success and a negative error code in
682 * case of failure.
683 */
684struct ubi_work {
685 struct list_head list;
686 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
687 /* The below fields are only relevant to erasure works */
688 struct ubi_wl_entry *e;
689 int vol_id;
690 int lnum;
691 int torture;
692 int anchor;
693};
694
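The fastmap writer above walks &ubi->works and records the PEBs of pending erase works via ubi_is_erase_work(). A plausible one-line definition, assuming erase works are recognized by their worker function; the real implementation lives in wl.c, and erase_worker here stands in for wl.c's erase callback.

int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;	/* erase_worker: wl.c's erase callback */
}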
600#include "debug.h" 695#include "debug.h"
601 696
602extern struct kmem_cache *ubi_wl_entry_slab; 697extern struct kmem_cache *ubi_wl_entry_slab;
@@ -607,7 +702,7 @@ extern struct class *ubi_class;
607extern struct mutex ubi_devices_mutex; 702extern struct mutex ubi_devices_mutex;
608extern struct blocking_notifier_head ubi_notifiers; 703extern struct blocking_notifier_head ubi_notifiers;
609 704
610/* scan.c */ 705/* attach.c */
611int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, 706int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
612 int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips); 707 int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
613struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, 708struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
@@ -615,7 +710,7 @@ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
615void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av); 710void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
616struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, 711struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
617 struct ubi_attach_info *ai); 712 struct ubi_attach_info *ai);
618int ubi_attach(struct ubi_device *ubi); 713int ubi_attach(struct ubi_device *ubi, int force_scan);
619void ubi_destroy_ai(struct ubi_attach_info *ai); 714void ubi_destroy_ai(struct ubi_attach_info *ai);
620 715
621/* vtbl.c */ 716/* vtbl.c */
@@ -665,6 +760,9 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
665int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 760int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
666 struct ubi_vid_hdr *vid_hdr); 761 struct ubi_vid_hdr *vid_hdr);
667int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai); 762int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
763unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
764int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
765 struct ubi_attach_info *ai_scan);
668 766
669/* wl.c */ 767/* wl.c */
670int ubi_wl_get_peb(struct ubi_device *ubi); 768int ubi_wl_get_peb(struct ubi_device *ubi);
@@ -675,6 +773,12 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
675int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai); 773int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
676void ubi_wl_close(struct ubi_device *ubi); 774void ubi_wl_close(struct ubi_device *ubi);
677int ubi_thread(void *u); 775int ubi_thread(void *u);
776struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
777int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
778 int lnum, int torture);
779int ubi_is_erase_work(struct ubi_work *wrk);
780void ubi_refill_pools(struct ubi_device *ubi);
781int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
678 782
679/* io.c */ 783/* io.c */
680int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset, 784int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -694,7 +798,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
694 struct ubi_vid_hdr *vid_hdr); 798 struct ubi_vid_hdr *vid_hdr);
695 799
696/* build.c */ 800/* build.c */
697int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset); 801int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
802 int vid_hdr_offset, int max_beb_per1024);
698int ubi_detach_mtd_dev(int ubi_num, int anyway); 803int ubi_detach_mtd_dev(int ubi_num, int anyway);
699struct ubi_device *ubi_get_device(int ubi_num); 804struct ubi_device *ubi_get_device(int ubi_num);
700void ubi_put_device(struct ubi_device *ubi); 805void ubi_put_device(struct ubi_device *ubi);
@@ -711,6 +816,15 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
711void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); 816void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
712void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, 817void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
713 struct ubi_volume_info *vi); 818 struct ubi_volume_info *vi);
819/* scan.c */
820int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
821 int pnum, const struct ubi_vid_hdr *vid_hdr);
822
823/* fastmap.c */
824size_t ubi_calc_fm_size(struct ubi_device *ubi);
825int ubi_update_fastmap(struct ubi_device *ubi);
826int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
827 int fm_anchor);
714 828
715/* 829/*
716 * ubi_rb_for_each_entry - walk an RB-tree. 830 * ubi_rb_for_each_entry - walk an RB-tree.
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 437bc193e170..926e3df14fb2 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -270,8 +270,8 @@ static int vtbl_check(const struct ubi_device *ubi,
270 270
271 if (len1 > 0 && len1 == len2 && 271 if (len1 > 0 && len1 == len2 &&
272 !strncmp(vtbl[i].name, vtbl[n].name, len1)) { 272 !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
273 ubi_err("volumes %d and %d have the same name" 273 ubi_err("volumes %d and %d have the same name \"%s\"",
274 " \"%s\"", i, n, vtbl[i].name); 274 i, n, vtbl[i].name);
275 ubi_dump_vtbl_record(&vtbl[i], i); 275 ubi_dump_vtbl_record(&vtbl[i], i);
276 ubi_dump_vtbl_record(&vtbl[n], n); 276 ubi_dump_vtbl_record(&vtbl[n], n);
277 return -EINVAL; 277 return -EINVAL;
@@ -304,7 +304,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
304 struct ubi_vid_hdr *vid_hdr; 304 struct ubi_vid_hdr *vid_hdr;
305 struct ubi_ainf_peb *new_aeb; 305 struct ubi_ainf_peb *new_aeb;
306 306
307 ubi_msg("create volume table (copy #%d)", copy + 1); 307 dbg_gen("create volume table (copy #%d)", copy + 1);
308 308
309 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); 309 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
310 if (!vid_hdr) 310 if (!vid_hdr)
@@ -340,7 +340,7 @@ retry:
340 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. 340 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
341 */ 341 */
342 err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); 342 err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
343 kfree(new_aeb); 343 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
344 ubi_free_vid_hdr(ubi, vid_hdr); 344 ubi_free_vid_hdr(ubi, vid_hdr);
345 return err; 345 return err;
346 346
@@ -353,7 +353,7 @@ write_error:
353 list_add(&new_aeb->u.list, &ai->erase); 353 list_add(&new_aeb->u.list, &ai->erase);
354 goto retry; 354 goto retry;
355 } 355 }
356 kfree(new_aeb); 356 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
357out_free: 357out_free:
358 ubi_free_vid_hdr(ubi, vid_hdr); 358 ubi_free_vid_hdr(ubi, vid_hdr);
359 return err; 359 return err;
@@ -562,8 +562,8 @@ static int init_volumes(struct ubi_device *ubi,
562 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { 562 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
563 /* Auto re-size flag may be set only for one volume */ 563 /* Auto re-size flag may be set only for one volume */
564 if (ubi->autoresize_vol_id != -1) { 564 if (ubi->autoresize_vol_id != -1) {
565 ubi_err("more than one auto-resize volume (%d " 565 ubi_err("more than one auto-resize volume (%d and %d)",
566 "and %d)", ubi->autoresize_vol_id, i); 566 ubi->autoresize_vol_id, i);
567 kfree(vol); 567 kfree(vol);
568 return -EINVAL; 568 return -EINVAL;
569 } 569 }
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index b6be644e7b85..da7b44998b40 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -135,36 +135,48 @@
135 */ 135 */
136#define WL_MAX_FAILURES 32 136#define WL_MAX_FAILURES 32
137 137
138/**
139 * struct ubi_work - UBI work description data structure.
140 * @list: a link in the list of pending works
141 * @func: worker function
142 * @e: physical eraseblock to erase
143 * @vol_id: the volume ID on which this erasure is being performed
144 * @lnum: the logical eraseblock number
145 * @torture: if the physical eraseblock has to be tortured
146 *
147 * The @func pointer points to the worker function. If the @cancel argument is
148 * not zero, the worker has to free the resources and exit immediately. The
149 * worker has to return zero in case of success and a negative error code in
150 * case of failure.
151 */
152struct ubi_work {
153 struct list_head list;
154 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
155 /* The below fields are only relevant to erasure works */
156 struct ubi_wl_entry *e;
157 int vol_id;
158 int lnum;
159 int torture;
160};
161
162static int self_check_ec(struct ubi_device *ubi, int pnum, int ec); 138static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
163static int self_check_in_wl_tree(const struct ubi_device *ubi, 139static int self_check_in_wl_tree(const struct ubi_device *ubi,
164 struct ubi_wl_entry *e, struct rb_root *root); 140 struct ubi_wl_entry *e, struct rb_root *root);
165static int self_check_in_pq(const struct ubi_device *ubi, 141static int self_check_in_pq(const struct ubi_device *ubi,
166 struct ubi_wl_entry *e); 142 struct ubi_wl_entry *e);
167 143
144#ifdef CONFIG_MTD_UBI_FASTMAP
145/**
146 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
147 * @wrk: the work description object
148 */
149static void update_fastmap_work_fn(struct work_struct *wrk)
150{
151 struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
152 ubi_update_fastmap(ubi);
153}
154
155/**
156 * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
157 * @ubi: UBI device description object
158 * @pnum: the PEB to be checked
159 */
160static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
161{
162 int i;
163
164 if (!ubi->fm)
165 return 0;
166
167 for (i = 0; i < ubi->fm->used_blocks; i++)
168 if (ubi->fm->e[i]->pnum == pnum)
169 return 1;
170
171 return 0;
172}
173#else
174static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
175{
176 return 0;
177}
178#endif
179
168/** 180/**
169 * wl_tree_add - add a wear-leveling entry to a WL RB-tree. 181 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
170 * @e: the wear-leveling entry to add 182 * @e: the wear-leveling entry to add
@@ -261,18 +273,16 @@ static int produce_free_peb(struct ubi_device *ubi)
261{ 273{
262 int err; 274 int err;
263 275
264 spin_lock(&ubi->wl_lock);
265 while (!ubi->free.rb_node) { 276 while (!ubi->free.rb_node) {
266 spin_unlock(&ubi->wl_lock); 277 spin_unlock(&ubi->wl_lock);
267 278
268 dbg_wl("do one work synchronously"); 279 dbg_wl("do one work synchronously");
269 err = do_work(ubi); 280 err = do_work(ubi);
270 if (err)
271 return err;
272 281
273 spin_lock(&ubi->wl_lock); 282 spin_lock(&ubi->wl_lock);
283 if (err)
284 return err;
274 } 285 }
275 spin_unlock(&ubi->wl_lock);
276 286
277 return 0; 287 return 0;
278} 288}
@@ -339,16 +349,18 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
339 349
340/** 350/**
341 * find_wl_entry - find wear-leveling entry closest to certain erase counter. 351 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
352 * @ubi: UBI device description object
342 * @root: the RB-tree where to look for 353 * @root: the RB-tree where to look for
343 * @diff: maximum possible difference from the smallest erase counter 354 * @diff: maximum possible difference from the smallest erase counter
344 * 355 *
345 * This function looks for a wear leveling entry with erase counter closest to 356 * This function looks for a wear leveling entry with erase counter closest to
346 * min + @diff, where min is the smallest erase counter. 357 * min + @diff, where min is the smallest erase counter.
347 */ 358 */
348static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff) 359static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
360 struct rb_root *root, int diff)
349{ 361{
350 struct rb_node *p; 362 struct rb_node *p;
351 struct ubi_wl_entry *e; 363 struct ubi_wl_entry *e, *prev_e = NULL;
352 int max; 364 int max;
353 365
354 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); 366 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
@@ -363,35 +375,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
363 p = p->rb_left; 375 p = p->rb_left;
364 else { 376 else {
365 p = p->rb_right; 377 p = p->rb_right;
378 prev_e = e;
366 e = e1; 379 e = e1;
367 } 380 }
368 } 381 }
369 382
383 /* If no fastmap has been written and this WL entry can be used
384 * as anchor PEB, hold it back and return the second best WL entry
385 * such that fastmap can use the anchor PEB later. */
386 if (prev_e && !ubi->fm_disabled &&
387 !ubi->fm && e->pnum < UBI_FM_MAX_START)
388 return prev_e;
389
390 return e;
391}
392
393/**
394 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
395 * @ubi: UBI device description object
396 * @root: the RB-tree where to look for
397 *
398 * This function looks for a wear leveling entry with medium erase counter,
399 * but not greater than or equal to the lowest erase counter plus
400 * %WL_FREE_MAX_DIFF/2.
401 */
402static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
403 struct rb_root *root)
404{
405 struct ubi_wl_entry *e, *first, *last;
406
407 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
408 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
409
410 if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
411 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
412
413#ifdef CONFIG_MTD_UBI_FASTMAP
414 /* If no fastmap has been written and this WL entry can be used
415 * as anchor PEB, hold it back and return the second best
416 * WL entry such that fastmap can use the anchor PEB later. */
417 if (e && !ubi->fm_disabled && !ubi->fm &&
418 e->pnum < UBI_FM_MAX_START)
419 e = rb_entry(rb_next(root->rb_node),
420 struct ubi_wl_entry, u.rb);
421#endif
422 } else
423 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
424
370 return e; 425 return e;
371} 426}
372 427
428#ifdef CONFIG_MTD_UBI_FASTMAP
429/**
430 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
431 * @root: the RB-tree where to look for
432 */
433static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
434{
435 struct rb_node *p;
436 struct ubi_wl_entry *e, *victim = NULL;
437 int max_ec = UBI_MAX_ERASECOUNTER;
438
439 ubi_rb_for_each_entry(p, e, root, u.rb) {
440 if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
441 victim = e;
442 max_ec = e->ec;
443 }
444 }
445
446 return victim;
447}
448
449static int anchor_pebs_avalible(struct rb_root *root)
450{
451 struct rb_node *p;
452 struct ubi_wl_entry *e;
453
454 ubi_rb_for_each_entry(p, e, root, u.rb)
455 if (e->pnum < UBI_FM_MAX_START)
456 return 1;
457
458 return 0;
459}
460
373/** 461/**
374 * ubi_wl_get_peb - get a physical eraseblock. 462 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
463 * @ubi: UBI device description object
464 * @anchor: This PEB will be used as anchor PEB by fastmap
465 *
466 * The function returns a physical erase block with a given maximal number
467 * and removes it from the wl subsystem.
468 * Must be called with wl_lock held!
469 */
470struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
471{
472 struct ubi_wl_entry *e = NULL;
473
474 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
475 goto out;
476
477 if (anchor)
478 e = find_anchor_wl_entry(&ubi->free);
479 else
480 e = find_mean_wl_entry(ubi, &ubi->free);
481
482 if (!e)
483 goto out;
484
485 self_check_in_wl_tree(ubi, e, &ubi->free);
486
487 /* remove it from the free list,
488 * the wl subsystem no longer knows this erase block */
489 rb_erase(&e->u.rb, &ubi->free);
490 ubi->free_count--;
491out:
492 return e;
493}
494#endif
495
496/**
497 * __wl_get_peb - get a physical eraseblock.
375 * @ubi: UBI device description object 498 * @ubi: UBI device description object
376 * 499 *
377 * This function returns a physical eraseblock in case of success and a 500 * This function returns a physical eraseblock in case of success and a
378 * negative error code in case of failure. Might sleep. 501 * negative error code in case of failure. Might sleep.
379 */ 502 */
380int ubi_wl_get_peb(struct ubi_device *ubi) 503static int __wl_get_peb(struct ubi_device *ubi)
381{ 504{
382 int err; 505 int err;
383 struct ubi_wl_entry *e, *first, *last; 506 struct ubi_wl_entry *e;
384 507
385retry: 508retry:
386 spin_lock(&ubi->wl_lock);
387 if (!ubi->free.rb_node) { 509 if (!ubi->free.rb_node) {
388 if (ubi->works_count == 0) { 510 if (ubi->works_count == 0) {
389 ubi_assert(list_empty(&ubi->works));
390 ubi_err("no free eraseblocks"); 511 ubi_err("no free eraseblocks");
391 spin_unlock(&ubi->wl_lock); 512 ubi_assert(list_empty(&ubi->works));
392 return -ENOSPC; 513 return -ENOSPC;
393 } 514 }
394 spin_unlock(&ubi->wl_lock);
395 515
396 err = produce_free_peb(ubi); 516 err = produce_free_peb(ubi);
397 if (err < 0) 517 if (err < 0)
@@ -399,13 +519,11 @@ retry:
399 goto retry; 519 goto retry;
400 } 520 }
401 521
402 first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb); 522 e = find_mean_wl_entry(ubi, &ubi->free);
403 last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb); 523 if (!e) {
404 524 ubi_err("no free eraseblocks");
405 if (last->ec - first->ec < WL_FREE_MAX_DIFF) 525 return -ENOSPC;
406 e = rb_entry(ubi->free.rb_node, struct ubi_wl_entry, u.rb); 526 }
407 else
408 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2);
409 527
410 self_check_in_wl_tree(ubi, e, &ubi->free); 528 self_check_in_wl_tree(ubi, e, &ubi->free);
411 529
@@ -414,10 +532,14 @@ retry:
414 * be protected from being moved for some time. 532 * be protected from being moved for some time.
415 */ 533 */
416 rb_erase(&e->u.rb, &ubi->free); 534 rb_erase(&e->u.rb, &ubi->free);
535 ubi->free_count--;
417 dbg_wl("PEB %d EC %d", e->pnum, e->ec); 536 dbg_wl("PEB %d EC %d", e->pnum, e->ec);
537#ifndef CONFIG_MTD_UBI_FASTMAP
538 /* We have to enqueue e only if fastmap is disabled,
539 * if fastmap is enabled, prot_queue_add() will be called by
540 * ubi_wl_get_peb() after removing e from the pool. */
418 prot_queue_add(ubi, e); 541 prot_queue_add(ubi, e);
419 spin_unlock(&ubi->wl_lock); 542#endif
420
421 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset, 543 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
422 ubi->peb_size - ubi->vid_hdr_aloffset); 544 ubi->peb_size - ubi->vid_hdr_aloffset);
423 if (err) { 545 if (err) {
@@ -428,6 +550,150 @@ retry:
428 return e->pnum; 550 return e->pnum;
429} 551}
430 552
553#ifdef CONFIG_MTD_UBI_FASTMAP
554/**
555 * return_unused_pool_pebs - returns unused PEBs to the free tree.
556 * @ubi: UBI device description object
557 * @pool: fastmap pool description object
558 */
559static void return_unused_pool_pebs(struct ubi_device *ubi,
560 struct ubi_fm_pool *pool)
561{
562 int i;
563 struct ubi_wl_entry *e;
564
565 for (i = pool->used; i < pool->size; i++) {
566 e = ubi->lookuptbl[pool->pebs[i]];
567 wl_tree_add(e, &ubi->free);
568 ubi->free_count++;
569 }
570}
571
572/**
573 * refill_wl_pool - refills the fastmap pool used by the
574 * WL sub-system.
575 * @ubi: UBI device description object
576 */
577static void refill_wl_pool(struct ubi_device *ubi)
578{
579 struct ubi_wl_entry *e;
580 struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
581
582 return_unused_pool_pebs(ubi, pool);
583
584 for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
585 if (!ubi->free.rb_node ||
586 (ubi->free_count - ubi->beb_rsvd_pebs < 5))
587 break;
588
589 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
590 self_check_in_wl_tree(ubi, e, &ubi->free);
591 rb_erase(&e->u.rb, &ubi->free);
592 ubi->free_count--;
593
594 pool->pebs[pool->size] = e->pnum;
595 }
596 pool->used = 0;
597}
598
599/**
600 * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
601 * @ubi: UBI device description object
602 */
603static void refill_wl_user_pool(struct ubi_device *ubi)
604{
605 struct ubi_fm_pool *pool = &ubi->fm_pool;
606
607 return_unused_pool_pebs(ubi, pool);
608
609 for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
610 if (!ubi->free.rb_node ||
611 (ubi->free_count - ubi->beb_rsvd_pebs < 1))
612 break;
613
614 pool->pebs[pool->size] = __wl_get_peb(ubi);
615 if (pool->pebs[pool->size] < 0)
616 break;
617 }
618 pool->used = 0;
619}
620
621/**
622 * ubi_refill_pools - refills all fastmap PEB pools.
623 * @ubi: UBI device description object
624 */
625void ubi_refill_pools(struct ubi_device *ubi)
626{
627 spin_lock(&ubi->wl_lock);
628 refill_wl_pool(ubi);
629 refill_wl_user_pool(ubi);
630 spin_unlock(&ubi->wl_lock);
631}
632
633/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
634 * the fastmap pool.
635 */
636int ubi_wl_get_peb(struct ubi_device *ubi)
637{
638 int ret;
639 struct ubi_fm_pool *pool = &ubi->fm_pool;
640 struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
641
642 if (!pool->size || !wl_pool->size || pool->used == pool->size ||
643 wl_pool->used == wl_pool->size)
644 ubi_update_fastmap(ubi);
645
646	/* we did not get a single free PEB */
647 if (!pool->size)
648 ret = -ENOSPC;
649 else {
650 spin_lock(&ubi->wl_lock);
651 ret = pool->pebs[pool->used++];
652 prot_queue_add(ubi, ubi->lookuptbl[ret]);
653 spin_unlock(&ubi->wl_lock);
654 }
655
656 return ret;
657}
658
659/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
660 *
661 * @ubi: UBI device description object
662 */
663static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
664{
665 struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
666 int pnum;
667
668 if (pool->used == pool->size || !pool->size) {
669 /* We cannot update the fastmap here because this
670 * function is called in atomic context.
671 * Let's fail here and refill/update it as soon as possible. */
672 schedule_work(&ubi->fm_work);
673 return NULL;
674 } else {
675 pnum = pool->pebs[pool->used++];
676 return ubi->lookuptbl[pnum];
677 }
678}
679#else
680static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
681{
682 return find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
683}
684
685int ubi_wl_get_peb(struct ubi_device *ubi)
686{
687 int peb;
688
689 spin_lock(&ubi->wl_lock);
690 peb = __wl_get_peb(ubi);
691 spin_unlock(&ubi->wl_lock);
692
693 return peb;
694}
695#endif
696
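The two pools above serve different consumers: ubi->fm_pool feeds ubi_wl_get_peb() (and hence normal writes), while ubi->fm_wl_pool feeds get_peb_for_wl() inside the wear-leveling worker; both are refilled together under wl_lock by ubi_refill_pools(), which is meant to be called from the fastmap update code. The stand-alone sketch below only models the consume-until-empty/refill cycle; the demo_* names, POOL_MAX and the integer "free PEB" counter are invented for illustration and are not part of the kernel code.

	#include <stdio.h>

	#define POOL_MAX 8			/* stand-in for pool->max_size */

	struct demo_pool {
		int pebs[POOL_MAX];
		int size;			/* entries filled at the last refill */
		int used;			/* entries handed out since then */
	};

	static int next_free_peb = 100;		/* crude stand-in for ubi->free */

	/* Analogue of refill_wl_pool()/refill_wl_user_pool(): top the pool up. */
	static void demo_refill(struct demo_pool *pool)
	{
		for (pool->size = 0; pool->size < POOL_MAX; pool->size++)
			pool->pebs[pool->size] = next_free_peb++;
		pool->used = 0;
	}

	/* Analogue of ubi_wl_get_peb(): refill when the pool has run dry. */
	static int demo_get_peb(struct demo_pool *pool)
	{
		if (pool->used == pool->size)
			demo_refill(pool);	/* ubi_update_fastmap() runs here in UBI */
		return pool->pebs[pool->used++];
	}

	int main(void)
	{
		struct demo_pool pool = { .size = 0, .used = 0 };

		for (int i = 0; i < 10; i++)
			printf("got PEB %d\n", demo_get_peb(&pool));
		return 0;
	}

The real code differs in that a drained pool forces a full fastmap update, and leftover entries are handed back to the free tree by return_unused_pool_pebs() before refilling.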
431/** 697/**
432 * prot_queue_del - remove a physical eraseblock from the protection queue. 698 * prot_queue_del - remove a physical eraseblock from the protection queue.
433 * @ubi: UBI device description object 699 * @ubi: UBI device description object
@@ -558,14 +824,14 @@ repeat:
558} 824}
559 825
560/** 826/**
561 * schedule_ubi_work - schedule a work. 827 * __schedule_ubi_work - schedule a work.
562 * @ubi: UBI device description object 828 * @ubi: UBI device description object
563 * @wrk: the work to schedule 829 * @wrk: the work to schedule
564 * 830 *
565 * This function adds a work defined by @wrk to the tail of the pending works 831 * This function adds a work defined by @wrk to the tail of the pending works
566 * list. 832 * list. Can only be used if ubi->work_sem is already held in read mode!
567 */ 833 */
568static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) 834static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
569{ 835{
570 spin_lock(&ubi->wl_lock); 836 spin_lock(&ubi->wl_lock);
571 list_add_tail(&wrk->list, &ubi->works); 837 list_add_tail(&wrk->list, &ubi->works);
@@ -576,9 +842,35 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
576 spin_unlock(&ubi->wl_lock); 842 spin_unlock(&ubi->wl_lock);
577} 843}
578 844
845/**
846 * schedule_ubi_work - schedule a work.
847 * @ubi: UBI device description object
848 * @wrk: the work to schedule
849 *
850 * This function adds a work defined by @wrk to the tail of the pending works
851 * list.
852 */
853static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
854{
855 down_read(&ubi->work_sem);
856 __schedule_ubi_work(ubi, wrk);
857 up_read(&ubi->work_sem);
858}
859
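The split into __schedule_ubi_work() and schedule_ubi_work() follows the usual kernel convention: the double-underscore variant assumes the caller already holds ubi->work_sem for reading (the nested path used from erase_worker() via ensure_wear_leveling(ubi, 1), which already runs with work_sem held), while the plain variant takes and drops the semaphore itself; the usual reason for such a split is that re-taking the read side while a writer is queued can deadlock. Below is a minimal user-space illustration of the same pattern with a pthread rwlock; all names are invented, and the pending-work "list" is just a counter guarded by a separate mutex, mirroring how the real list is protected by ubi->wl_lock rather than by work_sem.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER; /* ~ ubi->work_sem */
	static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;    /* ~ ubi->wl_lock */
	static int works_count;

	/* Caller must already hold work_sem for reading. */
	static void __demo_schedule_work(void)
	{
		pthread_mutex_lock(&wl_lock);
		works_count++;			/* list_add_tail() analogue */
		pthread_mutex_unlock(&wl_lock);
	}

	/* Takes the read lock itself, like schedule_ubi_work(). */
	static void demo_schedule_work(void)
	{
		pthread_rwlock_rdlock(&work_sem);
		__demo_schedule_work();
		pthread_rwlock_unlock(&work_sem);
	}

	int main(void)
	{
		demo_schedule_work();
		printf("pending works: %d\n", works_count);
		return 0;
	}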
579static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, 860static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
580 int cancel); 861 int cancel);
581 862
863#ifdef CONFIG_MTD_UBI_FASTMAP
864/**
865 * ubi_is_erase_work - checks whether a work is erase work.
866 * @wrk: The work object to be checked
867 */
868int ubi_is_erase_work(struct ubi_work *wrk)
869{
870 return wrk->func == erase_worker;
871}
872#endif
873
582/** 874/**
583 * schedule_erase - schedule an erase work. 875 * schedule_erase - schedule an erase work.
584 * @ubi: UBI device description object 876 * @ubi: UBI device description object
@@ -595,6 +887,9 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
595{ 887{
596 struct ubi_work *wl_wrk; 888 struct ubi_work *wl_wrk;
597 889
890 ubi_assert(e);
891 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
892
598 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", 893 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
599 e->pnum, e->ec, torture); 894 e->pnum, e->ec, torture);
600 895
@@ -613,6 +908,79 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
613} 908}
614 909
615/** 910/**
911 * do_sync_erase - run the erase worker synchronously.
912 * @ubi: UBI device description object
913 * @e: the WL entry of the physical eraseblock to erase
914 * @vol_id: the volume ID that last used this PEB
915 * @lnum: the last used logical eraseblock number for the PEB
916 * @torture: if the physical eraseblock has to be tortured
917 *
918 */
919static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
920 int vol_id, int lnum, int torture)
921{
922 struct ubi_work *wl_wrk;
923
924 dbg_wl("sync erase of PEB %i", e->pnum);
925
926 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
927 if (!wl_wrk)
928 return -ENOMEM;
929
930 wl_wrk->e = e;
931 wl_wrk->vol_id = vol_id;
932 wl_wrk->lnum = lnum;
933 wl_wrk->torture = torture;
934
935 return erase_worker(ubi, wl_wrk, 0);
936}
937
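do_sync_erase() builds the same ubi_work item as schedule_erase() but invokes erase_worker() directly instead of appending it to ubi->works, so the wear-leveling paths that previously queued an erase now wait for it to finish. The fragment below is a generic illustration of queued versus in-line dispatch, not the UBI structures themselves; struct demo_work, the single-linked queue and the function names are made up.

	#include <stdio.h>
	#include <stdlib.h>

	struct demo_work {
		struct demo_work *next;
		int (*func)(struct demo_work *w);
		int pnum;
	};

	static struct demo_work *queue_head;

	static int demo_erase_worker(struct demo_work *w)
	{
		printf("erasing PEB %d\n", w->pnum);
		free(w);
		return 0;
	}

	/* Asynchronous path: queue the work for a background thread. */
	static void demo_schedule(struct demo_work *w)
	{
		w->next = queue_head;
		queue_head = w;
	}

	/* Synchronous path: run the very same worker function in-line. */
	static int demo_sync(struct demo_work *w)
	{
		return w->func(w);
	}

	int main(void)
	{
		struct demo_work *a = malloc(sizeof(*a));
		struct demo_work *b = malloc(sizeof(*b));

		if (!a || !b)
			return 1;
		a->func = b->func = demo_erase_worker;
		a->pnum = 41;
		b->pnum = 42;

		demo_schedule(a);		/* schedule_erase() analogue */
		demo_sync(b);			/* do_sync_erase() analogue */

		while (queue_head) {		/* background thread analogue */
			struct demo_work *w = queue_head;

			queue_head = w->next;
			w->func(w);
		}
		return 0;
	}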
938#ifdef CONFIG_MTD_UBI_FASTMAP
939/**
940 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
941 * sub-system.
942 * see: ubi_wl_put_peb()
943 *
944 * @ubi: UBI device description object
945 * @fm_e: physical eraseblock to return
946 * @lnum: the last used logical eraseblock number for the PEB
947 * @torture: if this physical eraseblock has to be tortured
948 */
949int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
950 int lnum, int torture)
951{
952 struct ubi_wl_entry *e;
953 int vol_id, pnum = fm_e->pnum;
954
955 dbg_wl("PEB %d", pnum);
956
957 ubi_assert(pnum >= 0);
958 ubi_assert(pnum < ubi->peb_count);
959
960 spin_lock(&ubi->wl_lock);
961 e = ubi->lookuptbl[pnum];
962
963	/* This can happen if we recovered from a fastmap for the very
964	 * first time and are now writing a new one. In this case the WL
965	 * sub-system has never seen any PEB used by the original fastmap.
966 */
967 if (!e) {
968 e = fm_e;
969 ubi_assert(e->ec >= 0);
970 ubi->lookuptbl[pnum] = e;
971 } else {
972 e->ec = fm_e->ec;
973 kfree(fm_e);
974 }
975
976 spin_unlock(&ubi->wl_lock);
977
978 vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
979 return schedule_erase(ubi, e, vol_id, lnum, torture);
980}
981#endif
982
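ubi_wl_put_fm_peb() has to cope with PEBs that carried the old fastmap but were never handed to the WL sub-system (attach recovered from the fastmap, so no ubi_wl_entry exists for them yet): if lookuptbl has no entry, the caller's entry is adopted; otherwise only the erase counter is taken over and the duplicate is freed. A small table-based sketch of that adopt-or-merge step follows, with invented names and plain malloc()/free() instead of the kmem cache.

	#include <stdlib.h>

	struct demo_entry {
		int pnum;
		int ec;		/* erase counter */
	};

	#define DEMO_PEB_COUNT 16
	static struct demo_entry *lookuptbl[DEMO_PEB_COUNT];

	/* Returns the entry that now owns the PEB; frees @incoming if it is a duplicate. */
	static struct demo_entry *demo_put_fm_peb(struct demo_entry *incoming)
	{
		struct demo_entry *e = lookuptbl[incoming->pnum];

		if (!e) {			/* WL never saw this PEB: adopt it */
			lookuptbl[incoming->pnum] = incoming;
			return incoming;
		}
		e->ec = incoming->ec;		/* keep the newer erase counter */
		free(incoming);
		return e;
	}

	int main(void)
	{
		struct demo_entry *fm_e = malloc(sizeof(*fm_e));

		if (!fm_e)
			return 1;
		fm_e->pnum = 3;
		fm_e->ec = 17;
		demo_put_fm_peb(fm_e);
		return 0;
	}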
983/**
616 * wear_leveling_worker - wear-leveling worker function. 984 * wear_leveling_worker - wear-leveling worker function.
617 * @ubi: UBI device description object 985 * @ubi: UBI device description object
618 * @wrk: the work object 986 * @wrk: the work object
@@ -627,6 +995,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
627{ 995{
628 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; 996 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
629 int vol_id = -1, uninitialized_var(lnum); 997 int vol_id = -1, uninitialized_var(lnum);
998#ifdef CONFIG_MTD_UBI_FASTMAP
999 int anchor = wrk->anchor;
1000#endif
630 struct ubi_wl_entry *e1, *e2; 1001 struct ubi_wl_entry *e1, *e2;
631 struct ubi_vid_hdr *vid_hdr; 1002 struct ubi_vid_hdr *vid_hdr;
632 1003
@@ -660,14 +1031,35 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
660 goto out_cancel; 1031 goto out_cancel;
661 } 1032 }
662 1033
1034#ifdef CONFIG_MTD_UBI_FASTMAP
1035 /* Check whether we need to produce an anchor PEB */
1036 if (!anchor)
1037 anchor = !anchor_pebs_avalible(&ubi->free);
1038
1039 if (anchor) {
1040 e1 = find_anchor_wl_entry(&ubi->used);
1041 if (!e1)
1042 goto out_cancel;
1043 e2 = get_peb_for_wl(ubi);
1044 if (!e2)
1045 goto out_cancel;
1046
1047 self_check_in_wl_tree(ubi, e1, &ubi->used);
1048 rb_erase(&e1->u.rb, &ubi->used);
1049 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
1050 } else if (!ubi->scrub.rb_node) {
1051#else
663 if (!ubi->scrub.rb_node) { 1052 if (!ubi->scrub.rb_node) {
1053#endif
664 /* 1054 /*
665 * Now pick the least worn-out used physical eraseblock and a 1055 * Now pick the least worn-out used physical eraseblock and a
666 * highly worn-out free physical eraseblock. If the erase 1056 * highly worn-out free physical eraseblock. If the erase
667 * counters differ much enough, start wear-leveling. 1057 * counters differ much enough, start wear-leveling.
668 */ 1058 */
669 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); 1059 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
670 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 1060 e2 = get_peb_for_wl(ubi);
1061 if (!e2)
1062 goto out_cancel;
671 1063
672 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { 1064 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
673 dbg_wl("no WL needed: min used EC %d, max free EC %d", 1065 dbg_wl("no WL needed: min used EC %d, max free EC %d",
@@ -682,14 +1074,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
682 /* Perform scrubbing */ 1074 /* Perform scrubbing */
683 scrubbing = 1; 1075 scrubbing = 1;
684 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb); 1076 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
685 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 1077 e2 = get_peb_for_wl(ubi);
1078 if (!e2)
1079 goto out_cancel;
1080
686 self_check_in_wl_tree(ubi, e1, &ubi->scrub); 1081 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
687 rb_erase(&e1->u.rb, &ubi->scrub); 1082 rb_erase(&e1->u.rb, &ubi->scrub);
688 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum); 1083 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
689 } 1084 }
690 1085
691 self_check_in_wl_tree(ubi, e2, &ubi->free);
692 rb_erase(&e2->u.rb, &ubi->free);
693 ubi->move_from = e1; 1086 ubi->move_from = e1;
694 ubi->move_to = e2; 1087 ubi->move_to = e2;
695 spin_unlock(&ubi->wl_lock); 1088 spin_unlock(&ubi->wl_lock);
@@ -806,7 +1199,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
806 ubi->move_to_put = ubi->wl_scheduled = 0; 1199 ubi->move_to_put = ubi->wl_scheduled = 0;
807 spin_unlock(&ubi->wl_lock); 1200 spin_unlock(&ubi->wl_lock);
808 1201
809 err = schedule_erase(ubi, e1, vol_id, lnum, 0); 1202 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
810 if (err) { 1203 if (err) {
811 kmem_cache_free(ubi_wl_entry_slab, e1); 1204 kmem_cache_free(ubi_wl_entry_slab, e1);
812 if (e2) 1205 if (e2)
@@ -821,7 +1214,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
821 */ 1214 */
822 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", 1215 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
823 e2->pnum, vol_id, lnum); 1216 e2->pnum, vol_id, lnum);
824 err = schedule_erase(ubi, e2, vol_id, lnum, 0); 1217 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
825 if (err) { 1218 if (err) {
826 kmem_cache_free(ubi_wl_entry_slab, e2); 1219 kmem_cache_free(ubi_wl_entry_slab, e2);
827 goto out_ro; 1220 goto out_ro;
@@ -860,7 +1253,7 @@ out_not_moved:
860 spin_unlock(&ubi->wl_lock); 1253 spin_unlock(&ubi->wl_lock);
861 1254
862 ubi_free_vid_hdr(ubi, vid_hdr); 1255 ubi_free_vid_hdr(ubi, vid_hdr);
863 err = schedule_erase(ubi, e2, vol_id, lnum, torture); 1256 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
864 if (err) { 1257 if (err) {
865 kmem_cache_free(ubi_wl_entry_slab, e2); 1258 kmem_cache_free(ubi_wl_entry_slab, e2);
866 goto out_ro; 1259 goto out_ro;
@@ -901,12 +1294,13 @@ out_cancel:
901/** 1294/**
902 * ensure_wear_leveling - schedule wear-leveling if it is needed. 1295 * ensure_wear_leveling - schedule wear-leveling if it is needed.
903 * @ubi: UBI device description object 1296 * @ubi: UBI device description object
1297 * @nested: set to non-zero if this function is called from the UBI worker
904 * 1298 *
905 * This function checks if it is time to start wear-leveling and schedules it 1299 * This function checks if it is time to start wear-leveling and schedules it
906 * if yes. This function returns zero in case of success and a negative error 1300 * if yes. This function returns zero in case of success and a negative error
907 * code in case of failure. 1301 * code in case of failure.
908 */ 1302 */
909static int ensure_wear_leveling(struct ubi_device *ubi) 1303static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
910{ 1304{
911 int err = 0; 1305 int err = 0;
912 struct ubi_wl_entry *e1; 1306 struct ubi_wl_entry *e1;
@@ -934,7 +1328,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
934 * %UBI_WL_THRESHOLD. 1328 * %UBI_WL_THRESHOLD.
935 */ 1329 */
936 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb); 1330 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
937 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 1331 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
938 1332
939 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) 1333 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
940 goto out_unlock; 1334 goto out_unlock;
@@ -951,8 +1345,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
951 goto out_cancel; 1345 goto out_cancel;
952 } 1346 }
953 1347
1348 wrk->anchor = 0;
954 wrk->func = &wear_leveling_worker; 1349 wrk->func = &wear_leveling_worker;
955 schedule_ubi_work(ubi, wrk); 1350 if (nested)
1351 __schedule_ubi_work(ubi, wrk);
1352 else
1353 schedule_ubi_work(ubi, wrk);
956 return err; 1354 return err;
957 1355
958out_cancel: 1356out_cancel:
@@ -963,6 +1361,38 @@ out_unlock:
963 return err; 1361 return err;
964} 1362}
965 1363
1364#ifdef CONFIG_MTD_UBI_FASTMAP
1365/**
1366 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
1367 * @ubi: UBI device description object
1368 */
1369int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
1370{
1371 struct ubi_work *wrk;
1372
1373 spin_lock(&ubi->wl_lock);
1374 if (ubi->wl_scheduled) {
1375 spin_unlock(&ubi->wl_lock);
1376 return 0;
1377 }
1378 ubi->wl_scheduled = 1;
1379 spin_unlock(&ubi->wl_lock);
1380
1381 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
1382 if (!wrk) {
1383 spin_lock(&ubi->wl_lock);
1384 ubi->wl_scheduled = 0;
1385 spin_unlock(&ubi->wl_lock);
1386 return -ENOMEM;
1387 }
1388
1389 wrk->anchor = 1;
1390 wrk->func = &wear_leveling_worker;
1391 schedule_ubi_work(ubi, wrk);
1392 return 0;
1393}
1394#endif
1395
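The anchor branch added to wear_leveling_worker() and ubi_ensure_anchor_pebs() above exist because the fastmap super block must sit in a low-numbered PEB so that attach only has to scan the beginning of the device to find it; when no such PEB is free, wear-leveling moves data out of one. The stand-alone sketch below shows only the "is an anchor candidate free, else pick a victim" decision over plain arrays; FM_MAX_START, the arrays and the deliberately simplified victim policy are illustrative assumptions, not the kernel helpers.

	#include <stdio.h>

	#define FM_MAX_START 64		/* stand-in for UBI_FM_MAX_START */

	/* Is any free PEB low-numbered enough to hold the fastmap super block? */
	static int anchor_available(const int *free_pebs, int nfree)
	{
		for (int i = 0; i < nfree; i++)
			if (free_pebs[i] < FM_MAX_START)
				return 1;
		return 0;
	}

	/* Pick some used PEB below the limit to move out of the way
	 * (the real selection policy is more involved; this is simplified). */
	static int pick_anchor_victim(const int *used_pebs, int nused)
	{
		for (int i = 0; i < nused; i++)
			if (used_pebs[i] < FM_MAX_START)
				return used_pebs[i];
		return -1;
	}

	int main(void)
	{
		int free_pebs[] = { 200, 310, 512 };
		int used_pebs[] = { 3, 70, 12, 400 };

		if (!anchor_available(free_pebs, 3))
			printf("evict PEB %d to create an anchor\n",
			       pick_anchor_victim(used_pebs, 4));
		return 0;
	}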
966/** 1396/**
967 * erase_worker - physical eraseblock erase worker function. 1397 * erase_worker - physical eraseblock erase worker function.
968 * @ubi: UBI device description object 1398 * @ubi: UBI device description object
@@ -978,9 +1408,10 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
978 int cancel) 1408 int cancel)
979{ 1409{
980 struct ubi_wl_entry *e = wl_wrk->e; 1410 struct ubi_wl_entry *e = wl_wrk->e;
981 int pnum = e->pnum, err, need; 1411 int pnum = e->pnum;
982 int vol_id = wl_wrk->vol_id; 1412 int vol_id = wl_wrk->vol_id;
983 int lnum = wl_wrk->lnum; 1413 int lnum = wl_wrk->lnum;
1414 int err, available_consumed = 0;
984 1415
985 if (cancel) { 1416 if (cancel) {
986 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec); 1417 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -992,6 +1423,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
992 dbg_wl("erase PEB %d EC %d LEB %d:%d", 1423 dbg_wl("erase PEB %d EC %d LEB %d:%d",
993 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum); 1424 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
994 1425
1426 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1427
995 err = sync_erase(ubi, e, wl_wrk->torture); 1428 err = sync_erase(ubi, e, wl_wrk->torture);
996 if (!err) { 1429 if (!err) {
997 /* Fine, we've erased it successfully */ 1430 /* Fine, we've erased it successfully */
@@ -999,6 +1432,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
999 1432
1000 spin_lock(&ubi->wl_lock); 1433 spin_lock(&ubi->wl_lock);
1001 wl_tree_add(e, &ubi->free); 1434 wl_tree_add(e, &ubi->free);
1435 ubi->free_count++;
1002 spin_unlock(&ubi->wl_lock); 1436 spin_unlock(&ubi->wl_lock);
1003 1437
1004 /* 1438 /*
@@ -1008,7 +1442,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1008 serve_prot_queue(ubi); 1442 serve_prot_queue(ubi);
1009 1443
1010 /* And take care about wear-leveling */ 1444 /* And take care about wear-leveling */
1011 err = ensure_wear_leveling(ubi); 1445 err = ensure_wear_leveling(ubi, 1);
1012 return err; 1446 return err;
1013 } 1447 }
1014 1448
@@ -1045,20 +1479,14 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1045 } 1479 }
1046 1480
1047 spin_lock(&ubi->volumes_lock); 1481 spin_lock(&ubi->volumes_lock);
1048 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1049 if (need > 0) {
1050 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1051 ubi->avail_pebs -= need;
1052 ubi->rsvd_pebs += need;
1053 ubi->beb_rsvd_pebs += need;
1054 if (need > 0)
1055 ubi_msg("reserve more %d PEBs", need);
1056 }
1057
1058 if (ubi->beb_rsvd_pebs == 0) { 1482 if (ubi->beb_rsvd_pebs == 0) {
1059 spin_unlock(&ubi->volumes_lock); 1483 if (ubi->avail_pebs == 0) {
1060 ubi_err("no reserved physical eraseblocks"); 1484 spin_unlock(&ubi->volumes_lock);
1061 goto out_ro; 1485 ubi_err("no reserved/available physical eraseblocks");
1486 goto out_ro;
1487 }
1488 ubi->avail_pebs -= 1;
1489 available_consumed = 1;
1062 } 1490 }
1063 spin_unlock(&ubi->volumes_lock); 1491 spin_unlock(&ubi->volumes_lock);
1064 1492
@@ -1068,19 +1496,36 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1068 goto out_ro; 1496 goto out_ro;
1069 1497
1070 spin_lock(&ubi->volumes_lock); 1498 spin_lock(&ubi->volumes_lock);
1071 ubi->beb_rsvd_pebs -= 1; 1499 if (ubi->beb_rsvd_pebs > 0) {
1500 if (available_consumed) {
1501 /*
1502 * The amount of reserved PEBs increased since we last
1503 * checked.
1504 */
1505 ubi->avail_pebs += 1;
1506 available_consumed = 0;
1507 }
1508 ubi->beb_rsvd_pebs -= 1;
1509 }
1072 ubi->bad_peb_count += 1; 1510 ubi->bad_peb_count += 1;
1073 ubi->good_peb_count -= 1; 1511 ubi->good_peb_count -= 1;
1074 ubi_calculate_reserved(ubi); 1512 ubi_calculate_reserved(ubi);
1075 if (ubi->beb_rsvd_pebs) 1513 if (available_consumed)
1514 ubi_warn("no PEBs in the reserved pool, used an available PEB");
1515 else if (ubi->beb_rsvd_pebs)
1076 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); 1516 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1077 else 1517 else
1078 ubi_warn("last PEB from the reserved pool was used"); 1518 ubi_warn("last PEB from the reserve was used");
1079 spin_unlock(&ubi->volumes_lock); 1519 spin_unlock(&ubi->volumes_lock);
1080 1520
1081 return err; 1521 return err;
1082 1522
1083out_ro: 1523out_ro:
1524 if (available_consumed) {
1525 spin_lock(&ubi->volumes_lock);
1526 ubi->avail_pebs += 1;
1527 spin_unlock(&ubi->volumes_lock);
1528 }
1084 ubi_ro_mode(ubi); 1529 ubi_ro_mode(ubi);
1085 return err; 1530 return err;
1086} 1531}
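The reworked bad-block handling in erase_worker() above no longer tops up beb_rsvd_pebs on demand; when the reserve is empty it borrows a single PEB from avail_pebs (available_consumed = 1) and gives it back only if the reserve grew again before the block was marked bad, or if marking it bad failed. A compact sketch of just that bookkeeping on the success path, with plain ints in place of the ubi_device fields and no locking:

	#include <stdio.h>

	struct demo_counts {
		int beb_rsvd_pebs;	/* PEBs reserved for bad-block handling */
		int avail_pebs;		/* PEBs still available to volumes */
		int bad_peb_count;
	};

	/* Returns 0 on success, -1 if there is nothing left to consume. */
	static int demo_mark_bad(struct demo_counts *c)
	{
		int available_consumed = 0;

		if (c->beb_rsvd_pebs == 0) {
			if (c->avail_pebs == 0)
				return -1;	/* UBI would go read-only here */
			c->avail_pebs--;	/* borrow from the available pool */
			available_consumed = 1;
		}

		/* the I/O layer would mark the block bad at this point */

		if (c->beb_rsvd_pebs > 0) {
			if (available_consumed) {
				c->avail_pebs++;	/* reserve was refilled meanwhile */
				available_consumed = 0;
			}
			c->beb_rsvd_pebs--;
		}
		c->bad_peb_count++;
		if (available_consumed)
			printf("no PEBs in the reserve, used an available PEB\n");
		return 0;
	}

	int main(void)
	{
		struct demo_counts c = { .beb_rsvd_pebs = 0, .avail_pebs = 2 };

		demo_mark_bad(&c);
		printf("avail=%d rsvd=%d bad=%d\n",
		       c.avail_pebs, c.beb_rsvd_pebs, c.bad_peb_count);
		return 0;
	}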
@@ -1189,7 +1634,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1189{ 1634{
1190 struct ubi_wl_entry *e; 1635 struct ubi_wl_entry *e;
1191 1636
1192 dbg_msg("schedule PEB %d for scrubbing", pnum); 1637 ubi_msg("schedule PEB %d for scrubbing", pnum);
1193 1638
1194retry: 1639retry:
1195 spin_lock(&ubi->wl_lock); 1640 spin_lock(&ubi->wl_lock);
@@ -1235,7 +1680,7 @@ retry:
1235 * Technically scrubbing is the same as wear-leveling, so it is done 1680 * Technically scrubbing is the same as wear-leveling, so it is done
1236 * by the WL worker. 1681 * by the WL worker.
1237 */ 1682 */
1238 return ensure_wear_leveling(ubi); 1683 return ensure_wear_leveling(ubi, 0);
1239} 1684}
1240 1685
1241/** 1686/**
@@ -1416,7 +1861,7 @@ static void cancel_pending(struct ubi_device *ubi)
1416 */ 1861 */
1417int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) 1862int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1418{ 1863{
1419 int err, i; 1864 int err, i, reserved_pebs, found_pebs = 0;
1420 struct rb_node *rb1, *rb2; 1865 struct rb_node *rb1, *rb2;
1421 struct ubi_ainf_volume *av; 1866 struct ubi_ainf_volume *av;
1422 struct ubi_ainf_peb *aeb, *tmp; 1867 struct ubi_ainf_peb *aeb, *tmp;
@@ -1428,6 +1873,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1428 init_rwsem(&ubi->work_sem); 1873 init_rwsem(&ubi->work_sem);
1429 ubi->max_ec = ai->max_ec; 1874 ubi->max_ec = ai->max_ec;
1430 INIT_LIST_HEAD(&ubi->works); 1875 INIT_LIST_HEAD(&ubi->works);
1876#ifdef CONFIG_MTD_UBI_FASTMAP
1877 INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1878#endif
1431 1879
1432 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num); 1880 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1433 1881
@@ -1449,13 +1897,17 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1449 1897
1450 e->pnum = aeb->pnum; 1898 e->pnum = aeb->pnum;
1451 e->ec = aeb->ec; 1899 e->ec = aeb->ec;
1900 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1452 ubi->lookuptbl[e->pnum] = e; 1901 ubi->lookuptbl[e->pnum] = e;
1453 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) { 1902 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1454 kmem_cache_free(ubi_wl_entry_slab, e); 1903 kmem_cache_free(ubi_wl_entry_slab, e);
1455 goto out_free; 1904 goto out_free;
1456 } 1905 }
1906
1907 found_pebs++;
1457 } 1908 }
1458 1909
1910 ubi->free_count = 0;
1459 list_for_each_entry(aeb, &ai->free, u.list) { 1911 list_for_each_entry(aeb, &ai->free, u.list) {
1460 cond_resched(); 1912 cond_resched();
1461 1913
@@ -1466,8 +1918,14 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1466 e->pnum = aeb->pnum; 1918 e->pnum = aeb->pnum;
1467 e->ec = aeb->ec; 1919 e->ec = aeb->ec;
1468 ubi_assert(e->ec >= 0); 1920 ubi_assert(e->ec >= 0);
1921 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1922
1469 wl_tree_add(e, &ubi->free); 1923 wl_tree_add(e, &ubi->free);
1924 ubi->free_count++;
1925
1470 ubi->lookuptbl[e->pnum] = e; 1926 ubi->lookuptbl[e->pnum] = e;
1927
1928 found_pebs++;
1471 } 1929 }
1472 1930
1473 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { 1931 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
@@ -1481,6 +1939,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1481 e->pnum = aeb->pnum; 1939 e->pnum = aeb->pnum;
1482 e->ec = aeb->ec; 1940 e->ec = aeb->ec;
1483 ubi->lookuptbl[e->pnum] = e; 1941 ubi->lookuptbl[e->pnum] = e;
1942
1484 if (!aeb->scrub) { 1943 if (!aeb->scrub) {
1485 dbg_wl("add PEB %d EC %d to the used tree", 1944 dbg_wl("add PEB %d EC %d to the used tree",
1486 e->pnum, e->ec); 1945 e->pnum, e->ec);
@@ -1490,22 +1949,38 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1490 e->pnum, e->ec); 1949 e->pnum, e->ec);
1491 wl_tree_add(e, &ubi->scrub); 1950 wl_tree_add(e, &ubi->scrub);
1492 } 1951 }
1952
1953 found_pebs++;
1493 } 1954 }
1494 } 1955 }
1495 1956
1496 if (ubi->avail_pebs < WL_RESERVED_PEBS) { 1957 dbg_wl("found %i PEBs", found_pebs);
1958
1959 if (ubi->fm)
1960	ubi_assert(ubi->good_peb_count ==
1961 found_pebs + ubi->fm->used_blocks);
1962 else
1963 ubi_assert(ubi->good_peb_count == found_pebs);
1964
1965 reserved_pebs = WL_RESERVED_PEBS;
1966#ifdef CONFIG_MTD_UBI_FASTMAP
1967	/* Reserve enough PEBs to store two fastmaps. */
1968 reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1969#endif
1970
1971 if (ubi->avail_pebs < reserved_pebs) {
1497 ubi_err("no enough physical eraseblocks (%d, need %d)", 1972 ubi_err("no enough physical eraseblocks (%d, need %d)",
1498 ubi->avail_pebs, WL_RESERVED_PEBS); 1973 ubi->avail_pebs, reserved_pebs);
1499 if (ubi->corr_peb_count) 1974 if (ubi->corr_peb_count)
1500 ubi_err("%d PEBs are corrupted and not used", 1975 ubi_err("%d PEBs are corrupted and not used",
1501 ubi->corr_peb_count); 1976 ubi->corr_peb_count);
1502 goto out_free; 1977 goto out_free;
1503 } 1978 }
1504 ubi->avail_pebs -= WL_RESERVED_PEBS; 1979 ubi->avail_pebs -= reserved_pebs;
1505 ubi->rsvd_pebs += WL_RESERVED_PEBS; 1980 ubi->rsvd_pebs += reserved_pebs;
1506 1981
1507 /* Schedule wear-leveling if needed */ 1982 /* Schedule wear-leveling if needed */
1508 err = ensure_wear_leveling(ubi); 1983 err = ensure_wear_leveling(ubi, 0);
1509 if (err) 1984 if (err)
1510 goto out_free; 1985 goto out_free;
1511 1986
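With fastmap enabled, ubi_wl_init() now keeps back not only WL_RESERVED_PEBS but also room for two complete fastmaps, computed from fm_size and leb_size as in the hunk above. A worked example with invented geometry (128 KiB LEBs, a fastmap spanning 3 LEBs) and an assumed value for WL_RESERVED_PEBS; the numbers are for illustration only, not taken from a real device.

	#include <stdio.h>

	#define WL_RESERVED_PEBS 1	/* value assumed for this example */

	int main(void)
	{
		int leb_size = 128 * 1024;	/* invented example geometry */
		int fm_size  = 3 * leb_size;	/* one fastmap spans 3 LEBs here */

		int reserved_pebs = WL_RESERVED_PEBS;

		reserved_pebs += (fm_size / leb_size) * 2;	/* two fastmaps */

		printf("reserved_pebs = %d\n", reserved_pebs);	/* prints 7 */
		return 0;
	}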
@@ -1584,7 +2059,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1584 } 2059 }
1585 2060
1586 read_ec = be64_to_cpu(ec_hdr->ec); 2061 read_ec = be64_to_cpu(ec_hdr->ec);
1587 if (ec != read_ec) { 2062 if (ec != read_ec && read_ec - ec > 1) {
1588 ubi_err("self-check failed for PEB %d", pnum); 2063 ubi_err("self-check failed for PEB %d", pnum);
1589 ubi_err("read EC is %lld, should be %d", read_ec, ec); 2064 ubi_err("read EC is %lld, should be %d", read_ec, ec);
1590 dump_stack(); 2065 dump_stack();