Diffstat (limited to 'drivers/mtd')

-rw-r--r--  drivers/mtd/devices/block2mtd.c |   8
-rw-r--r--  drivers/mtd/ftl.c               |   4
-rw-r--r--  drivers/mtd/maps/Kconfig        |   7
-rw-r--r--  drivers/mtd/maps/Makefile       |   1
-rw-r--r--  drivers/mtd/maps/mtx-1_flash.c  |  93
-rw-r--r--  drivers/mtd/maps/omap_nor.c     |  23
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c    |   9
-rw-r--r--  drivers/mtd/mtdchar.c           |  31
-rw-r--r--  drivers/mtd/nand/cmx270_nand.c  |  79
-rw-r--r--  drivers/mtd/nand/orion_nand.c   |   3
-rw-r--r--  drivers/mtd/ubi/build.c         |  99
-rw-r--r--  drivers/mtd/ubi/cdev.c          | 241
-rw-r--r--  drivers/mtd/ubi/debug.c         | 158
-rw-r--r--  drivers/mtd/ubi/debug.h         |  74
-rw-r--r--  drivers/mtd/ubi/eba.c           |  77
-rw-r--r--  drivers/mtd/ubi/gluebi.c        |  16
-rw-r--r--  drivers/mtd/ubi/io.c            |  48
-rw-r--r--  drivers/mtd/ubi/kapi.c          |  50
-rw-r--r--  drivers/mtd/ubi/misc.c          |   2
-rw-r--r--  drivers/mtd/ubi/scan.c          | 136
-rw-r--r--  drivers/mtd/ubi/scan.h          |  21
-rw-r--r--  drivers/mtd/ubi/ubi-media.h     |  38
-rw-r--r--  drivers/mtd/ubi/ubi.h           |  75
-rw-r--r--  drivers/mtd/ubi/upd.c           |  32
-rw-r--r--  drivers/mtd/ubi/vmt.c           | 148
-rw-r--r--  drivers/mtd/ubi/vtbl.c          | 127
-rw-r--r--  drivers/mtd/ubi/wl.c            | 208

27 files changed, 1041 insertions(+), 767 deletions(-)
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 303ea9b8cfe4..91fbba767635 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -236,6 +236,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) | |||
236 | { | 236 | { |
237 | struct block_device *bdev; | 237 | struct block_device *bdev; |
238 | struct block2mtd_dev *dev; | 238 | struct block2mtd_dev *dev; |
239 | char *name; | ||
239 | 240 | ||
240 | if (!devname) | 241 | if (!devname) |
241 | return NULL; | 242 | return NULL; |
@@ -274,12 +275,13 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) | |||
274 | 275 | ||
275 | /* Setup the MTD structure */ | 276 | /* Setup the MTD structure */ |
276 | /* make the name contain the block device in */ | 277 | /* make the name contain the block device in */ |
277 | dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname), | 278 | name = kmalloc(sizeof("block2mtd: ") + strlen(devname) + 1, |
278 | GFP_KERNEL); | 279 | GFP_KERNEL); |
279 | if (!dev->mtd.name) | 280 | if (!name) |
280 | goto devinit_err; | 281 | goto devinit_err; |
281 | 282 | ||
282 | sprintf(dev->mtd.name, "block2mtd: %s", devname); | 283 | sprintf(name, "block2mtd: %s", devname); |
284 | dev->mtd.name = name; | ||
283 | 285 | ||
284 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; | 286 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; |
285 | dev->mtd.erasesize = erase_size; | 287 | dev->mtd.erasesize = erase_size; |
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 3fed8f94ac6f..f34f20c78911 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -129,10 +129,6 @@ typedef struct partition_t { | |||
129 | u_int16_t DataUnits; | 129 | u_int16_t DataUnits; |
130 | u_int32_t BlocksPerUnit; | 130 | u_int32_t BlocksPerUnit; |
131 | erase_unit_header_t header; | 131 | erase_unit_header_t header; |
132 | #if 0 | ||
133 | region_info_t region; | ||
134 | memory_handle_t handle; | ||
135 | #endif | ||
136 | } partition_t; | 132 | } partition_t; |
137 | 133 | ||
138 | /* Partition state flags */ | 134 | /* Partition state flags */ |
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index ef1e29ea5a2c..df8e00bba07b 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -257,13 +257,6 @@ config MTD_ALCHEMY | |||
257 | help | 257 | help |
258 | Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards | 258 | Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards |
259 | 259 | ||
260 | config MTD_MTX1 | ||
261 | tristate "4G Systems MTX-1 Flash device" | ||
262 | depends on MIPS_MTX1 && MTD_CFI | ||
263 | help | ||
264 | Flash memory access on 4G Systems MTX-1 Board. If you have one of | ||
265 | these boards and would like to use the flash chips on it, say 'Y'. | ||
266 | |||
267 | config MTD_DILNETPC | 260 | config MTD_DILNETPC |
268 | tristate "CFI Flash device mapped on DIL/Net PC" | 261 | tristate "CFI Flash device mapped on DIL/Net PC" |
269 | depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT | 262 | depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index b29ea5460657..6cda6df973e5 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -63,6 +63,5 @@ obj-$(CONFIG_MTD_DMV182) += dmv182.o | |||
63 | obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o | 63 | obj-$(CONFIG_MTD_SHARP_SL) += sharpsl-flash.o |
64 | obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o | 64 | obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o |
65 | obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o | 65 | obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o |
66 | obj-$(CONFIG_MTD_MTX1) += mtx-1_flash.o | ||
67 | obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o | 66 | obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o |
68 | obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o | 67 | obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o |
diff --git a/drivers/mtd/maps/mtx-1_flash.c b/drivers/mtd/maps/mtx-1_flash.c
deleted file mode 100644
index a3b651904127..000000000000
--- a/drivers/mtd/maps/mtx-1_flash.c
+++ /dev/null
@@ -1,93 +0,0 @@ | |||
1 | /* | ||
2 | * Flash memory access on 4G Systems MTX-1 boards | ||
3 | * | ||
4 | * (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz> | ||
5 | * (C) 2005 Joern Engel <joern@wohnheim.fh-wedel.de> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #include <linux/module.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | |||
14 | #include <linux/mtd/mtd.h> | ||
15 | #include <linux/mtd/map.h> | ||
16 | #include <linux/mtd/partitions.h> | ||
17 | |||
18 | #include <asm/io.h> | ||
19 | |||
20 | static struct map_info mtx1_map = { | ||
21 | .name = "MTX-1 flash", | ||
22 | .bankwidth = 4, | ||
23 | .size = 0x2000000, | ||
24 | .phys = 0x1E000000, | ||
25 | }; | ||
26 | |||
27 | static struct mtd_partition mtx1_partitions[] = { | ||
28 | { | ||
29 | .name = "filesystem", | ||
30 | .size = 0x01C00000, | ||
31 | .offset = 0, | ||
32 | },{ | ||
33 | .name = "yamon", | ||
34 | .size = 0x00100000, | ||
35 | .offset = MTDPART_OFS_APPEND, | ||
36 | .mask_flags = MTD_WRITEABLE, | ||
37 | },{ | ||
38 | .name = "kernel", | ||
39 | .size = 0x002c0000, | ||
40 | .offset = MTDPART_OFS_APPEND, | ||
41 | },{ | ||
42 | .name = "yamon env", | ||
43 | .size = 0x00040000, | ||
44 | .offset = MTDPART_OFS_APPEND, | ||
45 | } | ||
46 | }; | ||
47 | |||
48 | static struct mtd_info *mtx1_mtd; | ||
49 | |||
50 | int __init mtx1_mtd_init(void) | ||
51 | { | ||
52 | int ret = -ENXIO; | ||
53 | |||
54 | simple_map_init(&mtx1_map); | ||
55 | |||
56 | mtx1_map.virt = ioremap(mtx1_map.phys, mtx1_map.size); | ||
57 | if (!mtx1_map.virt) | ||
58 | return -EIO; | ||
59 | |||
60 | mtx1_mtd = do_map_probe("cfi_probe", &mtx1_map); | ||
61 | if (!mtx1_mtd) | ||
62 | goto err; | ||
63 | |||
64 | mtx1_mtd->owner = THIS_MODULE; | ||
65 | |||
66 | ret = add_mtd_partitions(mtx1_mtd, mtx1_partitions, | ||
67 | ARRAY_SIZE(mtx1_partitions)); | ||
68 | if (ret) | ||
69 | goto err; | ||
70 | |||
71 | return 0; | ||
72 | |||
73 | err: | ||
74 | iounmap(mtx1_map.virt); | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | static void __exit mtx1_mtd_cleanup(void) | ||
79 | { | ||
80 | if (mtx1_mtd) { | ||
81 | del_mtd_partitions(mtx1_mtd); | ||
82 | map_destroy(mtx1_mtd); | ||
83 | } | ||
84 | if (mtx1_map.virt) | ||
85 | iounmap(mtx1_map.virt); | ||
86 | } | ||
87 | |||
88 | module_init(mtx1_mtd_init); | ||
89 | module_exit(mtx1_mtd_cleanup); | ||
90 | |||
91 | MODULE_AUTHOR("Bruno Randolf <bruno.randolf@4g-systems.biz>"); | ||
92 | MODULE_DESCRIPTION("MTX-1 flash map"); | ||
93 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index c12d8056bebd..68eec6c6c517 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -60,13 +60,22 @@ struct omapflash_info { | |||
60 | static void omap_set_vpp(struct map_info *map, int enable) | 60 | static void omap_set_vpp(struct map_info *map, int enable) |
61 | { | 61 | { |
62 | static int count; | 62 | static int count; |
63 | 63 | u32 l; | |
64 | if (enable) { | 64 | |
65 | if (count++ == 0) | 65 | if (cpu_class_is_omap1()) { |
66 | OMAP_EMIFS_CONFIG_REG |= OMAP_EMIFS_CONFIG_WP; | 66 | if (enable) { |
67 | } else { | 67 | if (count++ == 0) { |
68 | if (count && (--count == 0)) | 68 | l = omap_readl(EMIFS_CONFIG); |
69 | OMAP_EMIFS_CONFIG_REG &= ~OMAP_EMIFS_CONFIG_WP; | 69 | l |= OMAP_EMIFS_CONFIG_WP; |
70 | omap_writel(l, EMIFS_CONFIG); | ||
71 | } | ||
72 | } else { | ||
73 | if (count && (--count == 0)) { | ||
74 | l = omap_readl(EMIFS_CONFIG); | ||
75 | l &= ~OMAP_EMIFS_CONFIG_WP; | ||
76 | omap_writel(l, EMIFS_CONFIG); | ||
77 | } | ||
78 | } | ||
70 | } | 79 | } |
71 | } | 80 | } |
72 | 81 | ||
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index 8f7ca863f89d..90924fb00481 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -495,17 +495,14 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
495 | int i; | 495 | int i; |
496 | config_info_t t; | 496 | config_info_t t; |
497 | static char *probes[] = { "jedec_probe", "cfi_probe" }; | 497 | static char *probes[] = { "jedec_probe", "cfi_probe" }; |
498 | cisinfo_t cisinfo; | ||
499 | int new_name = 0; | 498 | int new_name = 0; |
500 | 499 | ||
501 | DEBUG(3, "link=0x%p", link); | 500 | DEBUG(3, "link=0x%p", link); |
502 | 501 | ||
503 | DEBUG(2, "Validating CIS"); | 502 | DEBUG(2, "Validating CIS"); |
504 | ret = pcmcia_validate_cis(link, &cisinfo); | 503 | ret = pcmcia_validate_cis(link, NULL); |
505 | if(ret != CS_SUCCESS) { | 504 | if(ret != CS_SUCCESS) { |
506 | cs_error(link, GetTupleData, ret); | 505 | cs_error(link, GetTupleData, ret); |
507 | } else { | ||
508 | DEBUG(2, "ValidateCIS found %d chains", cisinfo.Chains); | ||
509 | } | 506 | } |
510 | 507 | ||
511 | card_settings(dev, link, &new_name); | 508 | card_settings(dev, link, &new_name); |
@@ -560,9 +557,7 @@ static int pcmciamtd_config(struct pcmcia_device *link) | |||
560 | DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); | 557 | DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); |
561 | 558 | ||
562 | /* Get write protect status */ | 559 | /* Get write protect status */ |
563 | CS_CHECK(GetStatus, pcmcia_get_status(link, &status)); | 560 | DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win); |
564 | DEBUG(2, "status value: 0x%x window handle = 0x%8.8lx", | ||
565 | status.CardState, (unsigned long)link->win); | ||
566 | dev->win_base = ioremap(req.Base, req.Size); | 561 | dev->win_base = ioremap(req.Base, req.Size); |
567 | if(!dev->win_base) { | 562 | if(!dev->win_base) { |
568 | err("ioremap(%lu, %u) failed", req.Base, req.Size); | 563 | err("ioremap(%lu, %u) failed", req.Base, req.Size); |
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f5061fe72e4c..d2f331876e4c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
15 | #include <linux/smp_lock.h> | ||
15 | 16 | ||
16 | #include <linux/mtd/mtd.h> | 17 | #include <linux/mtd/mtd.h> |
17 | #include <linux/mtd/compatmac.h> | 18 | #include <linux/mtd/compatmac.h> |
@@ -25,10 +26,13 @@ static void mtd_notify_add(struct mtd_info* mtd) | |||
25 | if (!mtd) | 26 | if (!mtd) |
26 | return; | 27 | return; |
27 | 28 | ||
28 | device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2), "mtd%d", mtd->index); | 29 | device_create_drvdata(mtd_class, NULL, |
30 | MKDEV(MTD_CHAR_MAJOR, mtd->index*2), | ||
31 | NULL, "mtd%d", mtd->index); | ||
29 | 32 | ||
30 | device_create(mtd_class, NULL, | 33 | device_create_drvdata(mtd_class, NULL, |
31 | MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1), "mtd%dro", mtd->index); | 34 | MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1), |
35 | NULL, "mtd%dro", mtd->index); | ||
32 | } | 36 | } |
33 | 37 | ||
34 | static void mtd_notify_remove(struct mtd_info* mtd) | 38 | static void mtd_notify_remove(struct mtd_info* mtd) |
@@ -84,6 +88,7 @@ static int mtd_open(struct inode *inode, struct file *file) | |||
84 | { | 88 | { |
85 | int minor = iminor(inode); | 89 | int minor = iminor(inode); |
86 | int devnum = minor >> 1; | 90 | int devnum = minor >> 1; |
91 | int ret = 0; | ||
87 | struct mtd_info *mtd; | 92 | struct mtd_info *mtd; |
88 | struct mtd_file_info *mfi; | 93 | struct mtd_file_info *mfi; |
89 | 94 | ||
@@ -96,31 +101,39 @@ static int mtd_open(struct inode *inode, struct file *file) | |||
96 | if ((file->f_mode & 2) && (minor & 1)) | 101 | if ((file->f_mode & 2) && (minor & 1)) |
97 | return -EACCES; | 102 | return -EACCES; |
98 | 103 | ||
104 | lock_kernel(); | ||
99 | mtd = get_mtd_device(NULL, devnum); | 105 | mtd = get_mtd_device(NULL, devnum); |
100 | 106 | ||
101 | if (IS_ERR(mtd)) | 107 | if (IS_ERR(mtd)) { |
102 | return PTR_ERR(mtd); | 108 | ret = PTR_ERR(mtd); |
109 | goto out; | ||
110 | } | ||
103 | 111 | ||
104 | if (MTD_ABSENT == mtd->type) { | 112 | if (MTD_ABSENT == mtd->type) { |
105 | put_mtd_device(mtd); | 113 | put_mtd_device(mtd); |
106 | return -ENODEV; | 114 | ret = -ENODEV; |
115 | goto out; | ||
107 | } | 116 | } |
108 | 117 | ||
109 | /* You can't open it RW if it's not a writeable device */ | 118 | /* You can't open it RW if it's not a writeable device */ |
110 | if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) { | 119 | if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) { |
111 | put_mtd_device(mtd); | 120 | put_mtd_device(mtd); |
112 | return -EACCES; | 121 | ret = -EACCES; |
122 | goto out; | ||
113 | } | 123 | } |
114 | 124 | ||
115 | mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); | 125 | mfi = kzalloc(sizeof(*mfi), GFP_KERNEL); |
116 | if (!mfi) { | 126 | if (!mfi) { |
117 | put_mtd_device(mtd); | 127 | put_mtd_device(mtd); |
118 | return -ENOMEM; | 128 | ret = -ENOMEM; |
129 | goto out; | ||
119 | } | 130 | } |
120 | mfi->mtd = mtd; | 131 | mfi->mtd = mtd; |
121 | file->private_data = mfi; | 132 | file->private_data = mfi; |
122 | 133 | ||
123 | return 0; | 134 | out: |
135 | unlock_kernel(); | ||
136 | return ret; | ||
124 | } /* mtd_open */ | 137 | } /* mtd_open */ |
125 | 138 | ||
126 | /*====================================================================*/ | 139 | /*====================================================================*/ |
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index cb663ef245d5..fc8529bedfdf 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -20,9 +20,11 @@ | |||
20 | 20 | ||
21 | #include <linux/mtd/nand.h> | 21 | #include <linux/mtd/nand.h> |
22 | #include <linux/mtd/partitions.h> | 22 | #include <linux/mtd/partitions.h> |
23 | #include <linux/gpio.h> | ||
23 | 24 | ||
24 | #include <asm/io.h> | 25 | #include <asm/io.h> |
25 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
27 | #include <asm/mach-types.h> | ||
26 | 28 | ||
27 | #include <asm/arch/hardware.h> | 29 | #include <asm/arch/hardware.h> |
28 | #include <asm/arch/pxa-regs.h> | 30 | #include <asm/arch/pxa-regs.h> |
@@ -30,20 +32,6 @@ | |||
30 | #define GPIO_NAND_CS (11) | 32 | #define GPIO_NAND_CS (11) |
31 | #define GPIO_NAND_RB (89) | 33 | #define GPIO_NAND_RB (89) |
32 | 34 | ||
33 | /* This macro needed to ensure in-order operation of GPIO and local | ||
34 | * bus. Without both asm command and dummy uncached read there're | ||
35 | * states when NAND access is broken. I've looked for such macro(s) in | ||
36 | * include/asm-arm but found nothing approptiate. | ||
37 | * dmac_clean_range is close, but is makes cache invalidation | ||
38 | * unnecessary here and it cannot be used in module | ||
39 | */ | ||
40 | #define DRAIN_WB() \ | ||
41 | do { \ | ||
42 | unsigned char dummy; \ | ||
43 | asm volatile ("mcr p15, 0, r0, c7, c10, 4":::"r0"); \ | ||
44 | dummy=*((unsigned char*)UNCACHED_ADDR); \ | ||
45 | } while(0) | ||
46 | |||
47 | /* MTD structure for CM-X270 board */ | 35 | /* MTD structure for CM-X270 board */ |
48 | static struct mtd_info *cmx270_nand_mtd; | 36 | static struct mtd_info *cmx270_nand_mtd; |
49 | 37 | ||
@@ -103,14 +91,14 @@ static int cmx270_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | |||
103 | 91 | ||
104 | static inline void nand_cs_on(void) | 92 | static inline void nand_cs_on(void) |
105 | { | 93 | { |
106 | GPCR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); | 94 | gpio_set_value(GPIO_NAND_CS, 0); |
107 | } | 95 | } |
108 | 96 | ||
109 | static void nand_cs_off(void) | 97 | static void nand_cs_off(void) |
110 | { | 98 | { |
111 | DRAIN_WB(); | 99 | dsb(); |
112 | 100 | ||
113 | GPSR(GPIO_NAND_CS) = GPIO_bit(GPIO_NAND_CS); | 101 | gpio_set_value(GPIO_NAND_CS, 1); |
114 | } | 102 | } |
115 | 103 | ||
116 | /* | 104 | /* |
@@ -122,7 +110,7 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, | |||
122 | struct nand_chip* this = mtd->priv; | 110 | struct nand_chip* this = mtd->priv; |
123 | unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; | 111 | unsigned int nandaddr = (unsigned int)this->IO_ADDR_W; |
124 | 112 | ||
125 | DRAIN_WB(); | 113 | dsb(); |
126 | 114 | ||
127 | if (ctrl & NAND_CTRL_CHANGE) { | 115 | if (ctrl & NAND_CTRL_CHANGE) { |
128 | if ( ctrl & NAND_ALE ) | 116 | if ( ctrl & NAND_ALE ) |
@@ -139,12 +127,12 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, | |||
139 | nand_cs_off(); | 127 | nand_cs_off(); |
140 | } | 128 | } |
141 | 129 | ||
142 | DRAIN_WB(); | 130 | dsb(); |
143 | this->IO_ADDR_W = (void __iomem*)nandaddr; | 131 | this->IO_ADDR_W = (void __iomem*)nandaddr; |
144 | if (dat != NAND_CMD_NONE) | 132 | if (dat != NAND_CMD_NONE) |
145 | writel((dat << 16), this->IO_ADDR_W); | 133 | writel((dat << 16), this->IO_ADDR_W); |
146 | 134 | ||
147 | DRAIN_WB(); | 135 | dsb(); |
148 | } | 136 | } |
149 | 137 | ||
150 | /* | 138 | /* |
@@ -152,9 +140,9 @@ static void cmx270_hwcontrol(struct mtd_info *mtd, int dat, | |||
152 | */ | 140 | */ |
153 | static int cmx270_device_ready(struct mtd_info *mtd) | 141 | static int cmx270_device_ready(struct mtd_info *mtd) |
154 | { | 142 | { |
155 | DRAIN_WB(); | 143 | dsb(); |
156 | 144 | ||
157 | return (GPLR(GPIO_NAND_RB) & GPIO_bit(GPIO_NAND_RB)); | 145 | return (gpio_get_value(GPIO_NAND_RB)); |
158 | } | 146 | } |
159 | 147 | ||
160 | /* | 148 | /* |
@@ -168,20 +156,40 @@ static int cmx270_init(void) | |||
168 | int mtd_parts_nb = 0; | 156 | int mtd_parts_nb = 0; |
169 | int ret; | 157 | int ret; |
170 | 158 | ||
159 | if (!machine_is_armcore()) | ||
160 | return -ENODEV; | ||
161 | |||
162 | ret = gpio_request(GPIO_NAND_CS, "NAND CS"); | ||
163 | if (ret) { | ||
164 | pr_warning("CM-X270: failed to request NAND CS gpio\n"); | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | gpio_direction_output(GPIO_NAND_CS, 1); | ||
169 | |||
170 | ret = gpio_request(GPIO_NAND_RB, "NAND R/B"); | ||
171 | if (ret) { | ||
172 | pr_warning("CM-X270: failed to request NAND R/B gpio\n"); | ||
173 | goto err_gpio_request; | ||
174 | } | ||
175 | |||
176 | gpio_direction_input(GPIO_NAND_RB); | ||
177 | |||
171 | /* Allocate memory for MTD device structure and private data */ | 178 | /* Allocate memory for MTD device structure and private data */ |
172 | cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + | 179 | cmx270_nand_mtd = kzalloc(sizeof(struct mtd_info) + |
173 | sizeof(struct nand_chip), | 180 | sizeof(struct nand_chip), |
174 | GFP_KERNEL); | 181 | GFP_KERNEL); |
175 | if (!cmx270_nand_mtd) { | 182 | if (!cmx270_nand_mtd) { |
176 | printk("Unable to allocate CM-X270 NAND MTD device structure.\n"); | 183 | pr_debug("Unable to allocate CM-X270 NAND MTD device structure.\n"); |
177 | return -ENOMEM; | 184 | ret = -ENOMEM; |
185 | goto err_kzalloc; | ||
178 | } | 186 | } |
179 | 187 | ||
180 | cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); | 188 | cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12); |
181 | if (!cmx270_nand_io) { | 189 | if (!cmx270_nand_io) { |
182 | printk("Unable to ioremap NAND device\n"); | 190 | pr_debug("Unable to ioremap NAND device\n"); |
183 | ret = -EINVAL; | 191 | ret = -EINVAL; |
184 | goto err1; | 192 | goto err_ioremap; |
185 | } | 193 | } |
186 | 194 | ||
187 | /* Get pointer to private data */ | 195 | /* Get pointer to private data */ |
@@ -209,9 +217,9 @@ static int cmx270_init(void) | |||
209 | 217 | ||
210 | /* Scan to find existence of the device */ | 218 | /* Scan to find existence of the device */ |
211 | if (nand_scan (cmx270_nand_mtd, 1)) { | 219 | if (nand_scan (cmx270_nand_mtd, 1)) { |
212 | printk(KERN_NOTICE "No NAND device\n"); | 220 | pr_notice("No NAND device\n"); |
213 | ret = -ENXIO; | 221 | ret = -ENXIO; |
214 | goto err2; | 222 | goto err_scan; |
215 | } | 223 | } |
216 | 224 | ||
217 | #ifdef CONFIG_MTD_CMDLINE_PARTS | 225 | #ifdef CONFIG_MTD_CMDLINE_PARTS |
@@ -229,18 +237,22 @@ static int cmx270_init(void) | |||
229 | } | 237 | } |
230 | 238 | ||
231 | /* Register the partitions */ | 239 | /* Register the partitions */ |
232 | printk(KERN_NOTICE "Using %s partition definition\n", part_type); | 240 | pr_notice("Using %s partition definition\n", part_type); |
233 | ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); | 241 | ret = add_mtd_partitions(cmx270_nand_mtd, mtd_parts, mtd_parts_nb); |
234 | if (ret) | 242 | if (ret) |
235 | goto err2; | 243 | goto err_scan; |
236 | 244 | ||
237 | /* Return happy */ | 245 | /* Return happy */ |
238 | return 0; | 246 | return 0; |
239 | 247 | ||
240 | err2: | 248 | err_scan: |
241 | iounmap(cmx270_nand_io); | 249 | iounmap(cmx270_nand_io); |
242 | err1: | 250 | err_ioremap: |
243 | kfree(cmx270_nand_mtd); | 251 | kfree(cmx270_nand_mtd); |
252 | err_kzalloc: | ||
253 | gpio_free(GPIO_NAND_RB); | ||
254 | err_gpio_request: | ||
255 | gpio_free(GPIO_NAND_CS); | ||
244 | 256 | ||
245 | return ret; | 257 | return ret; |
246 | 258 | ||
@@ -255,6 +267,9 @@ static void cmx270_cleanup(void) | |||
255 | /* Release resources, unregister device */ | 267 | /* Release resources, unregister device */ |
256 | nand_release(cmx270_nand_mtd); | 268 | nand_release(cmx270_nand_mtd); |
257 | 269 | ||
270 | gpio_free(GPIO_NAND_RB); | ||
271 | gpio_free(GPIO_NAND_CS); | ||
272 | |||
258 | iounmap(cmx270_nand_io); | 273 | iounmap(cmx270_nand_io); |
259 | 274 | ||
260 | /* Free the MTD device structure */ | 275 | /* Free the MTD device structure */ |
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 59e05a1c50cf..ee2ac3948cd8 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -85,6 +85,9 @@ static int __init orion_nand_probe(struct platform_device *pdev) | |||
85 | nc->cmd_ctrl = orion_nand_cmd_ctrl; | 85 | nc->cmd_ctrl = orion_nand_cmd_ctrl; |
86 | nc->ecc.mode = NAND_ECC_SOFT; | 86 | nc->ecc.mode = NAND_ECC_SOFT; |
87 | 87 | ||
88 | if (board->chip_delay) | ||
89 | nc->chip_delay = board->chip_delay; | ||
90 | |||
88 | if (board->width == 16) | 91 | if (board->width == 16) |
89 | nc->options |= NAND_BUSWIDTH_16; | 92 | nc->options |= NAND_BUSWIDTH_16; |
90 | 93 | ||
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 961416ac0616..c7630a228310 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -51,14 +51,13 @@ | |||
51 | * @name: MTD device name or number string | 51 | * @name: MTD device name or number string |
52 | * @vid_hdr_offs: VID header offset | 52 | * @vid_hdr_offs: VID header offset |
53 | */ | 53 | */ |
54 | struct mtd_dev_param | 54 | struct mtd_dev_param { |
55 | { | ||
56 | char name[MTD_PARAM_LEN_MAX]; | 55 | char name[MTD_PARAM_LEN_MAX]; |
57 | int vid_hdr_offs; | 56 | int vid_hdr_offs; |
58 | }; | 57 | }; |
59 | 58 | ||
60 | /* Numbers of elements set in the @mtd_dev_param array */ | 59 | /* Numbers of elements set in the @mtd_dev_param array */ |
61 | static int mtd_devs = 0; | 60 | static int mtd_devs; |
62 | 61 | ||
63 | /* MTD devices specification parameters */ | 62 | /* MTD devices specification parameters */ |
64 | static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; | 63 | static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; |
@@ -160,8 +159,7 @@ void ubi_put_device(struct ubi_device *ubi) | |||
160 | } | 159 | } |
161 | 160 | ||
162 | /** | 161 | /** |
163 | * ubi_get_by_major - get UBI device description object by character device | 162 | * ubi_get_by_major - get UBI device by character device major number. |
164 | * major number. | ||
165 | * @major: major number | 163 | * @major: major number |
166 | * | 164 | * |
167 | * This function is similar to 'ubi_get_device()', but it searches the device | 165 | * This function is similar to 'ubi_get_device()', but it searches the device |
@@ -355,15 +353,34 @@ static void kill_volumes(struct ubi_device *ubi) | |||
355 | } | 353 | } |
356 | 354 | ||
357 | /** | 355 | /** |
356 | * free_user_volumes - free all user volumes. | ||
357 | * @ubi: UBI device description object | ||
358 | * | ||
359 | * Normally the volumes are freed at the release function of the volume device | ||
360 | * objects. However, on error paths the volumes have to be freed before the | ||
361 | * device objects have been initialized. | ||
362 | */ | ||
363 | static void free_user_volumes(struct ubi_device *ubi) | ||
364 | { | ||
365 | int i; | ||
366 | |||
367 | for (i = 0; i < ubi->vtbl_slots; i++) | ||
368 | if (ubi->volumes[i]) { | ||
369 | kfree(ubi->volumes[i]->eba_tbl); | ||
370 | kfree(ubi->volumes[i]); | ||
371 | } | ||
372 | } | ||
373 | |||
374 | /** | ||
358 | * uif_init - initialize user interfaces for an UBI device. | 375 | * uif_init - initialize user interfaces for an UBI device. |
359 | * @ubi: UBI device description object | 376 | * @ubi: UBI device description object |
360 | * | 377 | * |
361 | * This function returns zero in case of success and a negative error code in | 378 | * This function returns zero in case of success and a negative error code in |
362 | * case of failure. | 379 | * case of failure. Note, this function destroys all volumes if it failes. |
363 | */ | 380 | */ |
364 | static int uif_init(struct ubi_device *ubi) | 381 | static int uif_init(struct ubi_device *ubi) |
365 | { | 382 | { |
366 | int i, err; | 383 | int i, err, do_free = 0; |
367 | dev_t dev; | 384 | dev_t dev; |
368 | 385 | ||
369 | sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); | 386 | sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); |
@@ -384,7 +401,7 @@ static int uif_init(struct ubi_device *ubi) | |||
384 | 401 | ||
385 | ubi_assert(MINOR(dev) == 0); | 402 | ubi_assert(MINOR(dev) == 0); |
386 | cdev_init(&ubi->cdev, &ubi_cdev_operations); | 403 | cdev_init(&ubi->cdev, &ubi_cdev_operations); |
387 | dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev)); | 404 | dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev)); |
388 | ubi->cdev.owner = THIS_MODULE; | 405 | ubi->cdev.owner = THIS_MODULE; |
389 | 406 | ||
390 | err = cdev_add(&ubi->cdev, dev, 1); | 407 | err = cdev_add(&ubi->cdev, dev, 1); |
@@ -410,10 +427,13 @@ static int uif_init(struct ubi_device *ubi) | |||
410 | 427 | ||
411 | out_volumes: | 428 | out_volumes: |
412 | kill_volumes(ubi); | 429 | kill_volumes(ubi); |
430 | do_free = 0; | ||
413 | out_sysfs: | 431 | out_sysfs: |
414 | ubi_sysfs_close(ubi); | 432 | ubi_sysfs_close(ubi); |
415 | cdev_del(&ubi->cdev); | 433 | cdev_del(&ubi->cdev); |
416 | out_unreg: | 434 | out_unreg: |
435 | if (do_free) | ||
436 | free_user_volumes(ubi); | ||
417 | unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); | 437 | unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); |
418 | ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); | 438 | ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); |
419 | return err; | 439 | return err; |
@@ -422,6 +442,10 @@ out_unreg: | |||
422 | /** | 442 | /** |
423 | * uif_close - close user interfaces for an UBI device. | 443 | * uif_close - close user interfaces for an UBI device. |
424 | * @ubi: UBI device description object | 444 | * @ubi: UBI device description object |
445 | * | ||
446 | * Note, since this function un-registers UBI volume device objects (@vol->dev), | ||
447 | * the memory allocated voe the volumes is freed as well (in the release | ||
448 | * function). | ||
425 | */ | 449 | */ |
426 | static void uif_close(struct ubi_device *ubi) | 450 | static void uif_close(struct ubi_device *ubi) |
427 | { | 451 | { |
@@ -432,6 +456,21 @@ static void uif_close(struct ubi_device *ubi) | |||
432 | } | 456 | } |
433 | 457 | ||
434 | /** | 458 | /** |
459 | * free_internal_volumes - free internal volumes. | ||
460 | * @ubi: UBI device description object | ||
461 | */ | ||
462 | static void free_internal_volumes(struct ubi_device *ubi) | ||
463 | { | ||
464 | int i; | ||
465 | |||
466 | for (i = ubi->vtbl_slots; | ||
467 | i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | ||
468 | kfree(ubi->volumes[i]->eba_tbl); | ||
469 | kfree(ubi->volumes[i]); | ||
470 | } | ||
471 | } | ||
472 | |||
473 | /** | ||
435 | * attach_by_scanning - attach an MTD device using scanning method. | 474 | * attach_by_scanning - attach an MTD device using scanning method. |
436 | * @ubi: UBI device descriptor | 475 | * @ubi: UBI device descriptor |
437 | * | 476 | * |
@@ -475,6 +514,7 @@ static int attach_by_scanning(struct ubi_device *ubi) | |||
475 | out_wl: | 514 | out_wl: |
476 | ubi_wl_close(ubi); | 515 | ubi_wl_close(ubi); |
477 | out_vtbl: | 516 | out_vtbl: |
517 | free_internal_volumes(ubi); | ||
478 | vfree(ubi->vtbl); | 518 | vfree(ubi->vtbl); |
479 | out_si: | 519 | out_si: |
480 | ubi_scan_destroy_si(si); | 520 | ubi_scan_destroy_si(si); |
@@ -482,7 +522,7 @@ out_si: | |||
482 | } | 522 | } |
483 | 523 | ||
484 | /** | 524 | /** |
485 | * io_init - initialize I/O unit for a given UBI device. | 525 | * io_init - initialize I/O sub-system for a given UBI device. |
486 | * @ubi: UBI device description object | 526 | * @ubi: UBI device description object |
487 | * | 527 | * |
488 | * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are | 528 | * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are |
@@ -530,7 +570,11 @@ static int io_init(struct ubi_device *ubi) | |||
530 | ubi->min_io_size = ubi->mtd->writesize; | 570 | ubi->min_io_size = ubi->mtd->writesize; |
531 | ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; | 571 | ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; |
532 | 572 | ||
533 | /* Make sure minimal I/O unit is power of 2 */ | 573 | /* |
574 | * Make sure minimal I/O unit is power of 2. Note, there is no | ||
575 | * fundamental reason for this assumption. It is just an optimization | ||
576 | * which allows us to avoid costly division operations. | ||
577 | */ | ||
534 | if (!is_power_of_2(ubi->min_io_size)) { | 578 | if (!is_power_of_2(ubi->min_io_size)) { |
535 | ubi_err("min. I/O unit (%d) is not power of 2", | 579 | ubi_err("min. I/O unit (%d) is not power of 2", |
536 | ubi->min_io_size); | 580 | ubi->min_io_size); |
@@ -581,7 +625,7 @@ static int io_init(struct ubi_device *ubi) | |||
581 | if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || | 625 | if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || |
582 | ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || | 626 | ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || |
583 | ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || | 627 | ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || |
584 | ubi->leb_start % ubi->min_io_size) { | 628 | ubi->leb_start & (ubi->min_io_size - 1)) { |
585 | ubi_err("bad VID header (%d) or data offsets (%d)", | 629 | ubi_err("bad VID header (%d) or data offsets (%d)", |
586 | ubi->vid_hdr_offset, ubi->leb_start); | 630 | ubi->vid_hdr_offset, ubi->leb_start); |
587 | return -EINVAL; | 631 | return -EINVAL; |
@@ -646,7 +690,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id) | |||
646 | 690 | ||
647 | /* | 691 | /* |
648 | * Clear the auto-resize flag in the volume in-memory copy of the | 692 | * Clear the auto-resize flag in the volume in-memory copy of the |
649 | * volume table, and 'ubi_resize_volume()' will propogate this change | 693 | * volume table, and 'ubi_resize_volume()' will propagate this change |
650 | * to the flash. | 694 | * to the flash. |
651 | */ | 695 | */ |
652 | ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; | 696 | ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; |
@@ -655,7 +699,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id) | |||
655 | struct ubi_vtbl_record vtbl_rec; | 699 | struct ubi_vtbl_record vtbl_rec; |
656 | 700 | ||
657 | /* | 701 | /* |
658 | * No avalilable PEBs to re-size the volume, clear the flag on | 702 | * No available PEBs to re-size the volume, clear the flag on |
659 | * flash and exit. | 703 | * flash and exit. |
660 | */ | 704 | */ |
661 | memcpy(&vtbl_rec, &ubi->vtbl[vol_id], | 705 | memcpy(&vtbl_rec, &ubi->vtbl[vol_id], |
@@ -682,13 +726,13 @@ static int autoresize(struct ubi_device *ubi, int vol_id) | |||
682 | 726 | ||
683 | /** | 727 | /** |
684 | * ubi_attach_mtd_dev - attach an MTD device. | 728 | * ubi_attach_mtd_dev - attach an MTD device. |
685 | * @mtd_dev: MTD device description object | 729 | * @mtd: MTD device description object |
686 | * @ubi_num: number to assign to the new UBI device | 730 | * @ubi_num: number to assign to the new UBI device |
687 | * @vid_hdr_offset: VID header offset | 731 | * @vid_hdr_offset: VID header offset |
688 | * | 732 | * |
689 | * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number | 733 | * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number |
690 | * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in | 734 | * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in |
691 | * which case this function finds a vacant device nubert and assings it | 735 | * which case this function finds a vacant device number and assigns it |
692 | * automatically. Returns the new UBI device number in case of success and a | 736 | * automatically. Returns the new UBI device number in case of success and a |
693 | * negative error code in case of failure. | 737 | * negative error code in case of failure. |
694 | * | 738 | * |
@@ -698,7 +742,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id) | |||
698 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | 742 | int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) |
699 | { | 743 | { |
700 | struct ubi_device *ubi; | 744 | struct ubi_device *ubi; |
701 | int i, err; | 745 | int i, err, do_free = 1; |
702 | 746 | ||
703 | /* | 747 | /* |
704 | * Check if we already have the same MTD device attached. | 748 | * Check if we already have the same MTD device attached. |
@@ -735,7 +779,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
735 | if (!ubi_devices[ubi_num]) | 779 | if (!ubi_devices[ubi_num]) |
736 | break; | 780 | break; |
737 | if (ubi_num == UBI_MAX_DEVICES) { | 781 | if (ubi_num == UBI_MAX_DEVICES) { |
738 | dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES); | 782 | dbg_err("only %d UBI devices may be created", |
783 | UBI_MAX_DEVICES); | ||
739 | return -ENFILE; | 784 | return -ENFILE; |
740 | } | 785 | } |
741 | } else { | 786 | } else { |
@@ -760,6 +805,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
760 | 805 | ||
761 | mutex_init(&ubi->buf_mutex); | 806 | mutex_init(&ubi->buf_mutex); |
762 | mutex_init(&ubi->ckvol_mutex); | 807 | mutex_init(&ubi->ckvol_mutex); |
808 | mutex_init(&ubi->mult_mutex); | ||
763 | mutex_init(&ubi->volumes_mutex); | 809 | mutex_init(&ubi->volumes_mutex); |
764 | spin_lock_init(&ubi->volumes_lock); | 810 | spin_lock_init(&ubi->volumes_lock); |
765 | 811 | ||
@@ -798,7 +844,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
798 | 844 | ||
799 | err = uif_init(ubi); | 845 | err = uif_init(ubi); |
800 | if (err) | 846 | if (err) |
801 | goto out_detach; | 847 | goto out_nofree; |
802 | 848 | ||
803 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); | 849 | ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); |
804 | if (IS_ERR(ubi->bgt_thread)) { | 850 | if (IS_ERR(ubi->bgt_thread)) { |
@@ -824,20 +870,22 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
824 | ubi->beb_rsvd_pebs); | 870 | ubi->beb_rsvd_pebs); |
825 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); | 871 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); |
826 | 872 | ||
827 | /* Enable the background thread */ | 873 | if (!DBG_DISABLE_BGT) |
828 | if (!DBG_DISABLE_BGT) { | ||
829 | ubi->thread_enabled = 1; | 874 | ubi->thread_enabled = 1; |
830 | wake_up_process(ubi->bgt_thread); | 875 | wake_up_process(ubi->bgt_thread); |
831 | } | ||
832 | 876 | ||
833 | ubi_devices[ubi_num] = ubi; | 877 | ubi_devices[ubi_num] = ubi; |
834 | return ubi_num; | 878 | return ubi_num; |
835 | 879 | ||
836 | out_uif: | 880 | out_uif: |
837 | uif_close(ubi); | 881 | uif_close(ubi); |
882 | out_nofree: | ||
883 | do_free = 0; | ||
838 | out_detach: | 884 | out_detach: |
839 | ubi_eba_close(ubi); | ||
840 | ubi_wl_close(ubi); | 885 | ubi_wl_close(ubi); |
886 | if (do_free) | ||
887 | free_user_volumes(ubi); | ||
888 | free_internal_volumes(ubi); | ||
841 | vfree(ubi->vtbl); | 889 | vfree(ubi->vtbl); |
842 | out_free: | 890 | out_free: |
843 | vfree(ubi->peb_buf1); | 891 | vfree(ubi->peb_buf1); |
@@ -899,8 +947,8 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) | |||
899 | kthread_stop(ubi->bgt_thread); | 947 | kthread_stop(ubi->bgt_thread); |
900 | 948 | ||
901 | uif_close(ubi); | 949 | uif_close(ubi); |
902 | ubi_eba_close(ubi); | ||
903 | ubi_wl_close(ubi); | 950 | ubi_wl_close(ubi); |
951 | free_internal_volumes(ubi); | ||
904 | vfree(ubi->vtbl); | 952 | vfree(ubi->vtbl); |
905 | put_mtd_device(ubi->mtd); | 953 | put_mtd_device(ubi->mtd); |
906 | vfree(ubi->peb_buf1); | 954 | vfree(ubi->peb_buf1); |
@@ -1044,8 +1092,7 @@ static void __exit ubi_exit(void) | |||
1044 | module_exit(ubi_exit); | 1092 | module_exit(ubi_exit); |
1045 | 1093 | ||
1046 | /** | 1094 | /** |
1047 | * bytes_str_to_int - convert a string representing number of bytes to an | 1095 | * bytes_str_to_int - convert a number of bytes string into an integer. |
1048 | * integer. | ||
1049 | * @str: the string to convert | 1096 | * @str: the string to convert |
1050 | * | 1097 | * |
1051 | * This function returns positive resulting integer in case of success and a | 1098 | * This function returns positive resulting integer in case of success and a |
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 9d6aae5449b6..03c759b4eeb5 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -39,8 +39,9 @@ | |||
39 | #include <linux/stat.h> | 39 | #include <linux/stat.h> |
40 | #include <linux/ioctl.h> | 40 | #include <linux/ioctl.h> |
41 | #include <linux/capability.h> | 41 | #include <linux/capability.h> |
42 | #include <linux/uaccess.h> | ||
43 | #include <linux/smp_lock.h> | ||
42 | #include <mtd/ubi-user.h> | 44 | #include <mtd/ubi-user.h> |
43 | #include <asm/uaccess.h> | ||
44 | #include <asm/div64.h> | 45 | #include <asm/div64.h> |
45 | #include "ubi.h" | 46 | #include "ubi.h" |
46 | 47 | ||
@@ -103,18 +104,22 @@ static int vol_cdev_open(struct inode *inode, struct file *file) | |||
103 | struct ubi_volume_desc *desc; | 104 | struct ubi_volume_desc *desc; |
104 | int vol_id = iminor(inode) - 1, mode, ubi_num; | 105 | int vol_id = iminor(inode) - 1, mode, ubi_num; |
105 | 106 | ||
107 | lock_kernel(); | ||
106 | ubi_num = ubi_major2num(imajor(inode)); | 108 | ubi_num = ubi_major2num(imajor(inode)); |
107 | if (ubi_num < 0) | 109 | if (ubi_num < 0) { |
110 | unlock_kernel(); | ||
108 | return ubi_num; | 111 | return ubi_num; |
112 | } | ||
109 | 113 | ||
110 | if (file->f_mode & FMODE_WRITE) | 114 | if (file->f_mode & FMODE_WRITE) |
111 | mode = UBI_READWRITE; | 115 | mode = UBI_READWRITE; |
112 | else | 116 | else |
113 | mode = UBI_READONLY; | 117 | mode = UBI_READONLY; |
114 | 118 | ||
115 | dbg_msg("open volume %d, mode %d", vol_id, mode); | 119 | dbg_gen("open volume %d, mode %d", vol_id, mode); |
116 | 120 | ||
117 | desc = ubi_open_volume(ubi_num, vol_id, mode); | 121 | desc = ubi_open_volume(ubi_num, vol_id, mode); |
122 | unlock_kernel(); | ||
118 | if (IS_ERR(desc)) | 123 | if (IS_ERR(desc)) |
119 | return PTR_ERR(desc); | 124 | return PTR_ERR(desc); |
120 | 125 | ||
@@ -127,7 +132,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file) | |||
127 | struct ubi_volume_desc *desc = file->private_data; | 132 | struct ubi_volume_desc *desc = file->private_data; |
128 | struct ubi_volume *vol = desc->vol; | 133 | struct ubi_volume *vol = desc->vol; |
129 | 134 | ||
130 | dbg_msg("release volume %d, mode %d", vol->vol_id, desc->mode); | 135 | dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode); |
131 | 136 | ||
132 | if (vol->updating) { | 137 | if (vol->updating) { |
133 | ubi_warn("update of volume %d not finished, volume is damaged", | 138 | ubi_warn("update of volume %d not finished, volume is damaged", |
@@ -136,7 +141,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file) | |||
136 | vol->updating = 0; | 141 | vol->updating = 0; |
137 | vfree(vol->upd_buf); | 142 | vfree(vol->upd_buf); |
138 | } else if (vol->changing_leb) { | 143 | } else if (vol->changing_leb) { |
139 | dbg_msg("only %lld of %lld bytes received for atomic LEB change" | 144 | dbg_gen("only %lld of %lld bytes received for atomic LEB change" |
140 | " for volume %d:%d, cancel", vol->upd_received, | 145 | " for volume %d:%d, cancel", vol->upd_received, |
141 | vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); | 146 | vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); |
142 | vol->changing_leb = 0; | 147 | vol->changing_leb = 0; |
@@ -178,7 +183,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin) | |||
178 | return -EINVAL; | 183 | return -EINVAL; |
179 | } | 184 | } |
180 | 185 | ||
181 | dbg_msg("seek volume %d, offset %lld, origin %d, new offset %lld", | 186 | dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld", |
182 | vol->vol_id, offset, origin, new_offset); | 187 | vol->vol_id, offset, origin, new_offset); |
183 | 188 | ||
184 | file->f_pos = new_offset; | 189 | file->f_pos = new_offset; |
@@ -196,7 +201,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, | |||
196 | void *tbuf; | 201 | void *tbuf; |
197 | uint64_t tmp; | 202 | uint64_t tmp; |
198 | 203 | ||
199 | dbg_msg("read %zd bytes from offset %lld of volume %d", | 204 | dbg_gen("read %zd bytes from offset %lld of volume %d", |
200 | count, *offp, vol->vol_id); | 205 | count, *offp, vol->vol_id); |
201 | 206 | ||
202 | if (vol->updating) { | 207 | if (vol->updating) { |
@@ -211,7 +216,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count, | |||
211 | return 0; | 216 | return 0; |
212 | 217 | ||
213 | if (vol->corrupted) | 218 | if (vol->corrupted) |
214 | dbg_msg("read from corrupted volume %d", vol->vol_id); | 219 | dbg_gen("read from corrupted volume %d", vol->vol_id); |
215 | 220 | ||
216 | if (*offp + count > vol->used_bytes) | 221 | if (*offp + count > vol->used_bytes) |
217 | count_save = count = vol->used_bytes - *offp; | 222 | count_save = count = vol->used_bytes - *offp; |
@@ -280,7 +285,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, | |||
280 | char *tbuf; | 285 | char *tbuf; |
281 | uint64_t tmp; | 286 | uint64_t tmp; |
282 | 287 | ||
283 | dbg_msg("requested: write %zd bytes to offset %lld of volume %u", | 288 | dbg_gen("requested: write %zd bytes to offset %lld of volume %u", |
284 | count, *offp, vol->vol_id); | 289 | count, *offp, vol->vol_id); |
285 | 290 | ||
286 | if (vol->vol_type == UBI_STATIC_VOLUME) | 291 | if (vol->vol_type == UBI_STATIC_VOLUME) |
@@ -290,7 +295,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, | |||
290 | off = do_div(tmp, vol->usable_leb_size); | 295 | off = do_div(tmp, vol->usable_leb_size); |
291 | lnum = tmp; | 296 | lnum = tmp; |
292 | 297 | ||
293 | if (off % ubi->min_io_size) { | 298 | if (off & (ubi->min_io_size - 1)) { |
294 | dbg_err("unaligned position"); | 299 | dbg_err("unaligned position"); |
295 | return -EINVAL; | 300 | return -EINVAL; |
296 | } | 301 | } |
@@ -299,7 +304,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, | |||
299 | count_save = count = vol->used_bytes - *offp; | 304 | count_save = count = vol->used_bytes - *offp; |
300 | 305 | ||
301 | /* We can write only in fractions of the minimum I/O unit */ | 306 | /* We can write only in fractions of the minimum I/O unit */ |
302 | if (count % ubi->min_io_size) { | 307 | if (count & (ubi->min_io_size - 1)) { |
303 | dbg_err("unaligned write length"); | 308 | dbg_err("unaligned write length"); |
304 | return -EINVAL; | 309 | return -EINVAL; |
305 | } | 310 | } |
@@ -347,7 +352,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf, | |||
347 | } | 352 | } |
348 | 353 | ||
349 | #else | 354 | #else |
350 | #define vol_cdev_direct_write(file, buf, count, offp) -EPERM | 355 | #define vol_cdev_direct_write(file, buf, count, offp) (-EPERM) |
351 | #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ | 356 | #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ |
352 | 357 | ||
353 | static ssize_t vol_cdev_write(struct file *file, const char __user *buf, | 358 | static ssize_t vol_cdev_write(struct file *file, const char __user *buf, |
@@ -432,7 +437,8 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file, | |||
432 | break; | 437 | break; |
433 | } | 438 | } |
434 | 439 | ||
435 | rsvd_bytes = vol->reserved_pebs * (ubi->leb_size-vol->data_pad); | 440 | rsvd_bytes = (long long)vol->reserved_pebs * |
441 | ubi->leb_size-vol->data_pad; | ||
436 | if (bytes < 0 || bytes > rsvd_bytes) { | 442 | if (bytes < 0 || bytes > rsvd_bytes) { |
437 | err = -EINVAL; | 443 | err = -EINVAL; |
438 | break; | 444 | break; |
@@ -508,7 +514,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file, | |||
508 | break; | 514 | break; |
509 | } | 515 | } |
510 | 516 | ||
511 | dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); | 517 | dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); |
512 | err = ubi_eba_unmap_leb(ubi, vol, lnum); | 518 | err = ubi_eba_unmap_leb(ubi, vol, lnum); |
513 | if (err) | 519 | if (err) |
514 | break; | 520 | break; |
@@ -559,7 +565,7 @@ static int verify_mkvol_req(const struct ubi_device *ubi, | |||
559 | if (req->alignment > ubi->leb_size) | 565 | if (req->alignment > ubi->leb_size) |
560 | goto bad; | 566 | goto bad; |
561 | 567 | ||
562 | n = req->alignment % ubi->min_io_size; | 568 | n = req->alignment & (ubi->min_io_size - 1); |
563 | if (req->alignment != 1 && n) | 569 | if (req->alignment != 1 && n) |
564 | goto bad; | 570 | goto bad; |
565 | 571 | ||
@@ -568,6 +574,10 @@ static int verify_mkvol_req(const struct ubi_device *ubi, | |||
568 | goto bad; | 574 | goto bad; |
569 | } | 575 | } |
570 | 576 | ||
577 | n = strnlen(req->name, req->name_len + 1); | ||
578 | if (n != req->name_len) | ||
579 | goto bad; | ||
580 | |||
571 | return 0; | 581 | return 0; |
572 | 582 | ||
573 | bad: | 583 | bad: |
@@ -595,6 +605,166 @@ static int verify_rsvol_req(const struct ubi_device *ubi, | |||
595 | return 0; | 605 | return 0; |
596 | } | 606 | } |
597 | 607 | ||
608 | /** | ||
609 | * rename_volumes - rename UBI volumes. | ||
610 | * @ubi: UBI device description object | ||
611 | * @req: volumes re-name request | ||
612 | * | ||
613 | * This is a helper function for the volume re-name IOCTL which validates the | ||
614 | * the request, opens the volume and calls corresponding volumes management | ||
615 | * function. Returns zero in case of success and a negative error code in case | ||
616 | * of failure. | ||
617 | */ | ||
618 | static int rename_volumes(struct ubi_device *ubi, | ||
619 | struct ubi_rnvol_req *req) | ||
620 | { | ||
621 | int i, n, err; | ||
622 | struct list_head rename_list; | ||
623 | struct ubi_rename_entry *re, *re1; | ||
624 | |||
625 | if (req->count < 0 || req->count > UBI_MAX_RNVOL) | ||
626 | return -EINVAL; | ||
627 | |||
628 | if (req->count == 0) | ||
629 | return 0; | ||
630 | |||
631 | /* Validate volume IDs and names in the request */ | ||
632 | for (i = 0; i < req->count; i++) { | ||
633 | if (req->ents[i].vol_id < 0 || | ||
634 | req->ents[i].vol_id >= ubi->vtbl_slots) | ||
635 | return -EINVAL; | ||
636 | if (req->ents[i].name_len < 0) | ||
637 | return -EINVAL; | ||
638 | if (req->ents[i].name_len > UBI_VOL_NAME_MAX) | ||
639 | return -ENAMETOOLONG; | ||
640 | req->ents[i].name[req->ents[i].name_len] = '\0'; | ||
641 | n = strlen(req->ents[i].name); | ||
642 | if (n != req->ents[i].name_len) | ||
643 | err = -EINVAL; | ||
644 | } | ||
645 | |||
646 | /* Make sure volume IDs and names are unique */ | ||
647 | for (i = 0; i < req->count - 1; i++) { | ||
648 | for (n = i + 1; n < req->count; n++) { | ||
649 | if (req->ents[i].vol_id == req->ents[n].vol_id) { | ||
650 | dbg_err("duplicated volume id %d", | ||
651 | req->ents[i].vol_id); | ||
652 | return -EINVAL; | ||
653 | } | ||
654 | if (!strcmp(req->ents[i].name, req->ents[n].name)) { | ||
655 | dbg_err("duplicated volume name \"%s\"", | ||
656 | req->ents[i].name); | ||
657 | return -EINVAL; | ||
658 | } | ||
659 | } | ||
660 | } | ||
661 | |||
662 | /* Create the re-name list */ | ||
663 | INIT_LIST_HEAD(&rename_list); | ||
664 | for (i = 0; i < req->count; i++) { | ||
665 | int vol_id = req->ents[i].vol_id; | ||
666 | int name_len = req->ents[i].name_len; | ||
667 | const char *name = req->ents[i].name; | ||
668 | |||
669 | re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); | ||
670 | if (!re) { | ||
671 | err = -ENOMEM; | ||
672 | goto out_free; | ||
673 | } | ||
674 | |||
675 | re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); | ||
676 | if (IS_ERR(re->desc)) { | ||
677 | err = PTR_ERR(re->desc); | ||
678 | dbg_err("cannot open volume %d, error %d", vol_id, err); | ||
679 | kfree(re); | ||
680 | goto out_free; | ||
681 | } | ||
682 | |||
683 | /* Skip this re-naming if the name does not really change */ | ||
684 | if (re->desc->vol->name_len == name_len && | ||
685 | !memcmp(re->desc->vol->name, name, name_len)) { | ||
686 | ubi_close_volume(re->desc); | ||
687 | kfree(re); | ||
688 | continue; | ||
689 | } | ||
690 | |||
691 | re->new_name_len = name_len; | ||
692 | memcpy(re->new_name, name, name_len); | ||
693 | list_add_tail(&re->list, &rename_list); | ||
694 | dbg_msg("will rename volume %d from \"%s\" to \"%s\"", | ||
695 | vol_id, re->desc->vol->name, name); | ||
696 | } | ||
697 | |||
698 | if (list_empty(&rename_list)) | ||
699 | return 0; | ||
700 | |||
701 | /* Find out the volumes which have to be removed */ | ||
702 | list_for_each_entry(re, &rename_list, list) { | ||
703 | struct ubi_volume_desc *desc; | ||
704 | int no_remove_needed = 0; | ||
705 | |||
706 | /* | ||
707 | * Volume @re->vol_id is going to be re-named to | ||
708 | * @re->new_name, while its current name is @name. If a volume | ||
709 | * with name @re->new_name currently exists, it has to be | ||
710 | * removed, unless it is also re-named in the request (@req). | ||
711 | */ | ||
712 | list_for_each_entry(re1, &rename_list, list) { | ||
713 | if (re->new_name_len == re1->desc->vol->name_len && | ||
714 | !memcmp(re->new_name, re1->desc->vol->name, | ||
715 | re1->desc->vol->name_len)) { | ||
716 | no_remove_needed = 1; | ||
717 | break; | ||
718 | } | ||
719 | } | ||
720 | |||
721 | if (no_remove_needed) | ||
722 | continue; | ||
723 | |||
724 | /* | ||
725 | * It seems we need to remove volume with name @re->new_name, | ||
726 | * if it exists. | ||
727 | */ | ||
728 | desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); | ||
729 | if (IS_ERR(desc)) { | ||
730 | err = PTR_ERR(desc); | ||
731 | if (err == -ENODEV) | ||
732 | /* Re-naming into a non-existing volume name */ | ||
733 | continue; | ||
734 | |||
735 | /* The volume exists but busy, or an error occurred */ | ||
736 | dbg_err("cannot open volume \"%s\", error %d", | ||
737 | re->new_name, err); | ||
738 | goto out_free; | ||
739 | } | ||
740 | |||
741 | re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); | ||
742 | if (!re) { | ||
743 | err = -ENOMEM; | ||
744 | ubi_close_volume(desc); | ||
745 | goto out_free; | ||
746 | } | ||
747 | |||
748 | re->remove = 1; | ||
749 | re->desc = desc; | ||
750 | list_add(&re->list, &rename_list); | ||
751 | dbg_msg("will remove volume %d, name \"%s\"", | ||
752 | re->desc->vol->vol_id, re->desc->vol->name); | ||
753 | } | ||
754 | |||
755 | mutex_lock(&ubi->volumes_mutex); | ||
756 | err = ubi_rename_volumes(ubi, &rename_list); | ||
757 | mutex_unlock(&ubi->volumes_mutex); | ||
758 | |||
759 | out_free: | ||
760 | list_for_each_entry_safe(re, re1, &rename_list, list) { | ||
761 | ubi_close_volume(re->desc); | ||
762 | list_del(&re->list); | ||
763 | kfree(re); | ||
764 | } | ||
765 | return err; | ||
766 | } | ||
767 | |||
598 | static int ubi_cdev_ioctl(struct inode *inode, struct file *file, | 768 | static int ubi_cdev_ioctl(struct inode *inode, struct file *file, |
599 | unsigned int cmd, unsigned long arg) | 769 | unsigned int cmd, unsigned long arg) |
600 | { | 770 | { |
@@ -616,19 +786,18 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, | |||
616 | { | 786 | { |
617 | struct ubi_mkvol_req req; | 787 | struct ubi_mkvol_req req; |
618 | 788 | ||
619 | dbg_msg("create volume"); | 789 | dbg_gen("create volume"); |
620 | err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); | 790 | err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); |
621 | if (err) { | 791 | if (err) { |
622 | err = -EFAULT; | 792 | err = -EFAULT; |
623 | break; | 793 | break; |
624 | } | 794 | } |
625 | 795 | ||
796 | req.name[req.name_len] = '\0'; | ||
626 | err = verify_mkvol_req(ubi, &req); | 797 | err = verify_mkvol_req(ubi, &req); |
627 | if (err) | 798 | if (err) |
628 | break; | 799 | break; |
629 | 800 | ||
630 | req.name[req.name_len] = '\0'; | ||
631 | |||
632 | mutex_lock(&ubi->volumes_mutex); | 801 | mutex_lock(&ubi->volumes_mutex); |
633 | err = ubi_create_volume(ubi, &req); | 802 | err = ubi_create_volume(ubi, &req); |
634 | mutex_unlock(&ubi->volumes_mutex); | 803 | mutex_unlock(&ubi->volumes_mutex); |
@@ -647,7 +816,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, | |||
647 | { | 816 | { |
648 | int vol_id; | 817 | int vol_id; |
649 | 818 | ||
650 | dbg_msg("remove volume"); | 819 | dbg_gen("remove volume"); |
651 | err = get_user(vol_id, (__user int32_t *)argp); | 820 | err = get_user(vol_id, (__user int32_t *)argp); |
652 | if (err) { | 821 | if (err) { |
653 | err = -EFAULT; | 822 | err = -EFAULT; |
@@ -661,7 +830,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, | |||
661 | } | 830 | } |
662 | 831 | ||
663 | mutex_lock(&ubi->volumes_mutex); | 832 | mutex_lock(&ubi->volumes_mutex); |
664 | err = ubi_remove_volume(desc); | 833 | err = ubi_remove_volume(desc, 0); |
665 | mutex_unlock(&ubi->volumes_mutex); | 834 | mutex_unlock(&ubi->volumes_mutex); |
666 | 835 | ||
667 | /* | 836 | /* |
@@ -680,7 +849,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, | |||
680 | uint64_t tmp; | 849 | uint64_t tmp; |
681 | struct ubi_rsvol_req req; | 850 | struct ubi_rsvol_req req; |
682 | 851 | ||
683 | dbg_msg("re-size volume"); | 852 | dbg_gen("re-size volume"); |
684 | err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); | 853 | err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); |
685 | if (err) { | 854 | if (err) { |
686 | err = -EFAULT; | 855 | err = -EFAULT; |
@@ -708,6 +877,32 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, | |||
708 | break; | 877 | break; |
709 | } | 878 | } |
710 | 879 | ||
880 | /* Re-name volumes command */ | ||
881 | case UBI_IOCRNVOL: | ||
882 | { | ||
883 | struct ubi_rnvol_req *req; | ||
884 | |||
885 | dbg_msg("re-name volumes"); | ||
886 | req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); | ||
887 | if (!req) { | ||
888 | err = -ENOMEM; | ||
889 | break; | ||
890 | } | ||
891 | |||
892 | err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req)); | ||
893 | if (err) { | ||
894 | err = -EFAULT; | ||
895 | kfree(req); | ||
896 | break; | ||
897 | } | ||
898 | |||
899 | mutex_lock(&ubi->mult_mutex); | ||
900 | err = rename_volumes(ubi, req); | ||
901 | mutex_unlock(&ubi->mult_mutex); | ||
902 | kfree(req); | ||
903 | break; | ||
904 | } | ||
905 | |||
711 | default: | 906 | default: |
712 | err = -ENOTTY; | 907 | err = -ENOTTY; |
713 | break; | 908 | break; |
@@ -733,7 +928,7 @@ static int ctrl_cdev_ioctl(struct inode *inode, struct file *file, | |||
733 | struct ubi_attach_req req; | 928 | struct ubi_attach_req req; |
734 | struct mtd_info *mtd; | 929 | struct mtd_info *mtd; |
735 | 930 | ||
736 | dbg_msg("attach MTD device"); | 931 | dbg_gen("attach MTD device"); |
737 | err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); | 932 | err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); |
738 | if (err) { | 933 | if (err) { |
739 | err = -EFAULT; | 934 | err = -EFAULT; |
@@ -773,7 +968,7 @@ static int ctrl_cdev_ioctl(struct inode *inode, struct file *file, | |||
773 | { | 968 | { |
774 | int ubi_num; | 969 | int ubi_num; |
775 | 970 | ||
776 | dbg_msg("dettach MTD device"); | 971 | dbg_gen("detach MTD device"); |
777 | err = get_user(ubi_num, (__user int32_t *)argp); | 972 | err = get_user(ubi_num, (__user int32_t *)argp); |
778 | if (err) { | 973 | if (err) { |
779 | err = -EFAULT; | 974 | err = -EFAULT; |
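The new UBI_IOCRNVOL command is issued on the UBI device node (/dev/ubiX) and carries one struct ubi_rnvol_req describing all renames, which rename_volumes() then applies as a single atomic group under ubi->mult_mutex. A hedged user-space sketch follows; the request field names (count, ents[].vol_id, ents[].name_len, ents[].name) are assumptions taken from the UBI user-space header and should be checked against the ubi-user.h of the kernel in use.

/* Hedged sketch: rename volume 0 of /dev/ubi0 to "rootfs-new" using the
 * new UBI_IOCRNVOL ioctl. Struct field names are assumptions; verify
 * against include/mtd/ubi-user.h. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int main(void)
{
        struct ubi_rnvol_req req;
        int fd = open("/dev/ubi0", O_RDWR);

        if (fd < 0)
                return 1;

        memset(&req, 0, sizeof(req));
        req.count = 1;                              /* one rename entry */
        req.ents[0].vol_id = 0;                     /* volume to rename */
        req.ents[0].name_len = strlen("rootfs-new");
        strcpy(req.ents[0].name, "rootfs-new");

        if (ioctl(fd, UBI_IOCRNVOL, &req) < 0)      /* applied atomically */
                perror("UBI_IOCRNVOL");

        close(fd);
        return 0;
}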
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c index 56956ec2845f..c0ed60e8ade9 100644 --- a/drivers/mtd/ubi/debug.c +++ b/drivers/mtd/ubi/debug.c | |||
@@ -24,7 +24,7 @@ | |||
24 | * changes. | 24 | * changes. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG | 27 | #ifdef CONFIG_MTD_UBI_DEBUG |
28 | 28 | ||
29 | #include "ubi.h" | 29 | #include "ubi.h" |
30 | 30 | ||
@@ -34,14 +34,19 @@ | |||
34 | */ | 34 | */ |
35 | void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) | 35 | void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) |
36 | { | 36 | { |
37 | dbg_msg("erase counter header dump:"); | 37 | printk(KERN_DEBUG "Erase counter header dump:\n"); |
38 | dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic)); | 38 | printk(KERN_DEBUG "\tmagic %#08x\n", |
39 | dbg_msg("version %d", (int)ec_hdr->version); | 39 | be32_to_cpu(ec_hdr->magic)); |
40 | dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec)); | 40 | printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version); |
41 | dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset)); | 41 | printk(KERN_DEBUG "\tec %llu\n", |
42 | dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset)); | 42 | (long long)be64_to_cpu(ec_hdr->ec)); |
43 | dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc)); | 43 | printk(KERN_DEBUG "\tvid_hdr_offset %d\n", |
44 | dbg_msg("erase counter header hexdump:"); | 44 | be32_to_cpu(ec_hdr->vid_hdr_offset)); |
45 | printk(KERN_DEBUG "\tdata_offset %d\n", | ||
46 | be32_to_cpu(ec_hdr->data_offset)); | ||
47 | printk(KERN_DEBUG "\thdr_crc %#08x\n", | ||
48 | be32_to_cpu(ec_hdr->hdr_crc)); | ||
49 | printk(KERN_DEBUG "erase counter header hexdump:\n"); | ||
45 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | 50 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
46 | ec_hdr, UBI_EC_HDR_SIZE, 1); | 51 | ec_hdr, UBI_EC_HDR_SIZE, 1); |
47 | } | 52 | } |
@@ -52,22 +57,23 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) | |||
52 | */ | 57 | */ |
53 | void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) | 58 | void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) |
54 | { | 59 | { |
55 | dbg_msg("volume identifier header dump:"); | 60 | printk(KERN_DEBUG "Volume identifier header dump:\n"); |
56 | dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic)); | 61 | printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic)); |
57 | dbg_msg("version %d", (int)vid_hdr->version); | 62 | printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version); |
58 | dbg_msg("vol_type %d", (int)vid_hdr->vol_type); | 63 | printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type); |
59 | dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag); | 64 | printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag); |
60 | dbg_msg("compat %d", (int)vid_hdr->compat); | 65 | printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat); |
61 | dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id)); | 66 | printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id)); |
62 | dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum)); | 67 | printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum)); |
63 | dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver)); | 68 | printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size)); |
64 | dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size)); | 69 | printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs)); |
65 | dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs)); | 70 | printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad)); |
66 | dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad)); | 71 | printk(KERN_DEBUG "\tsqnum %llu\n", |
67 | dbg_msg("sqnum %llu", | ||
68 | (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); | 72 | (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); |
69 | dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc)); | 73 | printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc)); |
70 | dbg_msg("volume identifier header hexdump:"); | 74 | printk(KERN_DEBUG "Volume identifier header hexdump:\n"); |
75 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | ||
76 | vid_hdr, UBI_VID_HDR_SIZE, 1); | ||
71 | } | 77 | } |
72 | 78 | ||
73 | /** | 79 | /** |
@@ -76,27 +82,27 @@ void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) | |||
76 | */ | 82 | */ |
77 | void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) | 83 | void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) |
78 | { | 84 | { |
79 | dbg_msg("volume information dump:"); | 85 | printk(KERN_DEBUG "Volume information dump:\n"); |
80 | dbg_msg("vol_id %d", vol->vol_id); | 86 | printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id); |
81 | dbg_msg("reserved_pebs %d", vol->reserved_pebs); | 87 | printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs); |
82 | dbg_msg("alignment %d", vol->alignment); | 88 | printk(KERN_DEBUG "\talignment %d\n", vol->alignment); |
83 | dbg_msg("data_pad %d", vol->data_pad); | 89 | printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad); |
84 | dbg_msg("vol_type %d", vol->vol_type); | 90 | printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type); |
85 | dbg_msg("name_len %d", vol->name_len); | 91 | printk(KERN_DEBUG "\tname_len %d\n", vol->name_len); |
86 | dbg_msg("usable_leb_size %d", vol->usable_leb_size); | 92 | printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size); |
87 | dbg_msg("used_ebs %d", vol->used_ebs); | 93 | printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs); |
88 | dbg_msg("used_bytes %lld", vol->used_bytes); | 94 | printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes); |
89 | dbg_msg("last_eb_bytes %d", vol->last_eb_bytes); | 95 | printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes); |
90 | dbg_msg("corrupted %d", vol->corrupted); | 96 | printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted); |
91 | dbg_msg("upd_marker %d", vol->upd_marker); | 97 | printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker); |
92 | 98 | ||
93 | if (vol->name_len <= UBI_VOL_NAME_MAX && | 99 | if (vol->name_len <= UBI_VOL_NAME_MAX && |
94 | strnlen(vol->name, vol->name_len + 1) == vol->name_len) { | 100 | strnlen(vol->name, vol->name_len + 1) == vol->name_len) { |
95 | dbg_msg("name %s", vol->name); | 101 | printk(KERN_DEBUG "\tname %s\n", vol->name); |
96 | } else { | 102 | } else { |
97 | dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c", | 103 | printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", |
98 | vol->name[0], vol->name[1], vol->name[2], | 104 | vol->name[0], vol->name[1], vol->name[2], |
99 | vol->name[3], vol->name[4]); | 105 | vol->name[3], vol->name[4]); |
100 | } | 106 | } |
101 | } | 107 | } |
102 | 108 | ||
@@ -109,28 +115,29 @@ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) | |||
109 | { | 115 | { |
110 | int name_len = be16_to_cpu(r->name_len); | 116 | int name_len = be16_to_cpu(r->name_len); |
111 | 117 | ||
112 | dbg_msg("volume table record %d dump:", idx); | 118 | printk(KERN_DEBUG "Volume table record %d dump:\n", idx); |
113 | dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs)); | 119 | printk(KERN_DEBUG "\treserved_pebs %d\n", |
114 | dbg_msg("alignment %d", be32_to_cpu(r->alignment)); | 120 | be32_to_cpu(r->reserved_pebs)); |
115 | dbg_msg("data_pad %d", be32_to_cpu(r->data_pad)); | 121 | printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment)); |
116 | dbg_msg("vol_type %d", (int)r->vol_type); | 122 | printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad)); |
117 | dbg_msg("upd_marker %d", (int)r->upd_marker); | 123 | printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type); |
118 | dbg_msg("name_len %d", name_len); | 124 | printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker); |
125 | printk(KERN_DEBUG "\tname_len %d\n", name_len); | ||
119 | 126 | ||
120 | if (r->name[0] == '\0') { | 127 | if (r->name[0] == '\0') { |
121 | dbg_msg("name NULL"); | 128 | printk(KERN_DEBUG "\tname NULL\n"); |
122 | return; | 129 | return; |
123 | } | 130 | } |
124 | 131 | ||
125 | if (name_len <= UBI_VOL_NAME_MAX && | 132 | if (name_len <= UBI_VOL_NAME_MAX && |
126 | strnlen(&r->name[0], name_len + 1) == name_len) { | 133 | strnlen(&r->name[0], name_len + 1) == name_len) { |
127 | dbg_msg("name %s", &r->name[0]); | 134 | printk(KERN_DEBUG "\tname %s\n", &r->name[0]); |
128 | } else { | 135 | } else { |
129 | dbg_msg("1st 5 characters of the name: %c%c%c%c%c", | 136 | printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", |
130 | r->name[0], r->name[1], r->name[2], r->name[3], | 137 | r->name[0], r->name[1], r->name[2], r->name[3], |
131 | r->name[4]); | 138 | r->name[4]); |
132 | } | 139 | } |
133 | dbg_msg("crc %#08x", be32_to_cpu(r->crc)); | 140 | printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc)); |
134 | } | 141 | } |
135 | 142 | ||
136 | /** | 143 | /** |
@@ -139,15 +146,15 @@ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx) | |||
139 | */ | 146 | */ |
140 | void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) | 147 | void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) |
141 | { | 148 | { |
142 | dbg_msg("volume scanning information dump:"); | 149 | printk(KERN_DEBUG "Volume scanning information dump:\n"); |
143 | dbg_msg("vol_id %d", sv->vol_id); | 150 | printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id); |
144 | dbg_msg("highest_lnum %d", sv->highest_lnum); | 151 | printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum); |
145 | dbg_msg("leb_count %d", sv->leb_count); | 152 | printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count); |
146 | dbg_msg("compat %d", sv->compat); | 153 | printk(KERN_DEBUG "\tcompat %d\n", sv->compat); |
147 | dbg_msg("vol_type %d", sv->vol_type); | 154 | printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type); |
148 | dbg_msg("used_ebs %d", sv->used_ebs); | 155 | printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs); |
149 | dbg_msg("last_data_size %d", sv->last_data_size); | 156 | printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size); |
150 | dbg_msg("data_pad %d", sv->data_pad); | 157 | printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad); |
151 | } | 158 | } |
152 | 159 | ||
153 | /** | 160 | /** |
@@ -157,14 +164,13 @@ void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) | |||
157 | */ | 164 | */ |
158 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) | 165 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) |
159 | { | 166 | { |
160 | dbg_msg("eraseblock scanning information dump:"); | 167 | printk(KERN_DEBUG "eraseblock scanning information dump:\n"); |
161 | dbg_msg("ec %d", seb->ec); | 168 | printk(KERN_DEBUG "\tec %d\n", seb->ec); |
162 | dbg_msg("pnum %d", seb->pnum); | 169 | printk(KERN_DEBUG "\tpnum %d\n", seb->pnum); |
163 | if (type == 0) { | 170 | if (type == 0) { |
164 | dbg_msg("lnum %d", seb->lnum); | 171 | printk(KERN_DEBUG "\tlnum %d\n", seb->lnum); |
165 | dbg_msg("scrub %d", seb->scrub); | 172 | printk(KERN_DEBUG "\tscrub %d\n", seb->scrub); |
166 | dbg_msg("sqnum %llu", seb->sqnum); | 173 | printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum); |
167 | dbg_msg("leb_ver %u", seb->leb_ver); | ||
168 | } | 174 | } |
169 | } | 175 | } |
170 | 176 | ||
@@ -176,16 +182,16 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req) | |||
176 | { | 182 | { |
177 | char nm[17]; | 183 | char nm[17]; |
178 | 184 | ||
179 | dbg_msg("volume creation request dump:"); | 185 | printk(KERN_DEBUG "Volume creation request dump:\n"); |
180 | dbg_msg("vol_id %d", req->vol_id); | 186 | printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id); |
181 | dbg_msg("alignment %d", req->alignment); | 187 | printk(KERN_DEBUG "\talignment %d\n", req->alignment); |
182 | dbg_msg("bytes %lld", (long long)req->bytes); | 188 | printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes); |
183 | dbg_msg("vol_type %d", req->vol_type); | 189 | printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type); |
184 | dbg_msg("name_len %d", req->name_len); | 190 | printk(KERN_DEBUG "\tname_len %d\n", req->name_len); |
185 | 191 | ||
186 | memcpy(nm, req->name, 16); | 192 | memcpy(nm, req->name, 16); |
187 | nm[16] = 0; | 193 | nm[16] = 0; |
188 | dbg_msg("the 1st 16 characters of the name: %s", nm); | 194 | printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm); |
189 | } | 195 | } |
190 | 196 | ||
191 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ | 197 | #endif /* CONFIG_MTD_UBI_DEBUG */ |
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 8ea99d8c9e1f..78e914d23ece 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h | |||
@@ -24,21 +24,16 @@ | |||
24 | #ifdef CONFIG_MTD_UBI_DEBUG | 24 | #ifdef CONFIG_MTD_UBI_DEBUG |
25 | #include <linux/random.h> | 25 | #include <linux/random.h> |
26 | 26 | ||
27 | #define ubi_assert(expr) BUG_ON(!(expr)) | ||
28 | #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) | 27 | #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) |
29 | #else | ||
30 | #define ubi_assert(expr) ({}) | ||
31 | #define dbg_err(fmt, ...) ({}) | ||
32 | #endif | ||
33 | 28 | ||
34 | #ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT | 29 | #define ubi_assert(expr) do { \ |
35 | #define DBG_DISABLE_BGT 1 | 30 | if (unlikely(!(expr))) { \ |
36 | #else | 31 | printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ |
37 | #define DBG_DISABLE_BGT 0 | 32 | __func__, __LINE__, current->pid); \ |
38 | #endif | 33 | ubi_dbg_dump_stack(); \ |
34 | } \ | ||
35 | } while (0) | ||
39 | 36 | ||
40 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG | ||
41 | /* Generic debugging message */ | ||
42 | #define dbg_msg(fmt, ...) \ | 37 | #define dbg_msg(fmt, ...) \ |
43 | printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ | 38 | printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ |
44 | current->pid, __func__, ##__VA_ARGS__) | 39 | current->pid, __func__, ##__VA_ARGS__) |
@@ -61,36 +56,29 @@ void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv); | |||
61 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); | 56 | void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); |
62 | void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); | 57 | void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); |
63 | 58 | ||
59 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG | ||
60 | /* General debugging messages */ | ||
61 | #define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) | ||
64 | #else | 62 | #else |
65 | 63 | #define dbg_gen(fmt, ...) ({}) | |
66 | #define dbg_msg(fmt, ...) ({}) | 64 | #endif |
67 | #define ubi_dbg_dump_stack() ({}) | ||
68 | #define ubi_dbg_dump_ec_hdr(ec_hdr) ({}) | ||
69 | #define ubi_dbg_dump_vid_hdr(vid_hdr) ({}) | ||
70 | #define ubi_dbg_dump_vol_info(vol) ({}) | ||
71 | #define ubi_dbg_dump_vtbl_record(r, idx) ({}) | ||
72 | #define ubi_dbg_dump_sv(sv) ({}) | ||
73 | #define ubi_dbg_dump_seb(seb, type) ({}) | ||
74 | #define ubi_dbg_dump_mkvol_req(req) ({}) | ||
75 | |||
76 | #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ | ||
77 | 65 | ||
78 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA | 66 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA |
79 | /* Messages from the eraseblock association unit */ | 67 | /* Messages from the eraseblock association sub-system */ |
80 | #define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) | 68 | #define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) |
81 | #else | 69 | #else |
82 | #define dbg_eba(fmt, ...) ({}) | 70 | #define dbg_eba(fmt, ...) ({}) |
83 | #endif | 71 | #endif |
84 | 72 | ||
85 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL | 73 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL |
86 | /* Messages from the wear-leveling unit */ | 74 | /* Messages from the wear-leveling sub-system */ |
87 | #define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) | 75 | #define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) |
88 | #else | 76 | #else |
89 | #define dbg_wl(fmt, ...) ({}) | 77 | #define dbg_wl(fmt, ...) ({}) |
90 | #endif | 78 | #endif |
91 | 79 | ||
92 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO | 80 | #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO |
93 | /* Messages from the input/output unit */ | 81 | /* Messages from the input/output sub-system */ |
94 | #define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) | 82 | #define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) |
95 | #else | 83 | #else |
96 | #define dbg_io(fmt, ...) ({}) | 84 | #define dbg_io(fmt, ...) ({}) |
@@ -105,6 +93,12 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); | |||
105 | #define UBI_IO_DEBUG 0 | 93 | #define UBI_IO_DEBUG 0 |
106 | #endif | 94 | #endif |
107 | 95 | ||
96 | #ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT | ||
97 | #define DBG_DISABLE_BGT 1 | ||
98 | #else | ||
99 | #define DBG_DISABLE_BGT 0 | ||
100 | #endif | ||
101 | |||
108 | #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS | 102 | #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS |
109 | /** | 103 | /** |
110 | * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip. | 104 | * ubi_dbg_is_bitflip - if it is time to emulate a bit-flip. |
@@ -149,4 +143,30 @@ static inline int ubi_dbg_is_erase_failure(void) | |||
149 | #define ubi_dbg_is_erase_failure() 0 | 143 | #define ubi_dbg_is_erase_failure() 0 |
150 | #endif | 144 | #endif |
151 | 145 | ||
146 | #else | ||
147 | |||
148 | #define ubi_assert(expr) ({}) | ||
149 | #define dbg_err(fmt, ...) ({}) | ||
150 | #define dbg_msg(fmt, ...) ({}) | ||
151 | #define dbg_gen(fmt, ...) ({}) | ||
152 | #define dbg_eba(fmt, ...) ({}) | ||
153 | #define dbg_wl(fmt, ...) ({}) | ||
154 | #define dbg_io(fmt, ...) ({}) | ||
155 | #define dbg_bld(fmt, ...) ({}) | ||
156 | #define ubi_dbg_dump_stack() ({}) | ||
157 | #define ubi_dbg_dump_ec_hdr(ec_hdr) ({}) | ||
158 | #define ubi_dbg_dump_vid_hdr(vid_hdr) ({}) | ||
159 | #define ubi_dbg_dump_vol_info(vol) ({}) | ||
160 | #define ubi_dbg_dump_vtbl_record(r, idx) ({}) | ||
161 | #define ubi_dbg_dump_sv(sv) ({}) | ||
162 | #define ubi_dbg_dump_seb(seb, type) ({}) | ||
163 | #define ubi_dbg_dump_mkvol_req(req) ({}) | ||
164 | |||
165 | #define UBI_IO_DEBUG 0 | ||
166 | #define DBG_DISABLE_BGT 0 | ||
167 | #define ubi_dbg_is_bitflip() 0 | ||
168 | #define ubi_dbg_is_write_failure() 0 | ||
169 | #define ubi_dbg_is_erase_failure() 0 | ||
170 | |||
171 | #endif /* !CONFIG_MTD_UBI_DEBUG */ | ||
152 | #endif /* !__UBI_DEBUG_H__ */ | 172 | #endif /* !__UBI_DEBUG_H__ */ |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 7ce91ca742b1..e04bcf1dff87 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
@@ -19,20 +19,20 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * The UBI Eraseblock Association (EBA) unit. | 22 | * The UBI Eraseblock Association (EBA) sub-system. |
23 | * | 23 | * |
24 | * This unit is responsible for I/O to/from logical eraseblock. | 24 | * This sub-system is responsible for I/O to/from logical eraseblocks. |
25 | * | 25 | * |
26 | * Although in this implementation the EBA table is fully kept and managed in | 26 | * Although in this implementation the EBA table is fully kept and managed in |
27 | * RAM, which assumes poor scalability, it might be (partially) maintained on | 27 | * RAM, which assumes poor scalability, it might be (partially) maintained on |
28 | * flash in future implementations. | 28 | * flash in future implementations. |
29 | * | 29 | * |
30 | * The EBA unit implements per-logical eraseblock locking. Before accessing a | 30 | * The EBA sub-system implements per-logical eraseblock locking. Before |
31 | * logical eraseblock it is locked for reading or writing. The per-logical | 31 | * accessing a logical eraseblock it is locked for reading or writing. The |
32 | * eraseblock locking is implemented by means of the lock tree. The lock tree | 32 | * per-logical eraseblock locking is implemented by means of the lock tree. The |
33 | * is an RB-tree which refers all the currently locked logical eraseblocks. The | 33 | * lock tree is an RB-tree which refers all the currently locked logical |
34 | * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by | 34 | * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects. |
35 | * (@vol_id, @lnum) pairs. | 35 | * They are indexed by (@vol_id, @lnum) pairs. |
36 | * | 36 | * |
37 | * EBA also maintains the global sequence counter which is incremented each | 37 | * EBA also maintains the global sequence counter which is incremented each |
38 | * time a logical eraseblock is mapped to a physical eraseblock and it is | 38 | * time a logical eraseblock is mapped to a physical eraseblock and it is |
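The lock tree mentioned above is small enough to sketch: every locked LEB is represented by a reference-counted &struct ubi_ltree_entry keyed by (vol_id, lnum). The lookup below mirrors what eba.c does and is shown only as an illustration of the data structure; it assumes the field names from ubi.h.

/* Illustrative lookup in the lock tree: compare first by vol_id, then by
 * lnum. Must be called with ubi->ltree_lock held. Returns the entry if
 * this LEB is currently locked, or NULL if nobody holds it. */
#include "ubi.h"

static struct ubi_ltree_entry *ltree_lookup_sketch(struct ubi_device *ubi,
                                                   int vol_id, int lnum)
{
        struct rb_node *p = ubi->ltree.rb_node;

        while (p) {
                struct ubi_ltree_entry *le;

                le = rb_entry(p, struct ubi_ltree_entry, rb);

                if (vol_id < le->vol_id)
                        p = p->rb_left;
                else if (vol_id > le->vol_id)
                        p = p->rb_right;
                else if (lnum < le->lnum)
                        p = p->rb_left;
                else if (lnum > le->lnum)
                        p = p->rb_right;
                else
                        return le;
        }

        return NULL;
}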
@@ -189,9 +189,7 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi, | |||
189 | le->users += 1; | 189 | le->users += 1; |
190 | spin_unlock(&ubi->ltree_lock); | 190 | spin_unlock(&ubi->ltree_lock); |
191 | 191 | ||
192 | if (le_free) | 192 | kfree(le_free); |
193 | kfree(le_free); | ||
194 | |||
195 | return le; | 193 | return le; |
196 | } | 194 | } |
197 | 195 | ||
@@ -223,22 +221,18 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum) | |||
223 | */ | 221 | */ |
224 | static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) | 222 | static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) |
225 | { | 223 | { |
226 | int free = 0; | ||
227 | struct ubi_ltree_entry *le; | 224 | struct ubi_ltree_entry *le; |
228 | 225 | ||
229 | spin_lock(&ubi->ltree_lock); | 226 | spin_lock(&ubi->ltree_lock); |
230 | le = ltree_lookup(ubi, vol_id, lnum); | 227 | le = ltree_lookup(ubi, vol_id, lnum); |
231 | le->users -= 1; | 228 | le->users -= 1; |
232 | ubi_assert(le->users >= 0); | 229 | ubi_assert(le->users >= 0); |
230 | up_read(&le->mutex); | ||
233 | if (le->users == 0) { | 231 | if (le->users == 0) { |
234 | rb_erase(&le->rb, &ubi->ltree); | 232 | rb_erase(&le->rb, &ubi->ltree); |
235 | free = 1; | 233 | kfree(le); |
236 | } | 234 | } |
237 | spin_unlock(&ubi->ltree_lock); | 235 | spin_unlock(&ubi->ltree_lock); |
238 | |||
239 | up_read(&le->mutex); | ||
240 | if (free) | ||
241 | kfree(le); | ||
242 | } | 236 | } |
243 | 237 | ||
244 | /** | 238 | /** |
@@ -274,7 +268,6 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum) | |||
274 | */ | 268 | */ |
275 | static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) | 269 | static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) |
276 | { | 270 | { |
277 | int free; | ||
278 | struct ubi_ltree_entry *le; | 271 | struct ubi_ltree_entry *le; |
279 | 272 | ||
280 | le = ltree_add_entry(ubi, vol_id, lnum); | 273 | le = ltree_add_entry(ubi, vol_id, lnum); |
@@ -289,12 +282,9 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) | |||
289 | ubi_assert(le->users >= 0); | 282 | ubi_assert(le->users >= 0); |
290 | if (le->users == 0) { | 283 | if (le->users == 0) { |
291 | rb_erase(&le->rb, &ubi->ltree); | 284 | rb_erase(&le->rb, &ubi->ltree); |
292 | free = 1; | ||
293 | } else | ||
294 | free = 0; | ||
295 | spin_unlock(&ubi->ltree_lock); | ||
296 | if (free) | ||
297 | kfree(le); | 285 | kfree(le); |
286 | } | ||
287 | spin_unlock(&ubi->ltree_lock); | ||
298 | 288 | ||
299 | return 1; | 289 | return 1; |
300 | } | 290 | } |
@@ -307,23 +297,18 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) | |||
307 | */ | 297 | */ |
308 | static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) | 298 | static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) |
309 | { | 299 | { |
310 | int free; | ||
311 | struct ubi_ltree_entry *le; | 300 | struct ubi_ltree_entry *le; |
312 | 301 | ||
313 | spin_lock(&ubi->ltree_lock); | 302 | spin_lock(&ubi->ltree_lock); |
314 | le = ltree_lookup(ubi, vol_id, lnum); | 303 | le = ltree_lookup(ubi, vol_id, lnum); |
315 | le->users -= 1; | 304 | le->users -= 1; |
316 | ubi_assert(le->users >= 0); | 305 | ubi_assert(le->users >= 0); |
306 | up_write(&le->mutex); | ||
317 | if (le->users == 0) { | 307 | if (le->users == 0) { |
318 | rb_erase(&le->rb, &ubi->ltree); | 308 | rb_erase(&le->rb, &ubi->ltree); |
319 | free = 1; | ||
320 | } else | ||
321 | free = 0; | ||
322 | spin_unlock(&ubi->ltree_lock); | ||
323 | |||
324 | up_write(&le->mutex); | ||
325 | if (free) | ||
326 | kfree(le); | 309 | kfree(le); |
310 | } | ||
311 | spin_unlock(&ubi->ltree_lock); | ||
327 | } | 312 | } |
328 | 313 | ||
329 | /** | 314 | /** |
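The simplification above is safe because up_read()/up_write() never sleep and kfree() may be called under a spinlock: once the last user has dropped the entry and it has been erased from the tree while ltree_lock is held, no other thread can look it up again. Condensed to one helper (same field names as struct ubi_ltree_entry), the release pattern now looks roughly like this:

/* Condensed sketch of the unlock path after this change: release the
 * per-entry rwsem, then unlink and free the entry under ltree_lock once
 * the last user is gone. */
#include "ubi.h"

static void ltree_put_sketch(struct ubi_device *ubi, struct ubi_ltree_entry *le)
{
        spin_lock(&ubi->ltree_lock);
        le->users -= 1;
        up_write(&le->mutex);           /* does not sleep */
        if (le->users == 0) {
                rb_erase(&le->rb, &ubi->ltree);
                kfree(le);              /* kfree() is safe under a spinlock */
        }
        spin_unlock(&ubi->ltree_lock);
}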
@@ -516,9 +501,8 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | |||
516 | struct ubi_vid_hdr *vid_hdr; | 501 | struct ubi_vid_hdr *vid_hdr; |
517 | 502 | ||
518 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 503 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
519 | if (!vid_hdr) { | 504 | if (!vid_hdr) |
520 | return -ENOMEM; | 505 | return -ENOMEM; |
521 | } | ||
522 | 506 | ||
523 | mutex_lock(&ubi->buf_mutex); | 507 | mutex_lock(&ubi->buf_mutex); |
524 | 508 | ||
@@ -752,7 +736,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol, | |||
752 | /* If this is the last LEB @len may be unaligned */ | 736 | /* If this is the last LEB @len may be unaligned */ |
753 | len = ALIGN(data_size, ubi->min_io_size); | 737 | len = ALIGN(data_size, ubi->min_io_size); |
754 | else | 738 | else |
755 | ubi_assert(len % ubi->min_io_size == 0); | 739 | ubi_assert(!(len & (ubi->min_io_size - 1))); |
756 | 740 | ||
757 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 741 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
758 | if (!vid_hdr) | 742 | if (!vid_hdr) |
@@ -919,7 +903,7 @@ retry: | |||
919 | } | 903 | } |
920 | 904 | ||
921 | if (vol->eba_tbl[lnum] >= 0) { | 905 | if (vol->eba_tbl[lnum] >= 0) { |
922 | err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1); | 906 | err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0); |
923 | if (err) | 907 | if (err) |
924 | goto out_leb_unlock; | 908 | goto out_leb_unlock; |
925 | } | 909 | } |
@@ -1141,7 +1125,7 @@ out_unlock_leb: | |||
1141 | } | 1125 | } |
1142 | 1126 | ||
1143 | /** | 1127 | /** |
1144 | * ubi_eba_init_scan - initialize the EBA unit using scanning information. | 1128 | * ubi_eba_init_scan - initialize the EBA sub-system using scanning information. |
1145 | * @ubi: UBI device description object | 1129 | * @ubi: UBI device description object |
1146 | * @si: scanning information | 1130 | * @si: scanning information |
1147 | * | 1131 | * |
@@ -1156,7 +1140,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1156 | struct ubi_scan_leb *seb; | 1140 | struct ubi_scan_leb *seb; |
1157 | struct rb_node *rb; | 1141 | struct rb_node *rb; |
1158 | 1142 | ||
1159 | dbg_eba("initialize EBA unit"); | 1143 | dbg_eba("initialize EBA sub-system"); |
1160 | 1144 | ||
1161 | spin_lock_init(&ubi->ltree_lock); | 1145 | spin_lock_init(&ubi->ltree_lock); |
1162 | mutex_init(&ubi->alc_mutex); | 1146 | mutex_init(&ubi->alc_mutex); |
@@ -1222,7 +1206,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1222 | ubi->rsvd_pebs += ubi->beb_rsvd_pebs; | 1206 | ubi->rsvd_pebs += ubi->beb_rsvd_pebs; |
1223 | } | 1207 | } |
1224 | 1208 | ||
1225 | dbg_eba("EBA unit is initialized"); | 1209 | dbg_eba("EBA sub-system is initialized"); |
1226 | return 0; | 1210 | return 0; |
1227 | 1211 | ||
1228 | out_free: | 1212 | out_free: |
@@ -1233,20 +1217,3 @@ out_free: | |||
1233 | } | 1217 | } |
1234 | return err; | 1218 | return err; |
1235 | } | 1219 | } |
1236 | |||
1237 | /** | ||
1238 | * ubi_eba_close - close EBA unit. | ||
1239 | * @ubi: UBI device description object | ||
1240 | */ | ||
1241 | void ubi_eba_close(const struct ubi_device *ubi) | ||
1242 | { | ||
1243 | int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; | ||
1244 | |||
1245 | dbg_eba("close EBA unit"); | ||
1246 | |||
1247 | for (i = 0; i < num_volumes; i++) { | ||
1248 | if (!ubi->volumes[i]) | ||
1249 | continue; | ||
1250 | kfree(ubi->volumes[i]->eba_tbl); | ||
1251 | } | ||
1252 | } | ||
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index e909b390069a..605812bb0b1a 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c | |||
@@ -111,7 +111,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
111 | struct ubi_device *ubi; | 111 | struct ubi_device *ubi; |
112 | uint64_t tmp = from; | 112 | uint64_t tmp = from; |
113 | 113 | ||
114 | dbg_msg("read %zd bytes from offset %lld", len, from); | 114 | dbg_gen("read %zd bytes from offset %lld", len, from); |
115 | 115 | ||
116 | if (len < 0 || from < 0 || from + len > mtd->size) | 116 | if (len < 0 || from < 0 || from + len > mtd->size) |
117 | return -EINVAL; | 117 | return -EINVAL; |
@@ -162,7 +162,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
162 | struct ubi_device *ubi; | 162 | struct ubi_device *ubi; |
163 | uint64_t tmp = to; | 163 | uint64_t tmp = to; |
164 | 164 | ||
165 | dbg_msg("write %zd bytes to offset %lld", len, to); | 165 | dbg_gen("write %zd bytes to offset %lld", len, to); |
166 | 166 | ||
167 | if (len < 0 || to < 0 || len + to > mtd->size) | 167 | if (len < 0 || to < 0 || len + to > mtd->size) |
168 | return -EINVAL; | 168 | return -EINVAL; |
@@ -215,7 +215,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
215 | struct ubi_volume *vol; | 215 | struct ubi_volume *vol; |
216 | struct ubi_device *ubi; | 216 | struct ubi_device *ubi; |
217 | 217 | ||
218 | dbg_msg("erase %u bytes at offset %u", instr->len, instr->addr); | 218 | dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr); |
219 | 219 | ||
220 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) | 220 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) |
221 | return -EINVAL; | 221 | return -EINVAL; |
@@ -249,8 +249,8 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
249 | if (err) | 249 | if (err) |
250 | goto out_err; | 250 | goto out_err; |
251 | 251 | ||
252 | instr->state = MTD_ERASE_DONE; | 252 | instr->state = MTD_ERASE_DONE; |
253 | mtd_erase_callback(instr); | 253 | mtd_erase_callback(instr); |
254 | return 0; | 254 | return 0; |
255 | 255 | ||
256 | out_err: | 256 | out_err: |
@@ -299,12 +299,12 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) | |||
299 | mtd->size = vol->used_bytes; | 299 | mtd->size = vol->used_bytes; |
300 | 300 | ||
301 | if (add_mtd_device(mtd)) { | 301 | if (add_mtd_device(mtd)) { |
302 | ubi_err("cannot not add MTD device\n"); | 302 | ubi_err("cannot add MTD device"); |
303 | kfree(mtd->name); | 303 | kfree(mtd->name); |
304 | return -ENFILE; | 304 | return -ENFILE; |
305 | } | 305 | } |
306 | 306 | ||
307 | dbg_msg("added mtd%d (\"%s\"), size %u, EB size %u", | 307 | dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u", |
308 | mtd->index, mtd->name, mtd->size, mtd->erasesize); | 308 | mtd->index, mtd->name, mtd->size, mtd->erasesize); |
309 | return 0; | 309 | return 0; |
310 | } | 310 | } |
@@ -322,7 +322,7 @@ int ubi_destroy_gluebi(struct ubi_volume *vol) | |||
322 | int err; | 322 | int err; |
323 | struct mtd_info *mtd = &vol->gluebi_mtd; | 323 | struct mtd_info *mtd = &vol->gluebi_mtd; |
324 | 324 | ||
325 | dbg_msg("remove mtd%d", mtd->index); | 325 | dbg_gen("remove mtd%d", mtd->index); |
326 | err = del_mtd_device(mtd); | 326 | err = del_mtd_device(mtd); |
327 | if (err) | 327 | if (err) |
328 | return err; | 328 | return err; |
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 4ac11df7b048..2fb64be44f1b 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c | |||
@@ -20,15 +20,15 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * UBI input/output unit. | 23 | * UBI input/output sub-system. |
24 | * | 24 | * |
25 | * This unit provides a uniform way to work with all kinds of the underlying | 25 | * This sub-system provides a uniform way to work with all kinds of the |
26 | * MTD devices. It also implements handy functions for reading and writing UBI | 26 | * underlying MTD devices. It also implements handy functions for reading and |
27 | * headers. | 27 | * writing UBI headers. |
28 | * | 28 | * |
29 | * We are trying to have a paranoid mindset and not to trust to what we read | 29 | * We are trying to have a paranoid mindset and not to trust to what we read |
30 | * from the flash media in order to be more secure and robust. So this unit | 30 | * from the flash media in order to be more secure and robust. So this |
31 | * validates every single header it reads from the flash media. | 31 | * sub-system validates every single header it reads from the flash media. |
32 | * | 32 | * |
33 | * Some words about how the eraseblock headers are stored. | 33 | * Some words about how the eraseblock headers are stored. |
34 | * | 34 | * |
@@ -79,11 +79,11 @@ | |||
79 | * 512-byte chunks, we have to allocate one more buffer and copy our VID header | 79 | * 512-byte chunks, we have to allocate one more buffer and copy our VID header |
80 | * to offset 448 of this buffer. | 80 | * to offset 448 of this buffer. |
81 | * | 81 | * |
82 | * The I/O unit does the following trick in order to avoid this extra copy. | 82 | * The I/O sub-system does the following trick in order to avoid this extra |
83 | * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header | 83 | * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID |
84 | * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the | 84 | * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. |
85 | * VID header is being written out, it shifts the VID header pointer back and | 85 | * When the VID header is being written out, it shifts the VID header pointer |
86 | * writes the whole sub-page. | 86 | * back and writes the whole sub-page. |
87 | */ | 87 | */ |
88 | 88 | ||
89 | #include <linux/crc32.h> | 89 | #include <linux/crc32.h> |
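Concretely, the trick means the VID header allocator hands out an interior pointer into a sub-page sized buffer. A hedged sketch of the idea (the real helpers are ubi_zalloc_vid_hdr() and ubi_free_vid_hdr(); error handling and the write path are omitted):

/* Sketch of the buffer trick described above: allocate a whole
 * @vid_hdr_alsize buffer but hand out a pointer @vid_hdr_shift bytes
 * into it, so the header can later be written out as one aligned
 * sub-page without an extra copy. */
#include "ubi.h"

static struct ubi_vid_hdr *zalloc_vid_hdr_sketch(const struct ubi_device *ubi,
                                                 gfp_t gfp_flags)
{
        void *buf;

        buf = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
        if (!buf)
                return NULL;

        return buf + ubi->vid_hdr_shift;
}

static void free_vid_hdr_sketch(const struct ubi_device *ubi,
                                struct ubi_vid_hdr *vid_hdr)
{
        if (vid_hdr)
                kfree((void *)vid_hdr - ubi->vid_hdr_shift);
}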
@@ -156,15 +156,19 @@ retry: | |||
156 | /* | 156 | /* |
157 | * -EUCLEAN is reported if there was a bit-flip which | 157 | * -EUCLEAN is reported if there was a bit-flip which |
158 | * was corrected, so this is harmless. | 158 | * was corrected, so this is harmless. |
159 | * | ||
160 | * We do not report about it here unless debugging is | ||
161 | * enabled. A corresponding message will be printed | ||
162 | * later, when it has been scrubbed. | ||
159 | */ | 163 | */ |
160 | ubi_msg("fixable bit-flip detected at PEB %d", pnum); | 164 | dbg_msg("fixable bit-flip detected at PEB %d", pnum); |
161 | ubi_assert(len == read); | 165 | ubi_assert(len == read); |
162 | return UBI_IO_BITFLIPS; | 166 | return UBI_IO_BITFLIPS; |
163 | } | 167 | } |
164 | 168 | ||
165 | if (read != len && retries++ < UBI_IO_RETRIES) { | 169 | if (read != len && retries++ < UBI_IO_RETRIES) { |
166 | dbg_io("error %d while reading %d bytes from PEB %d:%d, " | 170 | dbg_io("error %d while reading %d bytes from PEB %d:%d," |
167 | "read only %zd bytes, retry", | 171 | " read only %zd bytes, retry", |
168 | err, len, pnum, offset, read); | 172 | err, len, pnum, offset, read); |
169 | yield(); | 173 | yield(); |
170 | goto retry; | 174 | goto retry; |
@@ -187,7 +191,7 @@ retry: | |||
187 | ubi_assert(len == read); | 191 | ubi_assert(len == read); |
188 | 192 | ||
189 | if (ubi_dbg_is_bitflip()) { | 193 | if (ubi_dbg_is_bitflip()) { |
190 | dbg_msg("bit-flip (emulated)"); | 194 | dbg_gen("bit-flip (emulated)"); |
191 | err = UBI_IO_BITFLIPS; | 195 | err = UBI_IO_BITFLIPS; |
192 | } | 196 | } |
193 | } | 197 | } |
@@ -391,6 +395,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) | |||
391 | { | 395 | { |
392 | int err, i, patt_count; | 396 | int err, i, patt_count; |
393 | 397 | ||
398 | ubi_msg("run torture test for PEB %d", pnum); | ||
394 | patt_count = ARRAY_SIZE(patterns); | 399 | patt_count = ARRAY_SIZE(patterns); |
395 | ubi_assert(patt_count > 0); | 400 | ubi_assert(patt_count > 0); |
396 | 401 | ||
@@ -434,6 +439,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) | |||
434 | } | 439 | } |
435 | 440 | ||
436 | err = patt_count; | 441 | err = patt_count; |
442 | ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum); | ||
437 | 443 | ||
438 | out: | 444 | out: |
439 | mutex_unlock(&ubi->buf_mutex); | 445 | mutex_unlock(&ubi->buf_mutex); |
@@ -699,8 +705,8 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, | |||
699 | 705 | ||
700 | if (hdr_crc != crc) { | 706 | if (hdr_crc != crc) { |
701 | if (verbose) { | 707 | if (verbose) { |
702 | ubi_warn("bad EC header CRC at PEB %d, calculated %#08x," | 708 | ubi_warn("bad EC header CRC at PEB %d, calculated " |
703 | " read %#08x", pnum, crc, hdr_crc); | 709 | "%#08x, read %#08x", pnum, crc, hdr_crc); |
704 | ubi_dbg_dump_ec_hdr(ec_hdr); | 710 | ubi_dbg_dump_ec_hdr(ec_hdr); |
705 | } | 711 | } |
706 | return UBI_IO_BAD_EC_HDR; | 712 | return UBI_IO_BAD_EC_HDR; |
@@ -1095,8 +1101,7 @@ fail: | |||
1095 | } | 1101 | } |
1096 | 1102 | ||
1097 | /** | 1103 | /** |
1098 | * paranoid_check_peb_ec_hdr - check that the erase counter header of a | 1104 | * paranoid_check_peb_ec_hdr - check erase counter header. |
1099 | * physical eraseblock is in-place and is all right. | ||
1100 | * @ubi: UBI device description object | 1105 | * @ubi: UBI device description object |
1101 | * @pnum: the physical eraseblock number to check | 1106 | * @pnum: the physical eraseblock number to check |
1102 | * | 1107 | * |
@@ -1174,8 +1179,7 @@ fail: | |||
1174 | } | 1179 | } |
1175 | 1180 | ||
1176 | /** | 1181 | /** |
1177 | * paranoid_check_peb_vid_hdr - check that the volume identifier header of a | 1182 | * paranoid_check_peb_vid_hdr - check volume identifier header. |
1178 | * physical eraseblock is in-place and is all right. | ||
1179 | * @ubi: UBI device description object | 1183 | * @ubi: UBI device description object |
1180 | * @pnum: the physical eraseblock number to check | 1184 | * @pnum: the physical eraseblock number to check |
1181 | * | 1185 | * |
@@ -1256,7 +1260,7 @@ static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, | |||
1256 | 1260 | ||
1257 | fail: | 1261 | fail: |
1258 | ubi_err("paranoid check failed for PEB %d", pnum); | 1262 | ubi_err("paranoid check failed for PEB %d", pnum); |
1259 | dbg_msg("hex dump of the %d-%d region", offset, offset + len); | 1263 | ubi_msg("hex dump of the %d-%d region", offset, offset + len); |
1260 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | 1264 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, |
1261 | ubi->dbg_peb_buf, len, 1); | 1265 | ubi->dbg_peb_buf, len, 1); |
1262 | err = 1; | 1266 | err = 1; |
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index a70d58823f8d..5d9bcf109c13 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
@@ -106,7 +106,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) | |||
106 | struct ubi_device *ubi; | 106 | struct ubi_device *ubi; |
107 | struct ubi_volume *vol; | 107 | struct ubi_volume *vol; |
108 | 108 | ||
109 | dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); | 109 | dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode); |
110 | 110 | ||
111 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) | 111 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) |
112 | return ERR_PTR(-EINVAL); | 112 | return ERR_PTR(-EINVAL); |
@@ -215,7 +215,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, | |||
215 | struct ubi_device *ubi; | 215 | struct ubi_device *ubi; |
216 | struct ubi_volume_desc *ret; | 216 | struct ubi_volume_desc *ret; |
217 | 217 | ||
218 | dbg_msg("open volume %s, mode %d", name, mode); | 218 | dbg_gen("open volume %s, mode %d", name, mode); |
219 | 219 | ||
220 | if (!name) | 220 | if (!name) |
221 | return ERR_PTR(-EINVAL); | 221 | return ERR_PTR(-EINVAL); |
@@ -266,7 +266,7 @@ void ubi_close_volume(struct ubi_volume_desc *desc) | |||
266 | struct ubi_volume *vol = desc->vol; | 266 | struct ubi_volume *vol = desc->vol; |
267 | struct ubi_device *ubi = vol->ubi; | 267 | struct ubi_device *ubi = vol->ubi; |
268 | 268 | ||
269 | dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); | 269 | dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode); |
270 | 270 | ||
271 | spin_lock(&ubi->volumes_lock); | 271 | spin_lock(&ubi->volumes_lock); |
272 | switch (desc->mode) { | 272 | switch (desc->mode) { |
@@ -323,7 +323,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, | |||
323 | struct ubi_device *ubi = vol->ubi; | 323 | struct ubi_device *ubi = vol->ubi; |
324 | int err, vol_id = vol->vol_id; | 324 | int err, vol_id = vol->vol_id; |
325 | 325 | ||
326 | dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); | 326 | dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); |
327 | 327 | ||
328 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || | 328 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || |
329 | lnum >= vol->used_ebs || offset < 0 || len < 0 || | 329 | lnum >= vol->used_ebs || offset < 0 || len < 0 || |
@@ -388,7 +388,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, | |||
388 | struct ubi_device *ubi = vol->ubi; | 388 | struct ubi_device *ubi = vol->ubi; |
389 | int vol_id = vol->vol_id; | 389 | int vol_id = vol->vol_id; |
390 | 390 | ||
391 | dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); | 391 | dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); |
392 | 392 | ||
393 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots) | 393 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots) |
394 | return -EINVAL; | 394 | return -EINVAL; |
@@ -397,8 +397,8 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, | |||
397 | return -EROFS; | 397 | return -EROFS; |
398 | 398 | ||
399 | if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || | 399 | if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || |
400 | offset + len > vol->usable_leb_size || offset % ubi->min_io_size || | 400 | offset + len > vol->usable_leb_size || |
401 | len % ubi->min_io_size) | 401 | offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1)) |
402 | return -EINVAL; | 402 | return -EINVAL; |
403 | 403 | ||
404 | if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && | 404 | if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && |
@@ -438,7 +438,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, | |||
438 | struct ubi_device *ubi = vol->ubi; | 438 | struct ubi_device *ubi = vol->ubi; |
439 | int vol_id = vol->vol_id; | 439 | int vol_id = vol->vol_id; |
440 | 440 | ||
441 | dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); | 441 | dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); |
442 | 442 | ||
443 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots) | 443 | if (vol_id < 0 || vol_id >= ubi->vtbl_slots) |
444 | return -EINVAL; | 444 | return -EINVAL; |
@@ -447,7 +447,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, | |||
447 | return -EROFS; | 447 | return -EROFS; |
448 | 448 | ||
449 | if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || | 449 | if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || |
450 | len > vol->usable_leb_size || len % ubi->min_io_size) | 450 | len > vol->usable_leb_size || len & (ubi->min_io_size - 1)) |
451 | return -EINVAL; | 451 | return -EINVAL; |
452 | 452 | ||
453 | if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && | 453 | if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && |
@@ -482,7 +482,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum) | |||
482 | struct ubi_device *ubi = vol->ubi; | 482 | struct ubi_device *ubi = vol->ubi; |
483 | int err; | 483 | int err; |
484 | 484 | ||
485 | dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); | 485 | dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); |
486 | 486 | ||
487 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 487 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
488 | return -EROFS; | 488 | return -EROFS; |
@@ -542,7 +542,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) | |||
542 | struct ubi_volume *vol = desc->vol; | 542 | struct ubi_volume *vol = desc->vol; |
543 | struct ubi_device *ubi = vol->ubi; | 543 | struct ubi_device *ubi = vol->ubi; |
544 | 544 | ||
545 | dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); | 545 | dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); |
546 | 546 | ||
547 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 547 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
548 | return -EROFS; | 548 | return -EROFS; |
@@ -579,7 +579,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype) | |||
579 | struct ubi_volume *vol = desc->vol; | 579 | struct ubi_volume *vol = desc->vol; |
580 | struct ubi_device *ubi = vol->ubi; | 580 | struct ubi_device *ubi = vol->ubi; |
581 | 581 | ||
582 | dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); | 582 | dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); |
583 | 583 | ||
584 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) | 584 | if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) |
585 | return -EROFS; | 585 | return -EROFS; |
@@ -621,7 +621,7 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum) | |||
621 | { | 621 | { |
622 | struct ubi_volume *vol = desc->vol; | 622 | struct ubi_volume *vol = desc->vol; |
623 | 623 | ||
624 | dbg_msg("test LEB %d:%d", vol->vol_id, lnum); | 624 | dbg_gen("test LEB %d:%d", vol->vol_id, lnum); |
625 | 625 | ||
626 | if (lnum < 0 || lnum >= vol->reserved_pebs) | 626 | if (lnum < 0 || lnum >= vol->reserved_pebs) |
627 | return -EINVAL; | 627 | return -EINVAL; |
@@ -632,3 +632,27 @@ int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum) | |||
632 | return vol->eba_tbl[lnum] >= 0; | 632 | return vol->eba_tbl[lnum] >= 0; |
633 | } | 633 | } |
634 | EXPORT_SYMBOL_GPL(ubi_is_mapped); | 634 | EXPORT_SYMBOL_GPL(ubi_is_mapped); |
635 | |||
636 | /** | ||
637 | * ubi_sync - synchronize UBI device buffers. | ||
638 | * @ubi_num: UBI device to synchronize | ||
639 | * | ||
640 | * The underlying MTD device may cache data in hardware or in software. This | ||
641 | * function ensures the caches are flushed. Returns zero in case of success and | ||
642 | * a negative error code in case of failure. | ||
643 | */ | ||
644 | int ubi_sync(int ubi_num) | ||
645 | { | ||
646 | struct ubi_device *ubi; | ||
647 | |||
648 | ubi = ubi_get_device(ubi_num); | ||
649 | if (!ubi) | ||
650 | return -ENODEV; | ||
651 | |||
652 | if (ubi->mtd->sync) | ||
653 | ubi->mtd->sync(ubi->mtd); | ||
654 | |||
655 | ubi_put_device(ubi); | ||
656 | return 0; | ||
657 | } | ||
658 | EXPORT_SYMBOL_GPL(ubi_sync); | ||
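A hedged sketch of how an in-kernel UBI user might combine the existing write API with the new export; the volume descriptor, LEB number and data type below are placeholders, not taken from any real caller:

/* Write one LEB and then flush the underlying MTD device caches with the
 * newly exported ubi_sync(). */
#include <linux/mtd/ubi.h>

static int write_and_flush(struct ubi_volume_desc *desc, int ubi_num,
                           int lnum, const void *buf, int len)
{
        int err;

        err = ubi_leb_write(desc, lnum, buf, 0, len, UBI_UNKNOWN);
        if (err)
                return err;

        /* Make sure the data has actually reached the flash */
        return ubi_sync(ubi_num);
}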
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c index 93e052812012..22ad31402945 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c | |||
@@ -37,7 +37,7 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, | |||
37 | { | 37 | { |
38 | int i; | 38 | int i; |
39 | 39 | ||
40 | ubi_assert(length % ubi->min_io_size == 0); | 40 | ubi_assert(!(length & (ubi->min_io_size - 1))); |
41 | 41 | ||
42 | for (i = length - 1; i >= 0; i--) | 42 | for (i = length - 1; i >= 0; i--) |
43 | if (((const uint8_t *)buf)[i] != 0xFF) | 43 | if (((const uint8_t *)buf)[i] != 0xFF) |
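The modulo-to-mask conversions here and in eba.c/kapi.c rely on ubi->min_io_size being a power of two, which UBI already checks when the MTD device is attached; for such sizes the two forms are equivalent and the mask avoids a division:

/* For a power-of-two min_io_size, "len % min_io_size == 0" and
 * "!(len & (min_io_size - 1))" test the same thing: e.g. with
 * min_io_size = 512 the mask is 0x1ff and an aligned length has all of
 * its low nine bits clear. */
static inline int io_aligned_sketch(int len, int min_io_size)
{
        return !(len & (min_io_size - 1));
}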
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 96d410e106ab..967bb4406df9 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c | |||
@@ -19,9 +19,9 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * UBI scanning unit. | 22 | * UBI scanning sub-system. |
23 | * | 23 | * |
24 | * This unit is responsible for scanning the flash media, checking UBI | 24 | * This sub-system is responsible for scanning the flash media, checking UBI |
25 | * headers and providing complete information about the UBI flash image. | 25 | * headers and providing complete information about the UBI flash image. |
26 | * | 26 | * |
27 | * The scanning information is represented by a &struct ubi_scan_info' object. | 27 | * The scanning information is represented by a &struct ubi_scan_info' object. |
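Internally the scanning information is a tree of volumes, each holding a tree of scanned eraseblocks. A hedged sketch of resolving one LEB through the lookup helpers declared in scan.h (ubi_scan_find_sv() is documented further down in this file):

/* Hedged sketch: find the physical eraseblock that currently holds LEB
 * @lnum of volume @vol_id according to the scanning information.
 * Returns the PEB number or -ENOENT if the LEB was not seen. */
#include "ubi.h"
#include "scan.h"

static int scan_find_peb_sketch(const struct ubi_scan_info *si,
                                int vol_id, int lnum)
{
        struct ubi_scan_volume *sv;
        struct ubi_scan_leb *seb;

        sv = ubi_scan_find_sv(si, vol_id);
        if (!sv)
                return -ENOENT;

        seb = ubi_scan_find_seb(sv, lnum);
        if (!seb)
                return -ENOENT;

        return seb->pnum;
}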
@@ -93,8 +93,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, | |||
93 | } | 93 | } |
94 | 94 | ||
95 | /** | 95 | /** |
96 | * validate_vid_hdr - check that volume identifier header is correct and | 96 | * validate_vid_hdr - check volume identifier header. |
97 | * consistent. | ||
98 | * @vid_hdr: the volume identifier header to check | 97 | * @vid_hdr: the volume identifier header to check |
99 | * @sv: information about the volume this logical eraseblock belongs to | 98 | * @sv: information about the volume this logical eraseblock belongs to |
100 | * @pnum: physical eraseblock number the VID header came from | 99 | * @pnum: physical eraseblock number the VID header came from |
@@ -103,7 +102,7 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, | |||
103 | * non-zero if an inconsistency was found and zero if not. | 102 | * non-zero if an inconsistency was found and zero if not. |
104 | * | 103 | * |
105 | * Note, UBI does sanity check of everything it reads from the flash media. | 104 | * Note, UBI does sanity check of everything it reads from the flash media. |
106 | * Most of the checks are done in the I/O unit. Here we check that the | 105 | * Most of the checks are done in the I/O sub-system. Here we check that the |
107 | * information in the VID header is consistent to the information in other VID | 106 | * information in the VID header is consistent to the information in other VID |
108 | * headers of the same volume. | 107 | * headers of the same volume. |
109 | */ | 108 | */ |
@@ -247,45 +246,21 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, | |||
247 | struct ubi_vid_hdr *vh = NULL; | 246 | struct ubi_vid_hdr *vh = NULL; |
248 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); | 247 | unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); |
249 | 248 | ||
250 | if (seb->sqnum == 0 && sqnum2 == 0) { | 249 | if (sqnum2 == seb->sqnum) { |
251 | long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver); | ||
252 | |||
253 | /* | 250 | /* |
254 | * UBI constantly increases the logical eraseblock version | 251 | * This must be a really ancient UBI image which was |
255 | * number and it can overflow. Thus, we have to bear in mind | 252 | * created before sequence numbers support was added. At |
256 | * that versions that are close to %0xFFFFFFFF are less then | 253 | * that time we used 32-bit LEB versions stored in logical |
257 | * versions that are close to %0. | 254 | * eraseblocks. That was before UBI got into mainline. We do not |
258 | * | 255 | * support these images anymore. Well, those images will |
259 | * The UBI WL unit guarantees that the number of pending tasks | 256 | * still work, but only if no unclean reboots happened. |
260 | * is not greater then %0x7FFFFFFF. So, if the difference | ||
261 | * between any two versions is greater or equivalent to | ||
262 | * %0x7FFFFFFF, there was an overflow and the logical | ||
263 | * eraseblock with lower version is actually newer then the one | ||
264 | * with higher version. | ||
265 | * | ||
266 | * FIXME: but this is anyway obsolete and will be removed at | ||
267 | * some point. | ||
268 | */ | 257 | */ |
269 | dbg_bld("using old crappy leb_ver stuff"); | 258 | ubi_err("unsupported on-flash UBI format"); |
270 | 259 | return -EINVAL; | |
271 | if (v1 == v2) { | 260 | } |
272 | ubi_err("PEB %d and PEB %d have the same version %lld", | ||
273 | seb->pnum, pnum, v1); | ||
274 | return -EINVAL; | ||
275 | } | ||
276 | |||
277 | abs = v1 - v2; | ||
278 | if (abs < 0) | ||
279 | abs = -abs; | ||
280 | 261 | ||
281 | if (abs < 0x7FFFFFFF) | 262 | /* Obviously the LEB with lower sequence counter is older */ |
282 | /* Non-overflow situation */ | 263 | second_is_newer = !!(sqnum2 > seb->sqnum); |
283 | second_is_newer = (v2 > v1); | ||
284 | else | ||
285 | second_is_newer = (v2 < v1); | ||
286 | } else | ||
287 | /* Obviously the LEB with lower sequence counter is older */ | ||
288 | second_is_newer = sqnum2 > seb->sqnum; | ||
289 | 264 | ||
290 | /* | 265 | /* |
291 | * Now we know which copy is newer. If the copy flag of the PEB with | 266 | * Now we know which copy is newer. If the copy flag of the PEB with |
@@ -293,7 +268,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, | |||
293 | * check data CRC. For the second PEB we already have the VID header, | 268 | * check data CRC. For the second PEB we already have the VID header, |
294 | * for the first one - we'll need to re-read it from flash. | 269 | * for the first one - we'll need to re-read it from flash. |
295 | * | 270 | * |
296 | * FIXME: this may be optimized so that we wouldn't read twice. | 271 | * Note: this may be optimized so that we wouldn't read twice. |
297 | */ | 272 | */ |
298 | 273 | ||
299 | if (second_is_newer) { | 274 | if (second_is_newer) { |
@@ -379,8 +354,7 @@ out_free_vidh: | |||
379 | } | 354 | } |
380 | 355 | ||
381 | /** | 356 | /** |
382 | * ubi_scan_add_used - add information about a physical eraseblock to the | 357 | * ubi_scan_add_used - add physical eraseblock to the scanning information. |
383 | * scanning information. | ||
384 | * @ubi: UBI device description object | 358 | * @ubi: UBI device description object |
385 | * @si: scanning information | 359 | * @si: scanning information |
386 | * @pnum: the physical eraseblock number | 360 | * @pnum: the physical eraseblock number |
@@ -400,7 +374,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
400 | int bitflips) | 374 | int bitflips) |
401 | { | 375 | { |
402 | int err, vol_id, lnum; | 376 | int err, vol_id, lnum; |
403 | uint32_t leb_ver; | ||
404 | unsigned long long sqnum; | 377 | unsigned long long sqnum; |
405 | struct ubi_scan_volume *sv; | 378 | struct ubi_scan_volume *sv; |
406 | struct ubi_scan_leb *seb; | 379 | struct ubi_scan_leb *seb; |
@@ -409,10 +382,9 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
409 | vol_id = be32_to_cpu(vid_hdr->vol_id); | 382 | vol_id = be32_to_cpu(vid_hdr->vol_id); |
410 | lnum = be32_to_cpu(vid_hdr->lnum); | 383 | lnum = be32_to_cpu(vid_hdr->lnum); |
411 | sqnum = be64_to_cpu(vid_hdr->sqnum); | 384 | sqnum = be64_to_cpu(vid_hdr->sqnum); |
412 | leb_ver = be32_to_cpu(vid_hdr->leb_ver); | ||
413 | 385 | ||
414 | dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d", | 386 | dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d", |
415 | pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips); | 387 | pnum, vol_id, lnum, ec, sqnum, bitflips); |
416 | 388 | ||
417 | sv = add_volume(si, vol_id, pnum, vid_hdr); | 389 | sv = add_volume(si, vol_id, pnum, vid_hdr); |
418 | if (IS_ERR(sv) < 0) | 390 | if (IS_ERR(sv) < 0) |
@@ -445,25 +417,20 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
445 | */ | 417 | */ |
446 | 418 | ||
447 | dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " | 419 | dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " |
448 | "LEB ver %u, EC %d", seb->pnum, seb->sqnum, | 420 | "EC %d", seb->pnum, seb->sqnum, seb->ec); |
449 | seb->leb_ver, seb->ec); | ||
450 | |||
451 | /* | ||
452 | * Make sure that the logical eraseblocks have different | ||
453 | * versions. Otherwise the image is bad. | ||
454 | */ | ||
455 | if (seb->leb_ver == leb_ver && leb_ver != 0) { | ||
456 | ubi_err("two LEBs with same version %u", leb_ver); | ||
457 | ubi_dbg_dump_seb(seb, 0); | ||
458 | ubi_dbg_dump_vid_hdr(vid_hdr); | ||
459 | return -EINVAL; | ||
460 | } | ||
461 | 421 | ||
462 | /* | 422 | /* |
463 | * Make sure that the logical eraseblocks have different | 423 | * Make sure that the logical eraseblocks have different |
464 | * sequence numbers. Otherwise the image is bad. | 424 | * sequence numbers. Otherwise the image is bad. |
465 | * | 425 | * |
466 | * FIXME: remove 'sqnum != 0' check when leb_ver is removed. | 426 | * However, if the sequence number is zero, we assume it must |
427 | * be an ancient UBI image from the era when UBI did not have | ||
428 | * sequence numbers. We still can attach these images, unless | ||
429 | * there is a need to distinguish between old and new | ||
430 | * eraseblocks, in which case we'll refuse the image in | ||
431 | * 'compare_lebs()'. In other words, we attach old clean | ||
432 | * images, but refuse attaching old images with duplicated | ||
433 | * logical eraseblocks because there was an unclean reboot. | ||
467 | */ | 434 | */ |
468 | if (seb->sqnum == sqnum && sqnum != 0) { | 435 | if (seb->sqnum == sqnum && sqnum != 0) { |
469 | ubi_err("two LEBs with same sequence number %llu", | 436 | ubi_err("two LEBs with same sequence number %llu", |
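The duplicate check above can be restated as a tiny predicate (a sketch of the rule described in the comment, not the kernel code): two copies of one LEB may only share a sequence number when that number is zero, i.e. when the image predates sequence numbers.

    #include <stdint.h>

    /* Non-zero return means the image is inconsistent and must be rejected. */
    static int duplicate_leb_is_fatal(uint64_t existing_sqnum, uint64_t new_sqnum)
    {
            return existing_sqnum == new_sqnum && new_sqnum != 0;
    }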
@@ -503,7 +470,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
503 | seb->pnum = pnum; | 470 | seb->pnum = pnum; |
504 | seb->scrub = ((cmp_res & 2) || bitflips); | 471 | seb->scrub = ((cmp_res & 2) || bitflips); |
505 | seb->sqnum = sqnum; | 472 | seb->sqnum = sqnum; |
506 | seb->leb_ver = leb_ver; | ||
507 | 473 | ||
508 | if (sv->highest_lnum == lnum) | 474 | if (sv->highest_lnum == lnum) |
509 | sv->last_data_size = | 475 | sv->last_data_size = |
@@ -540,7 +506,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
540 | seb->lnum = lnum; | 506 | seb->lnum = lnum; |
541 | seb->sqnum = sqnum; | 507 | seb->sqnum = sqnum; |
542 | seb->scrub = bitflips; | 508 | seb->scrub = bitflips; |
543 | seb->leb_ver = leb_ver; | ||
544 | 509 | ||
545 | if (sv->highest_lnum <= lnum) { | 510 | if (sv->highest_lnum <= lnum) { |
546 | sv->highest_lnum = lnum; | 511 | sv->highest_lnum = lnum; |
@@ -554,8 +519,7 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, | |||
554 | } | 519 | } |
555 | 520 | ||
556 | /** | 521 | /** |
557 | * ubi_scan_find_sv - find information about a particular volume in the | 522 | * ubi_scan_find_sv - find volume in the scanning information. |
558 | * scanning information. | ||
559 | * @si: scanning information | 523 | * @si: scanning information |
560 | * @vol_id: the requested volume ID | 524 | * @vol_id: the requested volume ID |
561 | * | 525 | * |
@@ -584,8 +548,7 @@ struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si, | |||
584 | } | 548 | } |
585 | 549 | ||
586 | /** | 550 | /** |
587 | * ubi_scan_find_seb - find information about a particular logical | 551 | * ubi_scan_find_seb - find LEB in the volume scanning information. |
588 | * eraseblock in the volume scanning information. | ||
589 | * @sv: a pointer to the volume scanning information | 552 | * @sv: a pointer to the volume scanning information |
590 | * @lnum: the requested logical eraseblock | 553 | * @lnum: the requested logical eraseblock |
591 | * | 554 | * |
@@ -645,9 +608,9 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv) | |||
645 | * | 608 | * |
646 | * This function erases physical eraseblock 'pnum', and writes the erase | 609 | * This function erases physical eraseblock 'pnum', and writes the erase |
647 | * counter header to it. This function should only be used on UBI device | 610 | * counter header to it. This function should only be used on UBI device |
648 | * initialization stages, when the EBA unit had not been yet initialized. This | 611 | * initialization stages, when the EBA sub-system had not been yet initialized. |
649 | * function returns zero in case of success and a negative error code in case | 612 | * This function returns zero in case of success and a negative error code in |
650 | * of failure. | 613 | * case of failure. |
651 | */ | 614 | */ |
652 | int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, | 615 | int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, |
653 | int pnum, int ec) | 616 | int pnum, int ec) |
@@ -687,9 +650,10 @@ out_free: | |||
687 | * @si: scanning information | 650 | * @si: scanning information |
688 | * | 651 | * |
689 | * This function returns a free physical eraseblock. It is supposed to be | 652 | * This function returns a free physical eraseblock. It is supposed to be |
690 | * called on the UBI initialization stages when the wear-leveling unit is not | 653 | * called on the UBI initialization stages when the wear-leveling sub-system is |
691 | * initialized yet. This function picks a physical eraseblock from one of the | 654 | * not initialized yet. This function picks a physical eraseblock from one of |
692 | * lists, writes the EC header if it is needed, and removes it from the list. | 655 | * the lists, writes the EC header if it is needed, and removes it from the |
656 | * list. | ||
693 | * | 657 | * |
694 | * This function returns scanning physical eraseblock information in case of | 658 | * This function returns scanning physical eraseblock information in case of |
695 | * success and an error code in case of failure. | 659 | * success and an error code in case of failure. |
@@ -742,8 +706,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, | |||
742 | } | 706 | } |
743 | 707 | ||
744 | /** | 708 | /** |
745 | * process_eb - read UBI headers, check them and add corresponding data | 709 | * process_eb - read, check UBI headers, and add them to scanning information. |
746 | * to the scanning information. | ||
747 | * @ubi: UBI device description object | 710 | * @ubi: UBI device description object |
748 | * @si: scanning information | 711 | * @si: scanning information |
749 | * @pnum: the physical eraseblock number | 712 | * @pnum: the physical eraseblock number |
@@ -751,7 +714,8 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, | |||
751 | * This function returns a zero if the physical eraseblock was successfully | 714 | * This function returns a zero if the physical eraseblock was successfully |
752 | * handled and a negative error code in case of failure. | 715 | * handled and a negative error code in case of failure. |
753 | */ | 716 | */ |
754 | static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) | 717 | static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, |
718 | int pnum) | ||
755 | { | 719 | { |
756 | long long uninitialized_var(ec); | 720 | long long uninitialized_var(ec); |
757 | int err, bitflips = 0, vol_id, ec_corr = 0; | 721 | int err, bitflips = 0, vol_id, ec_corr = 0; |
@@ -764,8 +728,9 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum | |||
764 | return err; | 728 | return err; |
765 | else if (err) { | 729 | else if (err) { |
766 | /* | 730 | /* |
767 | * FIXME: this is actually duty of the I/O unit to initialize | 731 | * FIXME: this is actually duty of the I/O sub-system to |
768 | * this, but MTD does not provide enough information. | 732 | * initialize this, but MTD does not provide enough |
733 | * information. | ||
769 | */ | 734 | */ |
770 | si->bad_peb_count += 1; | 735 | si->bad_peb_count += 1; |
771 | return 0; | 736 | return 0; |
@@ -930,7 +895,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) | |||
930 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { | 895 | for (pnum = 0; pnum < ubi->peb_count; pnum++) { |
931 | cond_resched(); | 896 | cond_resched(); |
932 | 897 | ||
933 | dbg_msg("process PEB %d", pnum); | 898 | dbg_gen("process PEB %d", pnum); |
934 | err = process_eb(ubi, si, pnum); | 899 | err = process_eb(ubi, si, pnum); |
935 | if (err < 0) | 900 | if (err < 0) |
936 | goto out_vidh; | 901 | goto out_vidh; |
@@ -1079,8 +1044,7 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si) | |||
1079 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | 1044 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
1080 | 1045 | ||
1081 | /** | 1046 | /** |
1082 | * paranoid_check_si - check if the scanning information is correct and | 1047 | * paranoid_check_si - check the scanning information. |
1083 | * consistent. | ||
1084 | * @ubi: UBI device description object | 1048 | * @ubi: UBI device description object |
1085 | * @si: scanning information | 1049 | * @si: scanning information |
1086 | * | 1050 | * |
@@ -1265,11 +1229,6 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1265 | ubi_err("bad data_pad %d", sv->data_pad); | 1229 | ubi_err("bad data_pad %d", sv->data_pad); |
1266 | goto bad_vid_hdr; | 1230 | goto bad_vid_hdr; |
1267 | } | 1231 | } |
1268 | |||
1269 | if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) { | ||
1270 | ubi_err("bad leb_ver %u", seb->leb_ver); | ||
1271 | goto bad_vid_hdr; | ||
1272 | } | ||
1273 | } | 1232 | } |
1274 | 1233 | ||
1275 | if (!last_seb) | 1234 | if (!last_seb) |
@@ -1299,8 +1258,7 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
1299 | if (err < 0) { | 1258 | if (err < 0) { |
1300 | kfree(buf); | 1259 | kfree(buf); |
1301 | return err; | 1260 | return err; |
1302 | } | 1261 | } else if (err) |
1303 | else if (err) | ||
1304 | buf[pnum] = 1; | 1262 | buf[pnum] = 1; |
1305 | } | 1263 | } |
1306 | 1264 | ||
diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h index 966b9b682a42..61df208e2f20 100644 --- a/drivers/mtd/ubi/scan.h +++ b/drivers/mtd/ubi/scan.h | |||
@@ -34,7 +34,6 @@ | |||
34 | * @u: unions RB-tree or @list links | 34 | * @u: unions RB-tree or @list links |
35 | * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects | 35 | * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects |
36 | * @u.list: link in one of the eraseblock lists | 36 | * @u.list: link in one of the eraseblock lists |
37 | * @leb_ver: logical eraseblock version (obsolete) | ||
38 | * | 37 | * |
39 | * One object of this type is allocated for each physical eraseblock during | 38 | * One object of this type is allocated for each physical eraseblock during |
40 | * scanning. | 39 | * scanning. |
@@ -49,7 +48,6 @@ struct ubi_scan_leb { | |||
49 | struct rb_node rb; | 48 | struct rb_node rb; |
50 | struct list_head list; | 49 | struct list_head list; |
51 | } u; | 50 | } u; |
52 | uint32_t leb_ver; | ||
53 | }; | 51 | }; |
54 | 52 | ||
55 | /** | 53 | /** |
@@ -59,16 +57,16 @@ struct ubi_scan_leb { | |||
59 | * @leb_count: number of logical eraseblocks in this volume | 57 | * @leb_count: number of logical eraseblocks in this volume |
60 | * @vol_type: volume type | 58 | * @vol_type: volume type |
61 | * @used_ebs: number of used logical eraseblocks in this volume (only for | 59 | * @used_ebs: number of used logical eraseblocks in this volume (only for |
62 | * static volumes) | 60 | * static volumes) |
63 | * @last_data_size: amount of data in the last logical eraseblock of this | 61 | * @last_data_size: amount of data in the last logical eraseblock of this |
64 | * volume (always equivalent to the usable logical eraseblock size in case of | 62 | * volume (always equivalent to the usable logical eraseblock |
65 | * dynamic volumes) | 63 | * size in case of dynamic volumes) |
66 | * @data_pad: how many bytes at the end of logical eraseblocks of this volume | 64 | * @data_pad: how many bytes at the end of logical eraseblocks of this volume |
67 | * are not used (due to volume alignment) | 65 | * are not used (due to volume alignment) |
68 | * @compat: compatibility flags of this volume | 66 | * @compat: compatibility flags of this volume |
69 | * @rb: link in the volume RB-tree | 67 | * @rb: link in the volume RB-tree |
70 | * @root: root of the RB-tree containing all the eraseblock belonging to this | 68 | * @root: root of the RB-tree containing all the eraseblock belonging to this |
71 | * volume (&struct ubi_scan_leb objects) | 69 | * volume (&struct ubi_scan_leb objects) |
72 | * | 70 | * |
73 | * One object of this type is allocated for each volume during scanning. | 71 | * One object of this type is allocated for each volume during scanning. |
74 | */ | 72 | */ |
@@ -92,8 +90,8 @@ struct ubi_scan_volume { | |||
92 | * @free: list of free physical eraseblocks | 90 | * @free: list of free physical eraseblocks |
93 | * @erase: list of physical eraseblocks which have to be erased | 91 | * @erase: list of physical eraseblocks which have to be erased |
94 | * @alien: list of physical eraseblocks which should not be used by UBI (e.g., | 92 | * @alien: list of physical eraseblocks which should not be used by UBI (e.g., |
93 | * those belonging to "preserve"-compatible internal volumes) | ||
95 | * @bad_peb_count: count of bad physical eraseblocks | 94 | * @bad_peb_count: count of bad physical eraseblocks |
96 | * those belonging to "preserve"-compatible internal volumes) | ||
97 | * @vols_found: number of volumes found during scanning | 95 | * @vols_found: number of volumes found during scanning |
98 | * @highest_vol_id: highest volume ID | 96 | * @highest_vol_id: highest volume ID |
99 | * @alien_peb_count: count of physical eraseblocks in the @alien list | 97 | * @alien_peb_count: count of physical eraseblocks in the @alien list |
@@ -106,8 +104,8 @@ struct ubi_scan_volume { | |||
106 | * @ec_count: a temporary variable used when calculating @mean_ec | 104 | * @ec_count: a temporary variable used when calculating @mean_ec |
107 | * | 105 | * |
108 | * This data structure contains the result of scanning and may be used by other | 106 | * This data structure contains the result of scanning and may be used by other |
109 | * UBI units to build final UBI data structures, further error-recovery and so | 107 | * UBI sub-systems to build final UBI data structures, further error-recovery |
110 | * on. | 108 | * and so on. |
111 | */ | 109 | */ |
112 | struct ubi_scan_info { | 110 | struct ubi_scan_info { |
113 | struct rb_root volumes; | 111 | struct rb_root volumes; |
@@ -132,8 +130,7 @@ struct ubi_device; | |||
132 | struct ubi_vid_hdr; | 130 | struct ubi_vid_hdr; |
133 | 131 | ||
134 | /* | 132 | /* |
135 | * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a | 133 | * ubi_scan_move_to_list - move a PEB from the volume tree to a list. |
136 | * list. | ||
137 | * | 134 | * |
138 | * @sv: volume scanning information | 135 | * @sv: volume scanning information |
139 | * @seb: scanning eraseblock information | 136 | * @seb: scanning eraseblock information |
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index c3185d9fd048..2ad940409053 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h | |||
@@ -98,10 +98,11 @@ enum { | |||
98 | * Compatibility constants used by internal volumes. | 98 | * Compatibility constants used by internal volumes. |
99 | * | 99 | * |
100 | * @UBI_COMPAT_DELETE: delete this internal volume before anything is written | 100 | * @UBI_COMPAT_DELETE: delete this internal volume before anything is written |
101 | * to the flash | 101 | * to the flash |
102 | * @UBI_COMPAT_RO: attach this device in read-only mode | 102 | * @UBI_COMPAT_RO: attach this device in read-only mode |
103 | * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its | 103 | * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its |
104 | * physical eraseblocks, don't allow the wear-leveling unit to move them | 104 | * physical eraseblocks, don't allow the wear-leveling |
105 | * sub-system to move them | ||
105 | * @UBI_COMPAT_REJECT: reject this UBI image | 106 | * @UBI_COMPAT_REJECT: reject this UBI image |
106 | */ | 107 | */ |
107 | enum { | 108 | enum { |
@@ -123,7 +124,7 @@ enum { | |||
123 | * struct ubi_ec_hdr - UBI erase counter header. | 124 | * struct ubi_ec_hdr - UBI erase counter header. |
124 | * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC) | 125 | * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC) |
125 | * @version: version of UBI implementation which is supposed to accept this | 126 | * @version: version of UBI implementation which is supposed to accept this |
126 | * UBI image | 127 | * UBI image |
127 | * @padding1: reserved for future, zeroes | 128 | * @padding1: reserved for future, zeroes |
128 | * @ec: the erase counter | 129 | * @ec: the erase counter |
129 | * @vid_hdr_offset: where the VID header starts | 130 | * @vid_hdr_offset: where the VID header starts |
@@ -159,24 +160,23 @@ struct ubi_ec_hdr { | |||
159 | * struct ubi_vid_hdr - on-flash UBI volume identifier header. | 160 | * struct ubi_vid_hdr - on-flash UBI volume identifier header. |
160 | * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) | 161 | * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) |
161 | * @version: UBI implementation version which is supposed to accept this UBI | 162 | * @version: UBI implementation version which is supposed to accept this UBI |
162 | * image (%UBI_VERSION) | 163 | * image (%UBI_VERSION) |
163 | * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) | 164 | * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) |
164 | * @copy_flag: if this logical eraseblock was copied from another physical | 165 | * @copy_flag: if this logical eraseblock was copied from another physical |
165 | * eraseblock (for wear-leveling reasons) | 166 | * eraseblock (for wear-leveling reasons) |
166 | * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, | 167 | * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, |
167 | * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) | 168 | * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) |
168 | * @vol_id: ID of this volume | 169 | * @vol_id: ID of this volume |
169 | * @lnum: logical eraseblock number | 170 | * @lnum: logical eraseblock number |
170 | * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be | 171 | * @padding1: reserved for future, zeroes |
171 | * removed, kept only for not breaking older UBI users) | ||
172 | * @data_size: how many bytes of data this logical eraseblock contains | 172 | * @data_size: how many bytes of data this logical eraseblock contains |
173 | * @used_ebs: total number of used logical eraseblocks in this volume | 173 | * @used_ebs: total number of used logical eraseblocks in this volume |
174 | * @data_pad: how many bytes at the end of this physical eraseblock are not | 174 | * @data_pad: how many bytes at the end of this physical eraseblock are not |
175 | * used | 175 | * used |
176 | * @data_crc: CRC checksum of the data stored in this logical eraseblock | 176 | * @data_crc: CRC checksum of the data stored in this logical eraseblock |
177 | * @padding1: reserved for future, zeroes | ||
178 | * @sqnum: sequence number | ||
179 | * @padding2: reserved for future, zeroes | 177 | * @padding2: reserved for future, zeroes |
178 | * @sqnum: sequence number | ||
179 | * @padding3: reserved for future, zeroes | ||
180 | * @hdr_crc: volume identifier header CRC checksum | 180 | * @hdr_crc: volume identifier header CRC checksum |
181 | * | 181 | * |
182 | * The @sqnum is the value of the global sequence counter at the time when this | 182 | * The @sqnum is the value of the global sequence counter at the time when this |
@@ -224,10 +224,6 @@ struct ubi_ec_hdr { | |||
224 | * checksum is correct, this physical eraseblock is selected (P1). Otherwise | 224 | * checksum is correct, this physical eraseblock is selected (P1). Otherwise |
225 | * the older one (P) is selected. | 225 | * the older one (P) is selected. |
226 | * | 226 | * |
227 | * Note, there is an obsolete @leb_ver field which was used instead of @sqnum | ||
228 | * in the past. But it is not used anymore and we keep it in order to be able | ||
229 | * to deal with old UBI images. It will be removed at some point. | ||
230 | * | ||
231 | * There are 2 sorts of volumes in UBI: user volumes and internal volumes. | 227 | * There are 2 sorts of volumes in UBI: user volumes and internal volumes. |
232 | * Internal volumes are not seen from outside and are used for various internal | 228 | * Internal volumes are not seen from outside and are used for various internal |
233 | * UBI purposes. In this implementation there is only one internal volume - the | 229 | * UBI purposes. In this implementation there is only one internal volume - the |
@@ -248,9 +244,9 @@ struct ubi_ec_hdr { | |||
248 | * The @data_crc field contains the CRC checksum of the contents of the logical | 244 | * The @data_crc field contains the CRC checksum of the contents of the logical |
249 | * eraseblock if this is a static volume. In case of dynamic volumes, it does | 245 | * eraseblock if this is a static volume. In case of dynamic volumes, it does |
250 | * not contain the CRC checksum as a rule. The only exception is when the | 246 | * not contain the CRC checksum as a rule. The only exception is when the |
251 | * data of the physical eraseblock was moved by the wear-leveling unit, then | 247 | * data of the physical eraseblock was moved by the wear-leveling sub-system, |
252 | * the wear-leveling unit calculates the data CRC and stores it in the | 248 | * then the wear-leveling sub-system calculates the data CRC and stores it in |
253 | * @data_crc field. And of course, the @copy_flag is %1 in this case. | 249 | * the @data_crc field. And of course, the @copy_flag is %1 in this case. |
254 | * | 250 | * |
255 | * The @data_size field is used only for static volumes because UBI has to know | 251 | * The @data_size field is used only for static volumes because UBI has to know |
256 | * how many bytes of data are stored in this eraseblock. For dynamic volumes, | 252 | * how many bytes of data are stored in this eraseblock. For dynamic volumes, |
@@ -277,14 +273,14 @@ struct ubi_vid_hdr { | |||
277 | __u8 compat; | 273 | __u8 compat; |
278 | __be32 vol_id; | 274 | __be32 vol_id; |
279 | __be32 lnum; | 275 | __be32 lnum; |
280 | __be32 leb_ver; /* obsolete, to be removed, don't use */ | 276 | __u8 padding1[4]; |
281 | __be32 data_size; | 277 | __be32 data_size; |
282 | __be32 used_ebs; | 278 | __be32 used_ebs; |
283 | __be32 data_pad; | 279 | __be32 data_pad; |
284 | __be32 data_crc; | 280 | __be32 data_crc; |
285 | __u8 padding1[4]; | 281 | __u8 padding2[4]; |
286 | __be64 sqnum; | 282 | __be64 sqnum; |
287 | __u8 padding2[12]; | 283 | __u8 padding3[12]; |
288 | __be32 hdr_crc; | 284 | __be32 hdr_crc; |
289 | } __attribute__ ((packed)); | 285 | } __attribute__ ((packed)); |
290 | 286 | ||
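The net effect of the ubi-media.h hunks is easier to see as a compile-checkable restatement of the new VID header layout (a user-space sketch: the __be types are replaced by plain fixed-width integers, so endianness is ignored). The obsolete leb_ver word simply becomes padding, so the header stays 64 bytes and images written by older UBI versions remain readable.

    #include <assert.h>
    #include <stdint.h>

    struct vid_hdr_sketch {
            uint32_t magic;
            uint8_t  version;
            uint8_t  vol_type;
            uint8_t  copy_flag;
            uint8_t  compat;
            uint32_t vol_id;
            uint32_t lnum;
            uint8_t  padding1[4];   /* was the obsolete leb_ver field */
            uint32_t data_size;
            uint32_t used_ebs;
            uint32_t data_pad;
            uint32_t data_crc;
            uint8_t  padding2[4];
            uint64_t sqnum;
            uint8_t  padding3[12];
            uint32_t hdr_crc;
    } __attribute__((packed));

    /* The on-flash size is unchanged by turning leb_ver into padding. */
    static_assert(sizeof(struct vid_hdr_sketch) == 64, "VID header must stay 64 bytes");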
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 67dcbd11c15c..1c3fa18c26a7 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -74,15 +74,15 @@ | |||
74 | #define UBI_IO_RETRIES 3 | 74 | #define UBI_IO_RETRIES 3 |
75 | 75 | ||
76 | /* | 76 | /* |
77 | * Error codes returned by the I/O unit. | 77 | * Error codes returned by the I/O sub-system. |
78 | * | 78 | * |
79 | * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only | 79 | * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only |
80 | * 0xFF bytes | 80 | * %0xFF bytes |
81 | * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a | 81 | * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a |
82 | * valid erase counter header, and the rest are %0xFF bytes | 82 | * valid erase counter header, and the rest are %0xFF bytes |
83 | * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) | 83 | * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) |
84 | * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or | 84 | * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or |
85 | * CRC) | 85 | * CRC) |
86 | * UBI_IO_BITFLIPS: bit-flips were detected and corrected | 86 | * UBI_IO_BITFLIPS: bit-flips were detected and corrected |
87 | */ | 87 | */ |
88 | enum { | 88 | enum { |
@@ -99,9 +99,9 @@ enum { | |||
99 | * @ec: erase counter | 99 | * @ec: erase counter |
100 | * @pnum: physical eraseblock number | 100 | * @pnum: physical eraseblock number |
101 | * | 101 | * |
102 | * This data structure is used in the WL unit. Each physical eraseblock has a | 102 | * This data structure is used in the WL sub-system. Each physical eraseblock |
103 | * corresponding &struct wl_entry object which may be kept in different | 103 | * has a corresponding &struct wl_entry object which may be kept in different |
104 | * RB-trees. See WL unit for details. | 104 | * RB-trees. See WL sub-system for details. |
105 | */ | 105 | */ |
106 | struct ubi_wl_entry { | 106 | struct ubi_wl_entry { |
107 | struct rb_node rb; | 107 | struct rb_node rb; |
@@ -118,10 +118,10 @@ struct ubi_wl_entry { | |||
118 | * @mutex: read/write mutex to implement read/write access serialization to | 118 | * @mutex: read/write mutex to implement read/write access serialization to |
119 | * the (@vol_id, @lnum) logical eraseblock | 119 | * the (@vol_id, @lnum) logical eraseblock |
120 | * | 120 | * |
121 | * This data structure is used in the EBA unit to implement per-LEB locking. | 121 | * This data structure is used in the EBA sub-system to implement per-LEB |
122 | * When a logical eraseblock is being locked - corresponding | 122 | * locking. When a logical eraseblock is being locked - corresponding |
123 | * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree). | 123 | * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree). |
124 | * See EBA unit for details. | 124 | * See EBA sub-system for details. |
125 | */ | 125 | */ |
126 | struct ubi_ltree_entry { | 126 | struct ubi_ltree_entry { |
127 | struct rb_node rb; | 127 | struct rb_node rb; |
@@ -131,6 +131,27 @@ struct ubi_ltree_entry { | |||
131 | struct rw_semaphore mutex; | 131 | struct rw_semaphore mutex; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | /** | ||
135 | * struct ubi_rename_entry - volume re-name description data structure. | ||
136 | * @new_name_len: new volume name length | ||
137 | * @new_name: new volume name | ||
138 | * @remove: if not zero, this volume should be removed, not re-named | ||
139 | * @desc: descriptor of the volume | ||
140 | * @list: links re-name entries into a list | ||
141 | * | ||
142 | * This data structure is utilized in the multiple volume re-name code. Namely, | ||
143 | * UBI first creates a list of &struct ubi_rename_entry objects from the | ||
144 | * &struct ubi_rnvol_req request object, and then utilizes this list to do all | ||
145 | * the job. | ||
146 | */ | ||
147 | struct ubi_rename_entry { | ||
148 | int new_name_len; | ||
149 | char new_name[UBI_VOL_NAME_MAX + 1]; | ||
150 | int remove; | ||
151 | struct ubi_volume_desc *desc; | ||
152 | struct list_head list; | ||
153 | }; | ||
154 | |||
134 | struct ubi_volume_desc; | 155 | struct ubi_volume_desc; |
135 | 156 | ||
136 | /** | 157 | /** |
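As the kernel-doc above says, the re-name code first turns the user request into a list of &struct ubi_rename_entry objects. A stand-alone sketch of how one entry would be populated (the helper, the UBI_VOL_NAME_MAX value and the simplified types are assumptions for illustration, not part of the patch):

    #include <string.h>

    #define UBI_VOL_NAME_MAX 127            /* assumed to match the UBI limit */

    struct rename_entry_sketch {
            int  new_name_len;
            char new_name[UBI_VOL_NAME_MAX + 1];
            int  remove;
            void *desc;                     /* stands in for struct ubi_volume_desc * */
    };

    /* The caller is assumed to have validated the name length already;
     * linking the entry into rename_list is left to the list helpers. */
    static void init_rename_entry(struct rename_entry_sketch *re, void *desc,
                                  const char *new_name, int remove)
    {
            re->desc = desc;
            re->remove = remove;
            re->new_name_len = (int)strlen(new_name);
            memcpy(re->new_name, new_name, re->new_name_len + 1);
    }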
@@ -206,7 +227,7 @@ struct ubi_volume { | |||
206 | int alignment; | 227 | int alignment; |
207 | int data_pad; | 228 | int data_pad; |
208 | int name_len; | 229 | int name_len; |
209 | char name[UBI_VOL_NAME_MAX+1]; | 230 | char name[UBI_VOL_NAME_MAX + 1]; |
210 | 231 | ||
211 | int upd_ebs; | 232 | int upd_ebs; |
212 | int ch_lnum; | 233 | int ch_lnum; |
@@ -225,7 +246,7 @@ struct ubi_volume { | |||
225 | #ifdef CONFIG_MTD_UBI_GLUEBI | 246 | #ifdef CONFIG_MTD_UBI_GLUEBI |
226 | /* | 247 | /* |
227 | * Gluebi-related stuff may be compiled out. | 248 | * Gluebi-related stuff may be compiled out. |
228 | * TODO: this should not be built into UBI but should be a separate | 249 | * Note: this should not be built into UBI but should be a separate |
229 | * ubimtd driver which works on top of UBI and emulates MTD devices. | 250 | * ubimtd driver which works on top of UBI and emulates MTD devices. |
230 | */ | 251 | */ |
231 | struct ubi_volume_desc *gluebi_desc; | 252 | struct ubi_volume_desc *gluebi_desc; |
@@ -235,8 +256,7 @@ struct ubi_volume { | |||
235 | }; | 256 | }; |
236 | 257 | ||
237 | /** | 258 | /** |
238 | * struct ubi_volume_desc - descriptor of the UBI volume returned when it is | 259 | * struct ubi_volume_desc - UBI volume descriptor returned when it is opened. |
239 | * opened. | ||
240 | * @vol: reference to the corresponding volume description object | 260 | * @vol: reference to the corresponding volume description object |
241 | * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) | 261 | * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) |
242 | */ | 262 | */ |
@@ -273,7 +293,7 @@ struct ubi_wl_entry; | |||
273 | * @vtbl_size: size of the volume table in bytes | 293 | * @vtbl_size: size of the volume table in bytes |
274 | * @vtbl: in-RAM volume table copy | 294 | * @vtbl: in-RAM volume table copy |
275 | * @volumes_mutex: protects on-flash volume table and serializes volume | 295 | * @volumes_mutex: protects on-flash volume table and serializes volume |
276 | * changes, like creation, deletion, update, resize | 296 | * changes, like creation, deletion, update, re-size and re-name |
277 | * | 297 | * |
278 | * @max_ec: current highest erase counter value | 298 | * @max_ec: current highest erase counter value |
279 | * @mean_ec: current mean erase counter value | 299 | * @mean_ec: current mean erase counter value |
@@ -293,6 +313,7 @@ struct ubi_wl_entry; | |||
293 | * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works | 313 | * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works |
294 | * fields | 314 | * fields |
295 | * @move_mutex: serializes eraseblock moves | 315 | * @move_mutex: serializes eraseblock moves |
316 | * @work_sem: synchronizes the WL worker with user tasks | ||
296 | * @wl_scheduled: non-zero if the wear-leveling was scheduled | 317 | * @wl_scheduled: non-zero if the wear-leveling was scheduled |
297 | * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any | 318 | * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any |
298 | * physical eraseblock | 319 | * physical eraseblock |
@@ -316,11 +337,11 @@ struct ubi_wl_entry; | |||
316 | * @ro_mode: if the UBI device is in read-only mode | 337 | * @ro_mode: if the UBI device is in read-only mode |
317 | * @leb_size: logical eraseblock size | 338 | * @leb_size: logical eraseblock size |
318 | * @leb_start: starting offset of logical eraseblocks within physical | 339 | * @leb_start: starting offset of logical eraseblocks within physical |
319 | * eraseblocks | 340 | * eraseblocks |
320 | * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size | 341 | * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size |
321 | * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size | 342 | * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size |
322 | * @vid_hdr_offset: starting offset of the volume identifier header (might be | 343 | * @vid_hdr_offset: starting offset of the volume identifier header (might be |
323 | * unaligned) | 344 | * unaligned) |
324 | * @vid_hdr_aloffset: starting offset of the VID header aligned to | 345 | * @vid_hdr_aloffset: starting offset of the VID header aligned to |
325 | * @hdrs_min_io_size | 346 | * @hdrs_min_io_size |
326 | * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset | 347 | * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset |
@@ -331,6 +352,8 @@ struct ubi_wl_entry; | |||
331 | * @peb_buf1: a buffer of PEB size used for different purposes | 352 | * @peb_buf1: a buffer of PEB size used for different purposes |
332 | * @peb_buf2: another buffer of PEB size used for different purposes | 353 | * @peb_buf2: another buffer of PEB size used for different purposes |
333 | * @buf_mutex: protects @peb_buf1 and @peb_buf2 | 354 | * @buf_mutex: protects @peb_buf1 and @peb_buf2 |
355 | * @ckvol_mutex: serializes static volume checking when opening | ||
356 | * @mult_mutex: serializes operations on multiple volumes, like re-naming | ||
334 | * @dbg_peb_buf: buffer of PEB size used for debugging | 357 | * @dbg_peb_buf: buffer of PEB size used for debugging |
335 | * @dbg_buf_mutex: protects @dbg_peb_buf | 358 | * @dbg_buf_mutex: protects @dbg_peb_buf |
336 | */ | 359 | */ |
@@ -356,16 +379,16 @@ struct ubi_device { | |||
356 | struct mutex volumes_mutex; | 379 | struct mutex volumes_mutex; |
357 | 380 | ||
358 | int max_ec; | 381 | int max_ec; |
359 | /* TODO: mean_ec is not updated run-time, fix */ | 382 | /* Note, mean_ec is not updated run-time - should be fixed */ |
360 | int mean_ec; | 383 | int mean_ec; |
361 | 384 | ||
362 | /* EBA unit's stuff */ | 385 | /* EBA sub-system's stuff */ |
363 | unsigned long long global_sqnum; | 386 | unsigned long long global_sqnum; |
364 | spinlock_t ltree_lock; | 387 | spinlock_t ltree_lock; |
365 | struct rb_root ltree; | 388 | struct rb_root ltree; |
366 | struct mutex alc_mutex; | 389 | struct mutex alc_mutex; |
367 | 390 | ||
368 | /* Wear-leveling unit's stuff */ | 391 | /* Wear-leveling sub-system's stuff */ |
369 | struct rb_root used; | 392 | struct rb_root used; |
370 | struct rb_root free; | 393 | struct rb_root free; |
371 | struct rb_root scrub; | 394 | struct rb_root scrub; |
@@ -388,7 +411,7 @@ struct ubi_device { | |||
388 | int thread_enabled; | 411 | int thread_enabled; |
389 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; | 412 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; |
390 | 413 | ||
391 | /* I/O unit's stuff */ | 414 | /* I/O sub-system's stuff */ |
392 | long long flash_size; | 415 | long long flash_size; |
393 | int peb_count; | 416 | int peb_count; |
394 | int peb_size; | 417 | int peb_size; |
@@ -411,6 +434,7 @@ struct ubi_device { | |||
411 | void *peb_buf2; | 434 | void *peb_buf2; |
412 | struct mutex buf_mutex; | 435 | struct mutex buf_mutex; |
413 | struct mutex ckvol_mutex; | 436 | struct mutex ckvol_mutex; |
437 | struct mutex mult_mutex; | ||
414 | #ifdef CONFIG_MTD_UBI_DEBUG | 438 | #ifdef CONFIG_MTD_UBI_DEBUG |
415 | void *dbg_peb_buf; | 439 | void *dbg_peb_buf; |
416 | struct mutex dbg_buf_mutex; | 440 | struct mutex dbg_buf_mutex; |
@@ -427,12 +451,15 @@ extern struct mutex ubi_devices_mutex; | |||
427 | /* vtbl.c */ | 451 | /* vtbl.c */ |
428 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, | 452 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, |
429 | struct ubi_vtbl_record *vtbl_rec); | 453 | struct ubi_vtbl_record *vtbl_rec); |
454 | int ubi_vtbl_rename_volumes(struct ubi_device *ubi, | ||
455 | struct list_head *rename_list); | ||
430 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); | 456 | int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); |
431 | 457 | ||
432 | /* vmt.c */ | 458 | /* vmt.c */ |
433 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); | 459 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); |
434 | int ubi_remove_volume(struct ubi_volume_desc *desc); | 460 | int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl); |
435 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); | 461 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); |
462 | int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list); | ||
436 | int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); | 463 | int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); |
437 | void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); | 464 | void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); |
438 | 465 | ||
@@ -447,7 +474,8 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, | |||
447 | const void __user *buf, int count); | 474 | const void __user *buf, int count); |
448 | 475 | ||
449 | /* misc.c */ | 476 | /* misc.c */ |
450 | int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); | 477 | int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, |
478 | int length); | ||
451 | int ubi_check_volume(struct ubi_device *ubi, int vol_id); | 479 | int ubi_check_volume(struct ubi_device *ubi, int vol_id); |
452 | void ubi_calculate_reserved(struct ubi_device *ubi); | 480 | void ubi_calculate_reserved(struct ubi_device *ubi); |
453 | 481 | ||
@@ -477,7 +505,6 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | |||
477 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | 505 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
478 | struct ubi_vid_hdr *vid_hdr); | 506 | struct ubi_vid_hdr *vid_hdr); |
479 | int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); | 507 | int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); |
480 | void ubi_eba_close(const struct ubi_device *ubi); | ||
481 | 508 | ||
482 | /* wl.c */ | 509 | /* wl.c */ |
483 | int ubi_wl_get_peb(struct ubi_device *ubi, int dtype); | 510 | int ubi_wl_get_peb(struct ubi_device *ubi, int dtype); |
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index ddaa1a56cc69..8b89cc18ff0b 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c | |||
@@ -39,7 +39,7 @@ | |||
39 | */ | 39 | */ |
40 | 40 | ||
41 | #include <linux/err.h> | 41 | #include <linux/err.h> |
42 | #include <asm/uaccess.h> | 42 | #include <linux/uaccess.h> |
43 | #include <asm/div64.h> | 43 | #include <asm/div64.h> |
44 | #include "ubi.h" | 44 | #include "ubi.h" |
45 | 45 | ||
@@ -56,11 +56,11 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol) | |||
56 | int err; | 56 | int err; |
57 | struct ubi_vtbl_record vtbl_rec; | 57 | struct ubi_vtbl_record vtbl_rec; |
58 | 58 | ||
59 | dbg_msg("set update marker for volume %d", vol->vol_id); | 59 | dbg_gen("set update marker for volume %d", vol->vol_id); |
60 | 60 | ||
61 | if (vol->upd_marker) { | 61 | if (vol->upd_marker) { |
62 | ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); | 62 | ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); |
63 | dbg_msg("already set"); | 63 | dbg_gen("already set"); |
64 | return 0; | 64 | return 0; |
65 | } | 65 | } |
66 | 66 | ||
@@ -92,7 +92,7 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, | |||
92 | uint64_t tmp; | 92 | uint64_t tmp; |
93 | struct ubi_vtbl_record vtbl_rec; | 93 | struct ubi_vtbl_record vtbl_rec; |
94 | 94 | ||
95 | dbg_msg("clear update marker for volume %d", vol->vol_id); | 95 | dbg_gen("clear update marker for volume %d", vol->vol_id); |
96 | 96 | ||
97 | memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], | 97 | memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], |
98 | sizeof(struct ubi_vtbl_record)); | 98 | sizeof(struct ubi_vtbl_record)); |
@@ -133,7 +133,7 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, | |||
133 | int i, err; | 133 | int i, err; |
134 | uint64_t tmp; | 134 | uint64_t tmp; |
135 | 135 | ||
136 | dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes); | 136 | dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes); |
137 | ubi_assert(!vol->updating && !vol->changing_leb); | 137 | ubi_assert(!vol->updating && !vol->changing_leb); |
138 | vol->updating = 1; | 138 | vol->updating = 1; |
139 | 139 | ||
@@ -183,7 +183,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol, | |||
183 | { | 183 | { |
184 | ubi_assert(!vol->updating && !vol->changing_leb); | 184 | ubi_assert(!vol->updating && !vol->changing_leb); |
185 | 185 | ||
186 | dbg_msg("start changing LEB %d:%d, %u bytes", | 186 | dbg_gen("start changing LEB %d:%d, %u bytes", |
187 | vol->vol_id, req->lnum, req->bytes); | 187 | vol->vol_id, req->lnum, req->bytes); |
188 | if (req->bytes == 0) | 188 | if (req->bytes == 0) |
189 | return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, | 189 | return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, |
@@ -237,16 +237,17 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
237 | int err; | 237 | int err; |
238 | 238 | ||
239 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { | 239 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { |
240 | len = ALIGN(len, ubi->min_io_size); | 240 | int l = ALIGN(len, ubi->min_io_size); |
241 | memset(buf + len, 0xFF, len - len); | ||
242 | 241 | ||
243 | len = ubi_calc_data_len(ubi, buf, len); | 242 | memset(buf + len, 0xFF, l - len); |
243 | len = ubi_calc_data_len(ubi, buf, l); | ||
244 | if (len == 0) { | 244 | if (len == 0) { |
245 | dbg_msg("all %d bytes contain 0xFF - skip", len); | 245 | dbg_gen("all %d bytes contain 0xFF - skip", len); |
246 | return 0; | 246 | return 0; |
247 | } | 247 | } |
248 | 248 | ||
249 | err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); | 249 | err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, |
250 | UBI_UNKNOWN); | ||
250 | } else { | 251 | } else { |
251 | /* | 252 | /* |
252 | * When writing static volume, and this is the last logical | 253 | * When writing static volume, and this is the last logical |
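The write_leb() hunk above fixes a subtle ordering bug: the old code re-assigned len first and then padded len - len (i.e. zero) bytes. A minimal stand-alone sketch of the corrected padding rule for dynamic volumes, with ubi_calc_data_len() reduced to a stub (names and the stub are illustrative only):

    #include <stdint.h>
    #include <string.h>

    /* min_io_size is a power of two on real flash, which ALIGN relies on. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* Stub: the real ubi_calc_data_len() trims trailing 0xFF bytes. */
    static int calc_data_len_stub(const uint8_t *buf, int len)
    {
            (void)buf;
            return len;
    }

    static int pad_dynamic_leb(uint8_t *buf, int len, int min_io_size)
    {
            int l = ALIGN(len, min_io_size);

            memset(buf + len, 0xFF, l - len);   /* pad the tail of the last page */
            return calc_data_len_stub(buf, l);  /* 0 would mean "all 0xFF, skip write" */
    }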
@@ -267,6 +268,7 @@ static int write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum, | |||
267 | 268 | ||
268 | /** | 269 | /** |
269 | * ubi_more_update_data - write more update data. | 270 | * ubi_more_update_data - write more update data. |
271 | * @ubi: UBI device description object | ||
270 | * @vol: volume description object | 272 | * @vol: volume description object |
271 | * @buf: write data (user-space memory buffer) | 273 | * @buf: write data (user-space memory buffer) |
272 | * @count: how much bytes to write | 274 | * @count: how much bytes to write |
@@ -283,7 +285,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, | |||
283 | uint64_t tmp; | 285 | uint64_t tmp; |
284 | int lnum, offs, err = 0, len, to_write = count; | 286 | int lnum, offs, err = 0, len, to_write = count; |
285 | 287 | ||
286 | dbg_msg("write %d of %lld bytes, %lld already passed", | 288 | dbg_gen("write %d of %lld bytes, %lld already passed", |
287 | count, vol->upd_bytes, vol->upd_received); | 289 | count, vol->upd_bytes, vol->upd_received); |
288 | 290 | ||
289 | if (ubi->ro_mode) | 291 | if (ubi->ro_mode) |
@@ -384,6 +386,7 @@ int ubi_more_update_data(struct ubi_device *ubi, struct ubi_volume *vol, | |||
384 | 386 | ||
385 | /** | 387 | /** |
386 | * ubi_more_leb_change_data - accept more data for atomic LEB change. | 388 | * ubi_more_leb_change_data - accept more data for atomic LEB change. |
389 | * @ubi: UBI device description object | ||
387 | * @vol: volume description object | 390 | * @vol: volume description object |
388 | * @buf: write data (user-space memory buffer) | 391 | * @buf: write data (user-space memory buffer) |
389 | * @count: how much bytes to write | 392 | * @count: how much bytes to write |
@@ -400,7 +403,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, | |||
400 | { | 403 | { |
401 | int err; | 404 | int err; |
402 | 405 | ||
403 | dbg_msg("write %d of %lld bytes, %lld already passed", | 406 | dbg_gen("write %d of %lld bytes, %lld already passed", |
404 | count, vol->upd_bytes, vol->upd_received); | 407 | count, vol->upd_bytes, vol->upd_received); |
405 | 408 | ||
406 | if (ubi->ro_mode) | 409 | if (ubi->ro_mode) |
@@ -418,7 +421,8 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol, | |||
418 | if (vol->upd_received == vol->upd_bytes) { | 421 | if (vol->upd_received == vol->upd_bytes) { |
419 | int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); | 422 | int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); |
420 | 423 | ||
421 | memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); | 424 | memset(vol->upd_buf + vol->upd_bytes, 0xFF, |
425 | len - vol->upd_bytes); | ||
422 | len = ubi_calc_data_len(ubi, vol->upd_buf, len); | 426 | len = ubi_calc_data_len(ubi, vol->upd_buf, len); |
423 | err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, | 427 | err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, |
424 | vol->upd_buf, len, UBI_UNKNOWN); | 428 | vol->upd_buf, len, UBI_UNKNOWN); |
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 5be58d85c639..3531ca9a1e24 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c | |||
@@ -28,9 +28,9 @@ | |||
28 | #include "ubi.h" | 28 | #include "ubi.h" |
29 | 29 | ||
30 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | 30 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
31 | static void paranoid_check_volumes(struct ubi_device *ubi); | 31 | static int paranoid_check_volumes(struct ubi_device *ubi); |
32 | #else | 32 | #else |
33 | #define paranoid_check_volumes(ubi) | 33 | #define paranoid_check_volumes(ubi) 0 |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | static ssize_t vol_attribute_show(struct device *dev, | 36 | static ssize_t vol_attribute_show(struct device *dev, |
@@ -127,6 +127,7 @@ static void vol_release(struct device *dev) | |||
127 | { | 127 | { |
128 | struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); | 128 | struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); |
129 | 129 | ||
130 | kfree(vol->eba_tbl); | ||
130 | kfree(vol); | 131 | kfree(vol); |
131 | } | 132 | } |
132 | 133 | ||
@@ -201,7 +202,7 @@ static void volume_sysfs_close(struct ubi_volume *vol) | |||
201 | */ | 202 | */ |
202 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | 203 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) |
203 | { | 204 | { |
204 | int i, err, vol_id = req->vol_id, dont_free = 0; | 205 | int i, err, vol_id = req->vol_id, do_free = 1; |
205 | struct ubi_volume *vol; | 206 | struct ubi_volume *vol; |
206 | struct ubi_vtbl_record vtbl_rec; | 207 | struct ubi_vtbl_record vtbl_rec; |
207 | uint64_t bytes; | 208 | uint64_t bytes; |
@@ -217,7 +218,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
217 | spin_lock(&ubi->volumes_lock); | 218 | spin_lock(&ubi->volumes_lock); |
218 | if (vol_id == UBI_VOL_NUM_AUTO) { | 219 | if (vol_id == UBI_VOL_NUM_AUTO) { |
219 | /* Find unused volume ID */ | 220 | /* Find unused volume ID */ |
220 | dbg_msg("search for vacant volume ID"); | 221 | dbg_gen("search for vacant volume ID"); |
221 | for (i = 0; i < ubi->vtbl_slots; i++) | 222 | for (i = 0; i < ubi->vtbl_slots; i++) |
222 | if (!ubi->volumes[i]) { | 223 | if (!ubi->volumes[i]) { |
223 | vol_id = i; | 224 | vol_id = i; |
@@ -232,7 +233,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
232 | req->vol_id = vol_id; | 233 | req->vol_id = vol_id; |
233 | } | 234 | } |
234 | 235 | ||
235 | dbg_msg("volume ID %d, %llu bytes, type %d, name %s", | 236 | dbg_gen("volume ID %d, %llu bytes, type %d, name %s", |
236 | vol_id, (unsigned long long)req->bytes, | 237 | vol_id, (unsigned long long)req->bytes, |
237 | (int)req->vol_type, req->name); | 238 | (int)req->vol_type, req->name); |
238 | 239 | ||
@@ -252,7 +253,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
252 | goto out_unlock; | 253 | goto out_unlock; |
253 | } | 254 | } |
254 | 255 | ||
255 | /* Calculate how many eraseblocks are requested */ | 256 | /* Calculate how many eraseblocks are requested */ |
256 | vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; | 257 | vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; |
257 | bytes = req->bytes; | 258 | bytes = req->bytes; |
258 | if (do_div(bytes, vol->usable_leb_size)) | 259 | if (do_div(bytes, vol->usable_leb_size)) |
@@ -274,7 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
274 | vol->data_pad = ubi->leb_size % vol->alignment; | 275 | vol->data_pad = ubi->leb_size % vol->alignment; |
275 | vol->vol_type = req->vol_type; | 276 | vol->vol_type = req->vol_type; |
276 | vol->name_len = req->name_len; | 277 | vol->name_len = req->name_len; |
277 | memcpy(vol->name, req->name, vol->name_len + 1); | 278 | memcpy(vol->name, req->name, vol->name_len); |
278 | vol->ubi = ubi; | 279 | vol->ubi = ubi; |
279 | 280 | ||
280 | /* | 281 | /* |
@@ -349,7 +350,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
349 | vtbl_rec.vol_type = UBI_VID_DYNAMIC; | 350 | vtbl_rec.vol_type = UBI_VID_DYNAMIC; |
350 | else | 351 | else |
351 | vtbl_rec.vol_type = UBI_VID_STATIC; | 352 | vtbl_rec.vol_type = UBI_VID_STATIC; |
352 | memcpy(vtbl_rec.name, vol->name, vol->name_len + 1); | 353 | memcpy(vtbl_rec.name, vol->name, vol->name_len); |
353 | 354 | ||
354 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); | 355 | err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); |
355 | if (err) | 356 | if (err) |
@@ -360,19 +361,19 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
360 | ubi->vol_count += 1; | 361 | ubi->vol_count += 1; |
361 | spin_unlock(&ubi->volumes_lock); | 362 | spin_unlock(&ubi->volumes_lock); |
362 | 363 | ||
363 | paranoid_check_volumes(ubi); | 364 | err = paranoid_check_volumes(ubi); |
364 | return 0; | 365 | return err; |
365 | 366 | ||
366 | out_sysfs: | 367 | out_sysfs: |
367 | /* | 368 | /* |
368 | * We have registered our device, we should not free the volume* | 369 | * We have registered our device, we should not free the volume |
369 | * description object in this function in case of an error - it is | 370 | * description object in this function in case of an error - it is |
370 | * freed by the release function. | 371 | * freed by the release function. |
371 | * | 372 | * |
372 | * Get device reference to prevent the release function from being | 373 | * Get device reference to prevent the release function from being |
373 | * called just after sysfs has been closed. | 374 | * called just after sysfs has been closed. |
374 | */ | 375 | */ |
375 | dont_free = 1; | 376 | do_free = 0; |
376 | get_device(&vol->dev); | 377 | get_device(&vol->dev); |
377 | volume_sysfs_close(vol); | 378 | volume_sysfs_close(vol); |
378 | out_gluebi: | 379 | out_gluebi: |
@@ -382,17 +383,18 @@ out_gluebi: | |||
382 | out_cdev: | 383 | out_cdev: |
383 | cdev_del(&vol->cdev); | 384 | cdev_del(&vol->cdev); |
384 | out_mapping: | 385 | out_mapping: |
385 | kfree(vol->eba_tbl); | 386 | if (do_free) |
387 | kfree(vol->eba_tbl); | ||
386 | out_acc: | 388 | out_acc: |
387 | spin_lock(&ubi->volumes_lock); | 389 | spin_lock(&ubi->volumes_lock); |
388 | ubi->rsvd_pebs -= vol->reserved_pebs; | 390 | ubi->rsvd_pebs -= vol->reserved_pebs; |
389 | ubi->avail_pebs += vol->reserved_pebs; | 391 | ubi->avail_pebs += vol->reserved_pebs; |
390 | out_unlock: | 392 | out_unlock: |
391 | spin_unlock(&ubi->volumes_lock); | 393 | spin_unlock(&ubi->volumes_lock); |
392 | if (dont_free) | 394 | if (do_free) |
393 | put_device(&vol->dev); | ||
394 | else | ||
395 | kfree(vol); | 395 | kfree(vol); |
396 | else | ||
397 | put_device(&vol->dev); | ||
396 | ubi_err("cannot create volume %d, error %d", vol_id, err); | 398 | ubi_err("cannot create volume %d, error %d", vol_id, err); |
397 | return err; | 399 | return err; |
398 | } | 400 | } |
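The reshuffled error path above follows the usual device-model rule: once the volume's struct device has been registered, the object may only be freed by its release callback (which, with this patch, also frees eba_tbl), so the late error path takes a reference and closes sysfs instead of calling kfree() directly. A simplified illustration of the pattern (hypothetical names, not the kernel API):

    #include <stdlib.h>

    struct vol_sketch {
            int *eba_tbl;
            /* in the kernel a struct device would be embedded here */
    };

    /* Stand-in for put_device(): ends up in the ->release() callback,
     * which frees both eba_tbl and the volume object itself. */
    extern void drop_last_reference(struct vol_sketch *vol);

    static void error_cleanup(struct vol_sketch *vol, int registered)
    {
            if (registered) {
                    drop_last_reference(vol);
            } else {
                    free(vol->eba_tbl);
                    free(vol);
            }
    }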
@@ -400,19 +402,20 @@ out_unlock: | |||
400 | /** | 402 | /** |
401 | * ubi_remove_volume - remove volume. | 403 | * ubi_remove_volume - remove volume. |
402 | * @desc: volume descriptor | 404 | * @desc: volume descriptor |
405 | * @no_vtbl: do not change volume table if not zero | ||
403 | * | 406 | * |
404 | * This function removes volume described by @desc. The volume has to be opened | 407 | * This function removes volume described by @desc. The volume has to be opened |
405 | * in "exclusive" mode. Returns zero in case of success and a negative error | 408 | * in "exclusive" mode. Returns zero in case of success and a negative error |
406 | * code in case of failure. The caller has to have the @ubi->volumes_mutex | 409 | * code in case of failure. The caller has to have the @ubi->volumes_mutex |
407 | * locked. | 410 | * locked. |
408 | */ | 411 | */ |
409 | int ubi_remove_volume(struct ubi_volume_desc *desc) | 412 | int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) |
410 | { | 413 | { |
411 | struct ubi_volume *vol = desc->vol; | 414 | struct ubi_volume *vol = desc->vol; |
412 | struct ubi_device *ubi = vol->ubi; | 415 | struct ubi_device *ubi = vol->ubi; |
413 | int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; | 416 | int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; |
414 | 417 | ||
415 | dbg_msg("remove UBI volume %d", vol_id); | 418 | dbg_gen("remove UBI volume %d", vol_id); |
416 | ubi_assert(desc->mode == UBI_EXCLUSIVE); | 419 | ubi_assert(desc->mode == UBI_EXCLUSIVE); |
417 | ubi_assert(vol == ubi->volumes[vol_id]); | 420 | ubi_assert(vol == ubi->volumes[vol_id]); |
418 | 421 | ||
@@ -435,9 +438,11 @@ int ubi_remove_volume(struct ubi_volume_desc *desc) | |||
435 | if (err) | 438 | if (err) |
436 | goto out_err; | 439 | goto out_err; |
437 | 440 | ||
438 | err = ubi_change_vtbl_record(ubi, vol_id, NULL); | 441 | if (!no_vtbl) { |
439 | if (err) | 442 | err = ubi_change_vtbl_record(ubi, vol_id, NULL); |
440 | goto out_err; | 443 | if (err) |
444 | goto out_err; | ||
445 | } | ||
441 | 446 | ||
442 | for (i = 0; i < vol->reserved_pebs; i++) { | 447 | for (i = 0; i < vol->reserved_pebs; i++) { |
443 | err = ubi_eba_unmap_leb(ubi, vol, i); | 448 | err = ubi_eba_unmap_leb(ubi, vol, i); |
@@ -445,8 +450,6 @@ int ubi_remove_volume(struct ubi_volume_desc *desc) | |||
445 | goto out_err; | 450 | goto out_err; |
446 | } | 451 | } |
447 | 452 | ||
448 | kfree(vol->eba_tbl); | ||
449 | vol->eba_tbl = NULL; | ||
450 | cdev_del(&vol->cdev); | 453 | cdev_del(&vol->cdev); |
451 | volume_sysfs_close(vol); | 454 | volume_sysfs_close(vol); |
452 | 455 | ||
@@ -465,8 +468,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc) | |||
465 | ubi->vol_count -= 1; | 468 | ubi->vol_count -= 1; |
466 | spin_unlock(&ubi->volumes_lock); | 469 | spin_unlock(&ubi->volumes_lock); |
467 | 470 | ||
468 | paranoid_check_volumes(ubi); | 471 | if (!no_vtbl) |
469 | return 0; | 472 | err = paranoid_check_volumes(ubi); |
473 | return err; | ||
470 | 474 | ||
471 | out_err: | 475 | out_err: |
472 | ubi_err("cannot remove volume %d, error %d", vol_id, err); | 476 | ubi_err("cannot remove volume %d, error %d", vol_id, err); |
@@ -497,7 +501,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | |||
497 | if (ubi->ro_mode) | 501 | if (ubi->ro_mode) |
498 | return -EROFS; | 502 | return -EROFS; |
499 | 503 | ||
500 | dbg_msg("re-size volume %d to from %d to %d PEBs", | 504 | dbg_gen("re-size volume %d to from %d to %d PEBs", |
501 | vol_id, vol->reserved_pebs, reserved_pebs); | 505 | vol_id, vol->reserved_pebs, reserved_pebs); |
502 | 506 | ||
503 | if (vol->vol_type == UBI_STATIC_VOLUME && | 507 | if (vol->vol_type == UBI_STATIC_VOLUME && |
@@ -586,8 +590,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | |||
586 | (long long)vol->used_ebs * vol->usable_leb_size; | 590 | (long long)vol->used_ebs * vol->usable_leb_size; |
587 | } | 591 | } |
588 | 592 | ||
589 | paranoid_check_volumes(ubi); | 593 | err = paranoid_check_volumes(ubi); |
590 | return 0; | 594 | return err; |
591 | 595 | ||
592 | out_acc: | 596 | out_acc: |
593 | if (pebs > 0) { | 597 | if (pebs > 0) { |
@@ -602,6 +606,44 @@ out_free: | |||
602 | } | 606 | } |
603 | 607 | ||
604 | /** | 608 | /** |
609 | * ubi_rename_volumes - re-name UBI volumes. | ||
610 | * @ubi: UBI device description object | ||
611 | * @rename_list: list of &struct ubi_rename_entry objects | ||
612 | * | ||
613 | * This function re-names or removes volumes specified in the re-name list. | ||
614 | * Returns zero in case of success and a negative error code in case of | ||
615 | * failure. | ||
616 | */ | ||
617 | int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list) | ||
618 | { | ||
619 | int err; | ||
620 | struct ubi_rename_entry *re; | ||
621 | |||
622 | err = ubi_vtbl_rename_volumes(ubi, rename_list); | ||
623 | if (err) | ||
624 | return err; | ||
625 | |||
626 | list_for_each_entry(re, rename_list, list) { | ||
627 | if (re->remove) { | ||
628 | err = ubi_remove_volume(re->desc, 1); | ||
629 | if (err) | ||
630 | break; | ||
631 | } else { | ||
632 | struct ubi_volume *vol = re->desc->vol; | ||
633 | |||
634 | spin_lock(&ubi->volumes_lock); | ||
635 | vol->name_len = re->new_name_len; | ||
636 | memcpy(vol->name, re->new_name, re->new_name_len + 1); | ||
637 | spin_unlock(&ubi->volumes_lock); | ||
638 | } | ||
639 | } | ||
640 | |||
641 | if (!err) | ||
642 | err = paranoid_check_volumes(ubi); | ||
643 | return err; | ||
644 | } | ||
645 | |||
646 | /** | ||
605 | * ubi_add_volume - add volume. | 647 | * ubi_add_volume - add volume. |
606 | * @ubi: UBI device description object | 648 | * @ubi: UBI device description object |
607 | * @vol: volume description object | 649 | * @vol: volume description object |
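The ubi_rename_volumes() interface added in the hunk above takes a caller-built list of struct ubi_rename_entry objects. A minimal sketch of how such a list could be assembled before calling it (the helper name, the "config_b" volume name and the error handling are illustrative assumptions, not part of this patch; the entry fields follow the code above):

#include <linux/list.h>
#include <linux/string.h>
#include "ubi.h"

/* Hypothetical caller: rename @keep to "config_b" and delete @drop. */
static int example_rename_and_remove(struct ubi_device *ubi,
				     struct ubi_volume_desc *keep,
				     struct ubi_volume_desc *drop)
{
	struct ubi_rename_entry re_keep = { .desc = keep, .remove = 0 };
	struct ubi_rename_entry re_drop = { .desc = drop, .remove = 1 };
	LIST_HEAD(rename_list);

	/* Assumes new_name is an embedded buffer of UBI_VOL_NAME_MAX + 1 bytes. */
	re_keep.new_name_len = strlen("config_b");
	memcpy(re_keep.new_name, "config_b", re_keep.new_name_len + 1);

	list_add_tail(&re_keep.list, &rename_list);
	list_add_tail(&re_drop.list, &rename_list);

	/* The volume table is written first, then the in-RAM state follows. */
	return ubi_rename_volumes(ubi, &rename_list);
}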
@@ -615,8 +657,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) | |||
615 | int err, vol_id = vol->vol_id; | 657 | int err, vol_id = vol->vol_id; |
616 | dev_t dev; | 658 | dev_t dev; |
617 | 659 | ||
618 | dbg_msg("add volume %d", vol_id); | 660 | dbg_gen("add volume %d", vol_id); |
619 | ubi_dbg_dump_vol_info(vol); | ||
620 | 661 | ||
621 | /* Register character device for the volume */ | 662 | /* Register character device for the volume */ |
622 | cdev_init(&vol->cdev, &ubi_vol_cdev_operations); | 663 | cdev_init(&vol->cdev, &ubi_vol_cdev_operations); |
@@ -650,8 +691,8 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) | |||
650 | return err; | 691 | return err; |
651 | } | 692 | } |
652 | 693 | ||
653 | paranoid_check_volumes(ubi); | 694 | err = paranoid_check_volumes(ubi); |
654 | return 0; | 695 | return err; |
655 | 696 | ||
656 | out_gluebi: | 697 | out_gluebi: |
657 | err = ubi_destroy_gluebi(vol); | 698 | err = ubi_destroy_gluebi(vol); |
@@ -672,7 +713,7 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) | |||
672 | { | 713 | { |
673 | int err; | 714 | int err; |
674 | 715 | ||
675 | dbg_msg("free volume %d", vol->vol_id); | 716 | dbg_gen("free volume %d", vol->vol_id); |
676 | 717 | ||
677 | ubi->volumes[vol->vol_id] = NULL; | 718 | ubi->volumes[vol->vol_id] = NULL; |
678 | err = ubi_destroy_gluebi(vol); | 719 | err = ubi_destroy_gluebi(vol); |
@@ -686,8 +727,10 @@ void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) | |||
686 | * paranoid_check_volume - check volume information. | 727 | * paranoid_check_volume - check volume information. |
687 | * @ubi: UBI device description object | 728 | * @ubi: UBI device description object |
688 | * @vol_id: volume ID | 729 | * @vol_id: volume ID |
730 | * | ||
731 | * Returns zero if volume is all right and a negative error code if not. | ||
689 | */ | 732 | */ |
690 | static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) | 733 | static int paranoid_check_volume(struct ubi_device *ubi, int vol_id) |
691 | { | 734 | { |
692 | int idx = vol_id2idx(ubi, vol_id); | 735 | int idx = vol_id2idx(ubi, vol_id); |
693 | int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; | 736 | int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; |
@@ -705,16 +748,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) | |||
705 | goto fail; | 748 | goto fail; |
706 | } | 749 | } |
707 | spin_unlock(&ubi->volumes_lock); | 750 | spin_unlock(&ubi->volumes_lock); |
708 | return; | 751 | return 0; |
709 | } | ||
710 | |||
711 | if (vol->exclusive) { | ||
712 | /* | ||
713 | * The volume may be being created at the moment, do not check | ||
714 | * it (e.g., it may be in the middle of ubi_create_volume(). | ||
715 | */ | ||
716 | spin_unlock(&ubi->volumes_lock); | ||
717 | return; | ||
718 | } | 752 | } |
719 | 753 | ||
720 | if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || | 754 | if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || |
@@ -727,7 +761,7 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) | |||
727 | goto fail; | 761 | goto fail; |
728 | } | 762 | } |
729 | 763 | ||
730 | n = vol->alignment % ubi->min_io_size; | 764 | n = vol->alignment & (ubi->min_io_size - 1); |
731 | if (vol->alignment != 1 && n) { | 765 | if (vol->alignment != 1 && n) { |
732 | ubi_err("alignment is not multiple of min I/O unit"); | 766 | ubi_err("alignment is not multiple of min I/O unit"); |
733 | goto fail; | 767 | goto fail; |
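The switch from '%' to '&amp; (min_io_size - 1)' above relies on the minimal I/O unit size being a power of two, as NAND page sizes are. A tiny stand-alone check of the identity being relied on, outside any kernel context:

#include <assert.h>

int main(void)
{
	const unsigned int min_io_size = 2048;	/* e.g. a 2 KiB NAND page */
	unsigned int alignment;

	/* For any power of two P, x % P == x & (P - 1). */
	for (alignment = 1; alignment < 3 * min_io_size; alignment++)
		assert(alignment % min_io_size ==
		       (alignment & (min_io_size - 1)));
	return 0;
}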
@@ -824,31 +858,39 @@ static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) | |||
824 | 858 | ||
825 | if (alignment != vol->alignment || data_pad != vol->data_pad || | 859 | if (alignment != vol->alignment || data_pad != vol->data_pad || |
826 | upd_marker != vol->upd_marker || vol_type != vol->vol_type || | 860 | upd_marker != vol->upd_marker || vol_type != vol->vol_type || |
827 | name_len!= vol->name_len || strncmp(name, vol->name, name_len)) { | 861 | name_len != vol->name_len || strncmp(name, vol->name, name_len)) { |
828 | ubi_err("volume info is different"); | 862 | ubi_err("volume info is different"); |
829 | goto fail; | 863 | goto fail; |
830 | } | 864 | } |
831 | 865 | ||
832 | spin_unlock(&ubi->volumes_lock); | 866 | spin_unlock(&ubi->volumes_lock); |
833 | return; | 867 | return 0; |
834 | 868 | ||
835 | fail: | 869 | fail: |
836 | ubi_err("paranoid check failed for volume %d", vol_id); | 870 | ubi_err("paranoid check failed for volume %d", vol_id); |
837 | ubi_dbg_dump_vol_info(vol); | 871 | if (vol) |
872 | ubi_dbg_dump_vol_info(vol); | ||
838 | ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); | 873 | ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); |
839 | spin_unlock(&ubi->volumes_lock); | 874 | spin_unlock(&ubi->volumes_lock); |
840 | BUG(); | 875 | return -EINVAL; |
841 | } | 876 | } |
842 | 877 | ||
843 | /** | 878 | /** |
844 | * paranoid_check_volumes - check information about all volumes. | 879 | * paranoid_check_volumes - check information about all volumes. |
845 | * @ubi: UBI device description object | 880 | * @ubi: UBI device description object |
881 | * | ||
882 | * Returns zero if volumes are all right and a negative error code if not. | ||
846 | */ | 883 | */ |
847 | static void paranoid_check_volumes(struct ubi_device *ubi) | 884 | static int paranoid_check_volumes(struct ubi_device *ubi) |
848 | { | 885 | { |
849 | int i; | 886 | int i, err = 0; |
850 | 887 | ||
851 | for (i = 0; i < ubi->vtbl_slots; i++) | 888 | for (i = 0; i < ubi->vtbl_slots; i++) { |
852 | paranoid_check_volume(ubi, i); | 889 | err = paranoid_check_volume(ubi, i); |
890 | if (err) | ||
891 | break; | ||
892 | } | ||
893 | |||
894 | return err; | ||
853 | } | 895 | } |
854 | #endif | 896 | #endif |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index af36b12be278..217d0e111b2a 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -115,8 +115,58 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, | |||
115 | } | 115 | } |
116 | 116 | ||
117 | /** | 117 | /** |
118 | * vtbl_check - check if volume table is not corrupted and contains sensible | 118 | * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table. |
119 | * data. | 119 | * @ubi: UBI device description object |
120 | * @rename_list: list of &struct ubi_rename_entry objects | ||
121 | * | ||
122 | * This function re-names multiple volumes specified in @rename_list in the volume | ||
123 | * table. Returns zero in case of success and a negative error code in case of | ||
124 | * failure. | ||
125 | */ | ||
126 | int ubi_vtbl_rename_volumes(struct ubi_device *ubi, | ||
127 | struct list_head *rename_list) | ||
128 | { | ||
129 | int i, err; | ||
130 | struct ubi_rename_entry *re; | ||
131 | struct ubi_volume *layout_vol; | ||
132 | |||
133 | list_for_each_entry(re, rename_list, list) { | ||
134 | uint32_t crc; | ||
135 | struct ubi_volume *vol = re->desc->vol; | ||
136 | struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id]; | ||
137 | |||
138 | if (re->remove) { | ||
139 | memcpy(vtbl_rec, &empty_vtbl_record, | ||
140 | sizeof(struct ubi_vtbl_record)); | ||
141 | continue; | ||
142 | } | ||
143 | |||
144 | vtbl_rec->name_len = cpu_to_be16(re->new_name_len); | ||
145 | memcpy(vtbl_rec->name, re->new_name, re->new_name_len); | ||
146 | memset(vtbl_rec->name + re->new_name_len, 0, | ||
147 | UBI_VOL_NAME_MAX + 1 - re->new_name_len); | ||
148 | crc = crc32(UBI_CRC32_INIT, vtbl_rec, | ||
149 | UBI_VTBL_RECORD_SIZE_CRC); | ||
150 | vtbl_rec->crc = cpu_to_be32(crc); | ||
151 | } | ||
152 | |||
153 | layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; | ||
154 | for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { | ||
155 | err = ubi_eba_unmap_leb(ubi, layout_vol, i); | ||
156 | if (err) | ||
157 | return err; | ||
158 | |||
159 | err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, | ||
160 | ubi->vtbl_size, UBI_LONGTERM); | ||
161 | if (err) | ||
162 | return err; | ||
163 | } | ||
164 | |||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | /** | ||
169 | * vtbl_check - check if volume table is not corrupted and sensible. | ||
120 | * @ubi: UBI device description object | 170 | * @ubi: UBI device description object |
121 | * @vtbl: volume table | 171 | * @vtbl: volume table |
122 | * | 172 | * |
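The ubi_vtbl_rename_volumes() hunk above also illustrates the on-flash convention for volume table records: the name is NUL-padded to UBI_VOL_NAME_MAX + 1 bytes, name_len is stored big-endian, and the record is sealed with a CRC32 over everything except its trailing crc field. Condensed into a single helper (the function name is made up for illustration and mirrors the code above):

#include <linux/crc32.h>
#include <linux/string.h>
#include "ubi.h"

/* Illustrative only: set a record's name and re-seal its CRC. */
static void vtbl_rec_set_name(struct ubi_vtbl_record *r,
			      const char *name, int len)
{
	uint32_t crc;

	r->name_len = cpu_to_be16(len);
	memcpy(r->name, name, len);
	memset(r->name + len, 0, UBI_VOL_NAME_MAX + 1 - len);

	/* The CRC covers the record minus its own crc field. */
	crc = crc32(UBI_CRC32_INIT, r, UBI_VTBL_RECORD_SIZE_CRC);
	r->crc = cpu_to_be32(crc);
}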
@@ -127,7 +177,7 @@ static int vtbl_check(const struct ubi_device *ubi, | |||
127 | const struct ubi_vtbl_record *vtbl) | 177 | const struct ubi_vtbl_record *vtbl) |
128 | { | 178 | { |
129 | int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; | 179 | int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; |
130 | int upd_marker; | 180 | int upd_marker, err; |
131 | uint32_t crc; | 181 | uint32_t crc; |
132 | const char *name; | 182 | const char *name; |
133 | 183 | ||
@@ -153,7 +203,7 @@ static int vtbl_check(const struct ubi_device *ubi, | |||
153 | if (reserved_pebs == 0) { | 203 | if (reserved_pebs == 0) { |
154 | if (memcmp(&vtbl[i], &empty_vtbl_record, | 204 | if (memcmp(&vtbl[i], &empty_vtbl_record, |
155 | UBI_VTBL_RECORD_SIZE)) { | 205 | UBI_VTBL_RECORD_SIZE)) { |
156 | dbg_err("bad empty record"); | 206 | err = 2; |
157 | goto bad; | 207 | goto bad; |
158 | } | 208 | } |
159 | continue; | 209 | continue; |
@@ -161,56 +211,57 @@ static int vtbl_check(const struct ubi_device *ubi, | |||
161 | 211 | ||
162 | if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || | 212 | if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || |
163 | name_len < 0) { | 213 | name_len < 0) { |
164 | dbg_err("negative values"); | 214 | err = 3; |
165 | goto bad; | 215 | goto bad; |
166 | } | 216 | } |
167 | 217 | ||
168 | if (alignment > ubi->leb_size || alignment == 0) { | 218 | if (alignment > ubi->leb_size || alignment == 0) { |
169 | dbg_err("bad alignment"); | 219 | err = 4; |
170 | goto bad; | 220 | goto bad; |
171 | } | 221 | } |
172 | 222 | ||
173 | n = alignment % ubi->min_io_size; | 223 | n = alignment & (ubi->min_io_size - 1); |
174 | if (alignment != 1 && n) { | 224 | if (alignment != 1 && n) { |
175 | dbg_err("alignment is not multiple of min I/O unit"); | 225 | err = 5; |
176 | goto bad; | 226 | goto bad; |
177 | } | 227 | } |
178 | 228 | ||
179 | n = ubi->leb_size % alignment; | 229 | n = ubi->leb_size % alignment; |
180 | if (data_pad != n) { | 230 | if (data_pad != n) { |
181 | dbg_err("bad data_pad, has to be %d", n); | 231 | dbg_err("bad data_pad, has to be %d", n); |
232 | err = 6; | ||
182 | goto bad; | 233 | goto bad; |
183 | } | 234 | } |
184 | 235 | ||
185 | if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { | 236 | if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { |
186 | dbg_err("bad vol_type"); | 237 | err = 7; |
187 | goto bad; | 238 | goto bad; |
188 | } | 239 | } |
189 | 240 | ||
190 | if (upd_marker != 0 && upd_marker != 1) { | 241 | if (upd_marker != 0 && upd_marker != 1) { |
191 | dbg_err("bad upd_marker"); | 242 | err = 8; |
192 | goto bad; | 243 | goto bad; |
193 | } | 244 | } |
194 | 245 | ||
195 | if (reserved_pebs > ubi->good_peb_count) { | 246 | if (reserved_pebs > ubi->good_peb_count) { |
196 | dbg_err("too large reserved_pebs, good PEBs %d", | 247 | dbg_err("too large reserved_pebs, good PEBs %d", |
197 | ubi->good_peb_count); | 248 | ubi->good_peb_count); |
249 | err = 9; | ||
198 | goto bad; | 250 | goto bad; |
199 | } | 251 | } |
200 | 252 | ||
201 | if (name_len > UBI_VOL_NAME_MAX) { | 253 | if (name_len > UBI_VOL_NAME_MAX) { |
202 | dbg_err("too long volume name, max %d", | 254 | err = 10; |
203 | UBI_VOL_NAME_MAX); | ||
204 | goto bad; | 255 | goto bad; |
205 | } | 256 | } |
206 | 257 | ||
207 | if (name[0] == '\0') { | 258 | if (name[0] == '\0') { |
208 | dbg_err("NULL volume name"); | 259 | err = 11; |
209 | goto bad; | 260 | goto bad; |
210 | } | 261 | } |
211 | 262 | ||
212 | if (name_len != strnlen(name, name_len + 1)) { | 263 | if (name_len != strnlen(name, name_len + 1)) { |
213 | dbg_err("bad name_len"); | 264 | err = 12; |
214 | goto bad; | 265 | goto bad; |
215 | } | 266 | } |
216 | } | 267 | } |
@@ -235,7 +286,7 @@ static int vtbl_check(const struct ubi_device *ubi, | |||
235 | return 0; | 286 | return 0; |
236 | 287 | ||
237 | bad: | 288 | bad: |
238 | ubi_err("volume table check failed, record %d", i); | 289 | ubi_err("volume table check failed: record %d, error %d", i, err); |
239 | ubi_dbg_dump_vtbl_record(&vtbl[i], i); | 290 | ubi_dbg_dump_vtbl_record(&vtbl[i], i); |
240 | return -EINVAL; | 291 | return -EINVAL; |
241 | } | 292 | } |
@@ -287,7 +338,6 @@ retry: | |||
287 | vid_hdr->data_pad = cpu_to_be32(0); | 338 | vid_hdr->data_pad = cpu_to_be32(0); |
288 | vid_hdr->lnum = cpu_to_be32(copy); | 339 | vid_hdr->lnum = cpu_to_be32(copy); |
289 | vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum); | 340 | vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum); |
290 | vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0); | ||
291 | 341 | ||
292 | /* The EC header is already there, write the VID header */ | 342 | /* The EC header is already there, write the VID header */ |
293 | err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); | 343 | err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); |
@@ -370,7 +420,7 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, | |||
370 | * to LEB 0. | 420 | * to LEB 0. |
371 | */ | 421 | */ |
372 | 422 | ||
373 | dbg_msg("check layout volume"); | 423 | dbg_gen("check layout volume"); |
374 | 424 | ||
375 | /* Read both LEB 0 and LEB 1 into memory */ | 425 | /* Read both LEB 0 and LEB 1 into memory */ |
376 | ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { | 426 | ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { |
@@ -384,7 +434,16 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, | |||
384 | err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, | 434 | err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, |
385 | ubi->vtbl_size); | 435 | ubi->vtbl_size); |
386 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) | 436 | if (err == UBI_IO_BITFLIPS || err == -EBADMSG) |
387 | /* Scrub the PEB later */ | 437 | /* |
438 | * Scrub the PEB later. Note, -EBADMSG indicates an | ||
439 | * uncorrectable ECC error, but we have our own CRC and | ||
440 | * the data will be checked later. If the data is OK, | ||
441 | * the PEB will be scrubbed (because we set | ||
442 | * seb->scrub). If the data is not OK, the contents of | ||
443 | * the PEB will be recovered from the second copy, and | ||
444 | * seb->scrub will be cleared in | ||
445 | * 'ubi_scan_add_used()'. | ||
446 | */ | ||
388 | seb->scrub = 1; | 447 | seb->scrub = 1; |
389 | else if (err) | 448 | else if (err) |
390 | goto out_free; | 449 | goto out_free; |
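The new comment spells out the read policy for the layout volume: correctable bit-flips and even uncorrectable ECC errors only flag the PEB for later scrubbing, because the volume table carries its own CRCs and a second copy, while any other I/O error aborts the scan. The same policy written as a small wrapper (the wrapper name is illustrative; the calls match the hunk above):

/* Illustrative wrapper around the read performed above. */
static int read_vtbl_copy(struct ubi_device *ubi, struct ubi_scan_leb *seb,
			  void *buf)
{
	int err;

	err = ubi_io_read_data(ubi, buf, seb->pnum, 0, ubi->vtbl_size);
	if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
		seb->scrub = 1;	/* data may still be valid, the CRC decides */
		return 0;
	}
	return err;		/* 0 on success, anything else is fatal */
}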
@@ -400,7 +459,8 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi, | |||
400 | if (!leb_corrupted[0]) { | 459 | if (!leb_corrupted[0]) { |
401 | /* LEB 0 is OK */ | 460 | /* LEB 0 is OK */ |
402 | if (leb[1]) | 461 | if (leb[1]) |
403 | leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); | 462 | leb_corrupted[1] = memcmp(leb[0], leb[1], |
463 | ubi->vtbl_size); | ||
404 | if (leb_corrupted[1]) { | 464 | if (leb_corrupted[1]) { |
405 | ubi_warn("volume table copy #2 is corrupted"); | 465 | ubi_warn("volume table copy #2 is corrupted"); |
406 | err = create_vtbl(ubi, si, 1, leb[0]); | 466 | err = create_vtbl(ubi, si, 1, leb[0]); |
@@ -620,30 +680,32 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | |||
620 | static int check_sv(const struct ubi_volume *vol, | 680 | static int check_sv(const struct ubi_volume *vol, |
621 | const struct ubi_scan_volume *sv) | 681 | const struct ubi_scan_volume *sv) |
622 | { | 682 | { |
683 | int err; | ||
684 | |||
623 | if (sv->highest_lnum >= vol->reserved_pebs) { | 685 | if (sv->highest_lnum >= vol->reserved_pebs) { |
624 | dbg_err("bad highest_lnum"); | 686 | err = 1; |
625 | goto bad; | 687 | goto bad; |
626 | } | 688 | } |
627 | if (sv->leb_count > vol->reserved_pebs) { | 689 | if (sv->leb_count > vol->reserved_pebs) { |
628 | dbg_err("bad leb_count"); | 690 | err = 2; |
629 | goto bad; | 691 | goto bad; |
630 | } | 692 | } |
631 | if (sv->vol_type != vol->vol_type) { | 693 | if (sv->vol_type != vol->vol_type) { |
632 | dbg_err("bad vol_type"); | 694 | err = 3; |
633 | goto bad; | 695 | goto bad; |
634 | } | 696 | } |
635 | if (sv->used_ebs > vol->reserved_pebs) { | 697 | if (sv->used_ebs > vol->reserved_pebs) { |
636 | dbg_err("bad used_ebs"); | 698 | err = 4; |
637 | goto bad; | 699 | goto bad; |
638 | } | 700 | } |
639 | if (sv->data_pad != vol->data_pad) { | 701 | if (sv->data_pad != vol->data_pad) { |
640 | dbg_err("bad data_pad"); | 702 | err = 5; |
641 | goto bad; | 703 | goto bad; |
642 | } | 704 | } |
643 | return 0; | 705 | return 0; |
644 | 706 | ||
645 | bad: | 707 | bad: |
646 | ubi_err("bad scanning information"); | 708 | ubi_err("bad scanning information, error %d", err); |
647 | ubi_dbg_dump_sv(sv); | 709 | ubi_dbg_dump_sv(sv); |
648 | ubi_dbg_dump_vol_info(vol); | 710 | ubi_dbg_dump_vol_info(vol); |
649 | return -EINVAL; | 711 | return -EINVAL; |
@@ -672,14 +734,13 @@ static int check_scanning_info(const struct ubi_device *ubi, | |||
672 | return -EINVAL; | 734 | return -EINVAL; |
673 | } | 735 | } |
674 | 736 | ||
675 | if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT&& | 737 | if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && |
676 | si->highest_vol_id < UBI_INTERNAL_VOL_START) { | 738 | si->highest_vol_id < UBI_INTERNAL_VOL_START) { |
677 | ubi_err("too large volume ID %d found by scanning", | 739 | ubi_err("too large volume ID %d found by scanning", |
678 | si->highest_vol_id); | 740 | si->highest_vol_id); |
679 | return -EINVAL; | 741 | return -EINVAL; |
680 | } | 742 | } |
681 | 743 | ||
682 | |||
683 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { | 744 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { |
684 | cond_resched(); | 745 | cond_resched(); |
685 | 746 | ||
@@ -717,8 +778,7 @@ static int check_scanning_info(const struct ubi_device *ubi, | |||
717 | } | 778 | } |
718 | 779 | ||
719 | /** | 780 | /** |
720 | * ubi_read_volume_table - read volume table. | 781 | * ubi_read_volume_table - read the volume table. |
721 | * information. | ||
722 | * @ubi: UBI device description object | 782 | * @ubi: UBI device description object |
723 | * @si: scanning information | 783 | * @si: scanning information |
724 | * | 784 | * |
@@ -797,11 +857,10 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
797 | 857 | ||
798 | out_free: | 858 | out_free: |
799 | vfree(ubi->vtbl); | 859 | vfree(ubi->vtbl); |
800 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) | 860 | for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { |
801 | if (ubi->volumes[i]) { | 861 | kfree(ubi->volumes[i]); |
802 | kfree(ubi->volumes[i]); | 862 | ubi->volumes[i] = NULL; |
803 | ubi->volumes[i] = NULL; | 863 | } |
804 | } | ||
805 | return err; | 864 | return err; |
806 | } | 865 | } |
807 | 866 | ||
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index a471a491f0ab..05d70937b543 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -19,22 +19,22 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * UBI wear-leveling unit. | 22 | * UBI wear-leveling sub-system. |
23 | * | 23 | * |
24 | * This unit is responsible for wear-leveling. It works in terms of physical | 24 | * This sub-system is responsible for wear-leveling. It works in terms of |
25 | * eraseblocks and erase counters and knows nothing about logical eraseblocks, | 25 | * physical eraseblocks and erase counters and knows nothing about logical |
26 | * volumes, etc. From this unit's perspective all physical eraseblocks are of | 26 | * eraseblocks, volumes, etc. From this sub-system's perspective all physical |
27 | * two types - used and free. Used physical eraseblocks are those that were | 27 | * eraseblocks are of two types - used and free. Used physical eraseblocks are |
28 | * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are | 28 | * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical |
29 | * those that were put by the 'ubi_wl_put_peb()' function. | 29 | * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function. |
30 | * | 30 | * |
31 | * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter | 31 | * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter |
32 | * header. The rest of the physical eraseblock contains only 0xFF bytes. | 32 | * header. The rest of the physical eraseblock contains only %0xFF bytes. |
33 | * | 33 | * |
34 | * When physical eraseblocks are returned to the WL unit by means of the | 34 | * When physical eraseblocks are returned to the WL sub-system by means of the |
35 | * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is | 35 | * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is |
36 | * done asynchronously in context of the per-UBI device background thread, | 36 | * done asynchronously in context of the per-UBI device background thread, |
37 | * which is also managed by the WL unit. | 37 | * which is also managed by the WL sub-system. |
38 | * | 38 | * |
39 | * The wear-leveling is ensured by means of moving the contents of used | 39 | * The wear-leveling is ensured by means of moving the contents of used |
40 | * physical eraseblocks with low erase counter to free physical eraseblocks | 40 | * physical eraseblocks with low erase counter to free physical eraseblocks |
@@ -43,34 +43,36 @@ | |||
43 | * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick | 43 | * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick |
44 | * an "optimal" physical eraseblock. For example, when it is known that the | 44 | * an "optimal" physical eraseblock. For example, when it is known that the |
45 | * physical eraseblock will be "put" soon because it contains short-term data, | 45 | * physical eraseblock will be "put" soon because it contains short-term data, |
46 | * the WL unit may pick a free physical eraseblock with low erase counter, and | 46 | * the WL sub-system may pick a free physical eraseblock with low erase |
47 | * so forth. | 47 | * counter, and so forth. |
48 | * | 48 | * |
49 | * If the WL unit fails to erase a physical eraseblock, it marks it as bad. | 49 | * If the WL sub-system fails to erase a physical eraseblock, it marks it as |
50 | * bad. | ||
50 | * | 51 | * |
51 | * This unit is also responsible for scrubbing. If a bit-flip is detected in a | 52 | * This sub-system is also responsible for scrubbing. If a bit-flip is detected |
52 | * physical eraseblock, it has to be moved. Technically this is the same as | 53 | * in a physical eraseblock, it has to be moved. Technically this is the same |
53 | * moving it for wear-leveling reasons. | 54 | * as moving it for wear-leveling reasons. |
54 | * | 55 | * |
55 | * As it was said, for the UBI unit all physical eraseblocks are either "free" | 56 | * As it was said, for the UBI sub-system all physical eraseblocks are either |
56 | * or "used". Free eraseblock are kept in the @wl->free RB-tree, while used | 57 | * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while |
57 | * eraseblocks are kept in a set of different RB-trees: @wl->used, | 58 | * used eraseblocks are kept in a set of different RB-trees: @wl->used, |
58 | * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. | 59 | * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. |
59 | * | 60 | * |
60 | * Note, in this implementation, we keep a small in-RAM object for each physical | 61 | * Note, in this implementation, we keep a small in-RAM object for each physical |
61 | * eraseblock. This is surely not a scalable solution. But it appears to be good | 62 | * eraseblock. This is surely not a scalable solution. But it appears to be good |
62 | * enough for moderately large flashes and it is simple. In future, one may | 63 | * enough for moderately large flashes and it is simple. In future, one may |
63 | * re-work this unit and make it more scalable. | 64 | * re-work this sub-system and make it more scalable. |
64 | * | 65 | * |
65 | * At the moment this unit does not utilize the sequence number, which was | 66 | * At the moment this sub-system does not utilize the sequence number, which |
66 | * introduced relatively recently. But it would be wise to do this because the | 67 | * was introduced relatively recently. But it would be wise to do this because |
67 | * sequence number of a logical eraseblock characterizes how old is it. For | 68 | * the sequence number of a logical eraseblock characterizes how old it is. For |
68 | * example, when we move a PEB with low erase counter, and we need to pick the | 69 | * example, when we move a PEB with low erase counter, and we need to pick the |
69 | * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we | 70 | * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we |
70 | * pick target PEB with an average EC if our PEB is not very "old". This is a | 71 | * pick target PEB with an average EC if our PEB is not very "old". There is |
71 | * room for future re-works of the WL unit. | 72 | * room for future re-works of the WL sub-system. |
72 | * | 73 | * |
73 | * FIXME: looks too complex, should be simplified (later). | 74 | * Note: the stuff with protection trees looks too complex and is difficult to |
75 | * understand. Should be fixed. | ||
74 | */ | 76 | */ |
75 | 77 | ||
76 | #include <linux/slab.h> | 78 | #include <linux/slab.h> |
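The "small in-RAM object" mentioned in the comment block above is the per-PEB wear-leveling entry. Its rough shape, inferred from the uses of the 'rb', 'ec' and 'pnum' members later in this file (an approximation for illustration, not a definition taken from this patch):

#include <linux/rbtree.h>

/* Approximate shape of the per-PEB wear-leveling entry. */
struct wl_entry_sketch {
	struct rb_node rb;	/* link in @wl->free, @wl->used or @wl->scrub */
	int ec;			/* erase counter of the physical eraseblock */
	int pnum;		/* physical eraseblock number */
};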
@@ -92,20 +94,21 @@ | |||
92 | 94 | ||
93 | /* | 95 | /* |
94 | * Maximum difference between two erase counters. If this threshold is | 96 | * Maximum difference between two erase counters. If this threshold is |
95 | * exceeded, the WL unit starts moving data from used physical eraseblocks with | 97 | * exceeded, the WL sub-system starts moving data from used physical |
96 | * low erase counter to free physical eraseblocks with high erase counter. | 98 | * eraseblocks with low erase counter to free physical eraseblocks with high |
99 | * erase counter. | ||
97 | */ | 100 | */ |
98 | #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD | 101 | #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD |
99 | 102 | ||
100 | /* | 103 | /* |
101 | * When a physical eraseblock is moved, the WL unit has to pick the target | 104 | * When a physical eraseblock is moved, the WL sub-system has to pick the target |
102 | * physical eraseblock to move to. The simplest way would be just to pick the | 105 | * physical eraseblock to move to. The simplest way would be just to pick the |
103 | * one with the highest erase counter. But in certain workloads this could lead | 106 | * one with the highest erase counter. But in certain workloads this could lead |
104 | * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a | 107 | * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a |
105 | * situation when the picked physical eraseblock is constantly erased after the | 108 | * situation when the picked physical eraseblock is constantly erased after the |
106 | * data is written to it. So, we have a constant which limits the highest erase | 109 | * data is written to it. So, we have a constant which limits the highest erase |
107 | * counter of the free physical eraseblock to pick. Namely, the WL unit does | 110 | * counter of the free physical eraseblock to pick. Namely, the WL sub-system |
108 | * not pick eraseblocks with erase counter greater then the lowest erase | 111 | * does not pick eraseblocks with erase counter greater than the lowest erase |
109 | * counter plus %WL_FREE_MAX_DIFF. | 112 | * counter plus %WL_FREE_MAX_DIFF. |
110 | */ | 113 | */ |
111 | #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) | 114 | #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) |
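In other words, a free PEB is never handed out if its erase counter exceeds the lowest erase counter in @wl->free plus WL_FREE_MAX_DIFF. A simplified sketch of such a bounded pick over an RB-tree sorted by erase counter; it approximates what the driver's find_wl_entry(), used further down, has to do, and is not copied from it:

#include <linux/rbtree.h>

/*
 * Return the entry with the highest erase counter that does not exceed
 * lowest_ec + max_diff, assuming @root is sorted by ->ec.
 */
static struct ubi_wl_entry *pick_bounded(struct rb_root *root, int max_diff)
{
	struct ubi_wl_entry *e, *best;
	struct rb_node *p;
	int max;

	best = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max = best->ec + max_diff;

	for (p = root->rb_node; p; ) {
		e = rb_entry(p, struct ubi_wl_entry, rb);
		if (e->ec > max)
			p = p->rb_left;
		else {
			best = e;
			p = p->rb_right;
		}
	}
	return best;
}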
@@ -123,11 +126,11 @@ | |||
123 | * @abs_ec: the absolute erase counter value when the protection ends | 126 | * @abs_ec: the absolute erase counter value when the protection ends |
124 | * @e: the wear-leveling entry of the physical eraseblock under protection | 127 | * @e: the wear-leveling entry of the physical eraseblock under protection |
125 | * | 128 | * |
126 | * When the WL unit returns a physical eraseblock, the physical eraseblock is | 129 | * When the WL sub-system returns a physical eraseblock, the physical |
127 | * protected from being moved for some "time". For this reason, the physical | 130 | * eraseblock is protected from being moved for some "time". For this reason, |
128 | * eraseblock is not directly moved from the @wl->free tree to the @wl->used | 131 | * the physical eraseblock is not directly moved from the @wl->free tree to the |
129 | * tree. There is one more tree in between where this physical eraseblock is | 132 | * @wl->used tree. There is one more tree in between where this physical |
130 | * temporarily stored (@wl->prot). | 133 | * eraseblock is temporarily stored (@wl->prot). |
131 | * | 134 | * |
132 | * All this protection stuff is needed because: | 135 | * All this protection stuff is needed because: |
133 | * o we don't want to move physical eraseblocks just after we have given them | 136 | * o we don't want to move physical eraseblocks just after we have given them |
@@ -175,7 +178,6 @@ struct ubi_wl_prot_entry { | |||
175 | * @list: a link in the list of pending works | 178 | * @list: a link in the list of pending works |
176 | * @func: worker function | 179 | * @func: worker function |
177 | * @priv: private data of the worker function | 180 | * @priv: private data of the worker function |
178 | * | ||
179 | * @e: physical eraseblock to erase | 181 | * @e: physical eraseblock to erase |
180 | * @torture: if the physical eraseblock has to be tortured | 182 | * @torture: if the physical eraseblock has to be tortured |
181 | * | 183 | * |
@@ -473,52 +475,47 @@ retry: | |||
473 | } | 475 | } |
474 | 476 | ||
475 | switch (dtype) { | 477 | switch (dtype) { |
476 | case UBI_LONGTERM: | 478 | case UBI_LONGTERM: |
477 | /* | 479 | /* |
478 | * For long term data we pick a physical eraseblock | 480 | * For long term data we pick a physical eraseblock with high |
479 | * with high erase counter. But the highest erase | 481 | * erase counter. But the highest erase counter we can pick is |
480 | * counter we can pick is bounded by the the lowest | 482 | * bounded by the lowest erase counter plus |
481 | * erase counter plus %WL_FREE_MAX_DIFF. | 483 | * %WL_FREE_MAX_DIFF. |
482 | */ | 484 | */ |
483 | e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); | 485 | e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); |
484 | protect = LT_PROTECTION; | 486 | protect = LT_PROTECTION; |
485 | break; | 487 | break; |
486 | case UBI_UNKNOWN: | 488 | case UBI_UNKNOWN: |
487 | /* | 489 | /* |
488 | * For unknown data we pick a physical eraseblock with | 490 | * For unknown data we pick a physical eraseblock with medium |
489 | * medium erase counter. But we by no means can pick a | 491 | * erase counter. But we by no means can pick a physical |
490 | * physical eraseblock with erase counter greater or | 492 | * eraseblock with erase counter greater than or equal to the |
491 | * equivalent than the lowest erase counter plus | 493 | * lowest erase counter plus %WL_FREE_MAX_DIFF. |
492 | * %WL_FREE_MAX_DIFF. | 494 | */ |
493 | */ | 495 | first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); |
494 | first = rb_entry(rb_first(&ubi->free), | 496 | last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); |
495 | struct ubi_wl_entry, rb); | ||
496 | last = rb_entry(rb_last(&ubi->free), | ||
497 | struct ubi_wl_entry, rb); | ||
498 | 497 | ||
499 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) | 498 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) |
500 | e = rb_entry(ubi->free.rb_node, | 499 | e = rb_entry(ubi->free.rb_node, |
501 | struct ubi_wl_entry, rb); | 500 | struct ubi_wl_entry, rb); |
502 | else { | 501 | else { |
503 | medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; | 502 | medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; |
504 | e = find_wl_entry(&ubi->free, medium_ec); | 503 | e = find_wl_entry(&ubi->free, medium_ec); |
505 | } | 504 | } |
506 | protect = U_PROTECTION; | 505 | protect = U_PROTECTION; |
507 | break; | 506 | break; |
508 | case UBI_SHORTTERM: | 507 | case UBI_SHORTTERM: |
509 | /* | 508 | /* |
510 | * For short term data we pick a physical eraseblock | 509 | * For short term data we pick a physical eraseblock with the |
511 | * with the lowest erase counter as we expect it will | 510 | * lowest erase counter as we expect it will be erased soon. |
512 | * be erased soon. | 511 | */ |
513 | */ | 512 | e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); |
514 | e = rb_entry(rb_first(&ubi->free), | 513 | protect = ST_PROTECTION; |
515 | struct ubi_wl_entry, rb); | 514 | break; |
516 | protect = ST_PROTECTION; | 515 | default: |
517 | break; | 516 | protect = 0; |
518 | default: | 517 | e = NULL; |
519 | protect = 0; | 518 | BUG(); |
520 | e = NULL; | ||
521 | BUG(); | ||
522 | } | 519 | } |
523 | 520 | ||
524 | /* | 521 | /* |
@@ -582,7 +579,8 @@ found: | |||
582 | * This function returns zero in case of success and a negative error code in | 579 | * This function returns zero in case of success and a negative error code in |
583 | * case of failure. | 580 | * case of failure. |
584 | */ | 581 | */ |
585 | static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) | 582 | static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, |
583 | int torture) | ||
586 | { | 584 | { |
587 | int err; | 585 | int err; |
588 | struct ubi_ec_hdr *ec_hdr; | 586 | struct ubi_ec_hdr *ec_hdr; |
@@ -634,8 +632,7 @@ out_free: | |||
634 | } | 632 | } |
635 | 633 | ||
636 | /** | 634 | /** |
637 | * check_protection_over - check if it is time to stop protecting some | 635 | * check_protection_over - check if it is time to stop protecting some PEBs. |
638 | * physical eraseblocks. | ||
639 | * @ubi: UBI device description object | 636 | * @ubi: UBI device description object |
640 | * | 637 | * |
641 | * This function is called after each erase operation, when the absolute erase | 638 | * This function is called after each erase operation, when the absolute erase |
@@ -871,6 +868,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
871 | } | 868 | } |
872 | 869 | ||
873 | ubi_free_vid_hdr(ubi, vid_hdr); | 870 | ubi_free_vid_hdr(ubi, vid_hdr); |
871 | if (scrubbing && !protect) | ||
872 | ubi_msg("scrubbed PEB %d, data moved to PEB %d", | ||
873 | e1->pnum, e2->pnum); | ||
874 | |||
874 | spin_lock(&ubi->wl_lock); | 875 | spin_lock(&ubi->wl_lock); |
875 | if (protect) | 876 | if (protect) |
876 | prot_tree_add(ubi, e1, pe, protect); | 877 | prot_tree_add(ubi, e1, pe, protect); |
@@ -1054,8 +1055,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
1054 | spin_unlock(&ubi->wl_lock); | 1055 | spin_unlock(&ubi->wl_lock); |
1055 | 1056 | ||
1056 | /* | 1057 | /* |
1057 | * One more erase operation has happened, take care about protected | 1058 | * One more erase operation has happened, take care of |
1058 | * physical eraseblocks. | 1059 | * protected physical eraseblocks. |
1059 | */ | 1060 | */ |
1060 | check_protection_over(ubi); | 1061 | check_protection_over(ubi); |
1061 | 1062 | ||
@@ -1136,7 +1137,7 @@ out_ro: | |||
1136 | } | 1137 | } |
1137 | 1138 | ||
1138 | /** | 1139 | /** |
1139 | * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit. | 1140 | * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. |
1140 | * @ubi: UBI device description object | 1141 | * @ubi: UBI device description object |
1141 | * @pnum: physical eraseblock to return | 1142 | * @pnum: physical eraseblock to return |
1142 | * @torture: if this physical eraseblock has to be tortured | 1143 | * @torture: if this physical eraseblock has to be tortured |
@@ -1175,11 +1176,11 @@ retry: | |||
1175 | /* | 1176 | /* |
1176 | * User is putting the physical eraseblock which was selected | 1177 | * User is putting the physical eraseblock which was selected |
1177 | * as the target the data is moved to. It may happen if the EBA | 1178 | * as the target the data is moved to. It may happen if the EBA |
1178 | * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but | 1179 | * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()' |
1179 | * the WL unit has not put the PEB to the "used" tree yet, but | 1180 | * but the WL sub-system has not put the PEB to the "used" tree |
1180 | * it is about to do this. So we just set a flag which will | 1181 | * yet, but it is about to do this. So we just set a flag which |
1181 | * tell the WL worker that the PEB is not needed anymore and | 1182 | * will tell the WL worker that the PEB is not needed anymore |
1182 | * should be scheduled for erasure. | 1183 | * and should be scheduled for erasure. |
1183 | */ | 1184 | */ |
1184 | dbg_wl("PEB %d is the target of data moving", pnum); | 1185 | dbg_wl("PEB %d is the target of data moving", pnum); |
1185 | ubi_assert(!ubi->move_to_put); | 1186 | ubi_assert(!ubi->move_to_put); |
@@ -1229,7 +1230,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) | |||
1229 | { | 1230 | { |
1230 | struct ubi_wl_entry *e; | 1231 | struct ubi_wl_entry *e; |
1231 | 1232 | ||
1232 | ubi_msg("schedule PEB %d for scrubbing", pnum); | 1233 | dbg_msg("schedule PEB %d for scrubbing", pnum); |
1233 | 1234 | ||
1234 | retry: | 1235 | retry: |
1235 | spin_lock(&ubi->wl_lock); | 1236 | spin_lock(&ubi->wl_lock); |
@@ -1368,7 +1369,7 @@ int ubi_thread(void *u) | |||
1368 | int err; | 1369 | int err; |
1369 | 1370 | ||
1370 | if (kthread_should_stop()) | 1371 | if (kthread_should_stop()) |
1371 | goto out; | 1372 | break; |
1372 | 1373 | ||
1373 | if (try_to_freeze()) | 1374 | if (try_to_freeze()) |
1374 | continue; | 1375 | continue; |
@@ -1403,7 +1404,6 @@ int ubi_thread(void *u) | |||
1403 | cond_resched(); | 1404 | cond_resched(); |
1404 | } | 1405 | } |
1405 | 1406 | ||
1406 | out: | ||
1407 | dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); | 1407 | dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); |
1408 | return 0; | 1408 | return 0; |
1409 | } | 1409 | } |
@@ -1426,8 +1426,7 @@ static void cancel_pending(struct ubi_device *ubi) | |||
1426 | } | 1426 | } |
1427 | 1427 | ||
1428 | /** | 1428 | /** |
1429 | * ubi_wl_init_scan - initialize the wear-leveling unit using scanning | 1429 | * ubi_wl_init_scan - initialize the WL sub-system using scanning information. |
1430 | * information. | ||
1431 | * @ubi: UBI device description object | 1430 | * @ubi: UBI device description object |
1432 | * @si: scanning information | 1431 | * @si: scanning information |
1433 | * | 1432 | * |
@@ -1584,13 +1583,12 @@ static void protection_trees_destroy(struct ubi_device *ubi) | |||
1584 | } | 1583 | } |
1585 | 1584 | ||
1586 | /** | 1585 | /** |
1587 | * ubi_wl_close - close the wear-leveling unit. | 1586 | * ubi_wl_close - close the wear-leveling sub-system. |
1588 | * @ubi: UBI device description object | 1587 | * @ubi: UBI device description object |
1589 | */ | 1588 | */ |
1590 | void ubi_wl_close(struct ubi_device *ubi) | 1589 | void ubi_wl_close(struct ubi_device *ubi) |
1591 | { | 1590 | { |
1592 | dbg_wl("close the UBI wear-leveling unit"); | 1591 | dbg_wl("close the WL sub-system"); |
1593 | |||
1594 | cancel_pending(ubi); | 1592 | cancel_pending(ubi); |
1595 | protection_trees_destroy(ubi); | 1593 | protection_trees_destroy(ubi); |
1596 | tree_destroy(&ubi->used); | 1594 | tree_destroy(&ubi->used); |
@@ -1602,8 +1600,7 @@ void ubi_wl_close(struct ubi_device *ubi) | |||
1602 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID | 1600 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
1603 | 1601 | ||
1604 | /** | 1602 | /** |
1605 | * paranoid_check_ec - make sure that the erase counter of a physical eraseblock | 1603 | * paranoid_check_ec - make sure that the erase counter of a PEB is correct. |
1606 | * is correct. | ||
1607 | * @ubi: UBI device description object | 1604 | * @ubi: UBI device description object |
1608 | * @pnum: the physical eraseblock number to check | 1605 | * @pnum: the physical eraseblock number to check |
1609 | * @ec: the erase counter to check | 1606 | * @ec: the erase counter to check |
@@ -1644,13 +1641,12 @@ out_free: | |||
1644 | } | 1641 | } |
1645 | 1642 | ||
1646 | /** | 1643 | /** |
1647 | * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present | 1644 | * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. |
1648 | * in a WL RB-tree. | ||
1649 | * @e: the wear-leveling entry to check | 1645 | * @e: the wear-leveling entry to check |
1650 | * @root: the root of the tree | 1646 | * @root: the root of the tree |
1651 | * | 1647 | * |
1652 | * This function returns zero if @e is in the @root RB-tree and %1 if it | 1648 | * This function returns zero if @e is in the @root RB-tree and %1 if it is |
1653 | * is not. | 1649 | * not. |
1654 | */ | 1650 | */ |
1655 | static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, | 1651 | static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, |
1656 | struct rb_root *root) | 1652 | struct rb_root *root) |