Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/Kconfig                  |   8
-rw-r--r--  drivers/mtd/Makefile                 |   1
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  |  25
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  |   2
-rw-r--r--  drivers/mtd/chips/jedec_probe.c      |  11
-rw-r--r--  drivers/mtd/cmdlinepart.c            |   9
-rw-r--r--  drivers/mtd/devices/lart.c           |   2
-rw-r--r--  drivers/mtd/maps/physmap_of.c        |  89
-rw-r--r--  drivers/mtd/mtdchar.c                |  12
-rw-r--r--  drivers/mtd/mtdcore.c                |   2
-rw-r--r--  drivers/mtd/mtdoops.c                | 165
-rw-r--r--  drivers/mtd/nand/at91_nand.c         |  12
-rw-r--r--  drivers/mtd/ofpart.c                 |  74
-rw-r--r--  drivers/mtd/ubi/cdev.c               |   2
-rw-r--r--  drivers/mtd/ubi/scan.c               |   2
-rw-r--r--  drivers/mtd/ubi/wl.c                 |   2
16 files changed, 260 insertions, 158 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 661eac09f5cb..e8503341e3b1 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -150,6 +150,14 @@ config MTD_AFS_PARTS
 	  for your particular device. It won't happen automatically. The
 	  'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
 
+config MTD_OF_PARTS
+	tristate "Flash partition map based on OF description"
+	depends on PPC_OF && MTD_PARTITIONS
+	help
+	  This provides a partition parsing function which derives
+	  the partition map from the children of the flash node,
+	  as described in Documentation/powerpc/booting-without-of.txt.
+
 comment "User Modules And Translation Layers"
 
 config MTD_CHAR
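
For reference, CONFIG_MTD_OF_PARTS (implemented by drivers/mtd/ofpart.c, added later in this patch) derives the partition map from child nodes of the flash node: each child carries a 'reg' property of <offset size>, an optional 'label' (the node name is used as a fallback) and an optional 'read-only' flag, per Documentation/powerpc/booting-without-of.txt. A minimal device-tree sketch, with purely illustrative addresses and labels:

        flash@ff000000 {
                compatible = "cfi-flash";
                reg = <0xff000000 0x01000000>;
                bank-width = <2>;
                #address-cells = <1>;
                #size-cells = <1>;

                partition@0 {
                        label = "u-boot";
                        reg = <0x00000000 0x00080000>;
                        read-only;
                };

                partition@80000 {
                        label = "filesystem";
                        reg = <0x00080000 0x00f80000>;
                };
        };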
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 7f0b04b4caa7..538e33d11d46 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
 
 # 'Users' - code which presents functionality to userspace.
 obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 350671ec5226..47794d23a42e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -269,10 +269,16 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
 /*
  * Some chips power-up with all sectors locked by default.
  */
-static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
 {
-        printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
-        mtd->flags |= MTD_STUPID_LOCK;
+        struct map_info *map = mtd->priv;
+        struct cfi_private *cfi = map->fldrv_priv;
+        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+        if (cfip->FeatureSupport&32) {
+                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+                mtd->flags |= MTD_POWERUP_LOCK;
+        }
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
@@ -288,7 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
 #endif
         { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
         { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
-        { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
+        { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
         { 0, 0, NULL, NULL }
 };
 
@@ -1562,9 +1568,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
         int ret, wbufsize, word_gap, words;
         const struct kvec *vec;
         unsigned long vec_seek;
+        unsigned long initial_adr;
+        int initial_len = len;
 
         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
         adr += chip->start;
+        initial_adr = adr;
         cmd_adr = adr & ~(wbufsize-1);
 
         /* Let's determine this according to the interleave only once */
@@ -1577,7 +1586,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                 return ret;
         }
 
-        XIP_INVAL_CACHED_RANGE(map, adr, len);
+        XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
         ENABLE_VPP(map);
         xip_disable(map, chip, cmd_adr);
 
@@ -1668,7 +1677,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
         chip->state = FL_WRITING;
 
         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
-                                   adr, len,
+                                   initial_adr, initial_len,
                                    chip->buffer_write_time);
         if (ret) {
                 map_write(map, CMD(0x70), cmd_adr);
@@ -2349,7 +2358,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
         struct flchip *chip;
         int ret = 0;
 
-        if ((mtd->flags & MTD_STUPID_LOCK)
+        if ((mtd->flags & MTD_POWERUP_LOCK)
             && extp && (extp->FeatureSupport & (1 << 5)))
                 cfi_intelext_save_locks(mtd);
 
@@ -2460,7 +2469,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
                 spin_unlock(chip->mutex);
         }
 
-        if ((mtd->flags & MTD_STUPID_LOCK)
+        if ((mtd->flags & MTD_POWERUP_LOCK)
             && extp && (extp->FeatureSupport & (1 << 5)))
                 cfi_intelext_restore_locks(mtd);
 }
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 796bfeadea21..d072e87ce4e2 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -217,7 +217,7 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
 {
         mtd->lock = cfi_atmel_lock;
         mtd->unlock = cfi_atmel_unlock;
-        mtd->flags |= MTD_STUPID_LOCK;
+        mtd->flags |= MTD_POWERUP_LOCK;
 }
 
 static struct cfi_fixup cfi_fixup_table[] = {
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index 640593845218..4be51a86a85c 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1646,14 +1646,6 @@ static const struct amd_flash_info jedec_table[] = {
         }
 };
 
-
-static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
-
-static int jedec_probe_chip(struct map_info *map, uint32_t base,
-                            unsigned long *chip_map, struct cfi_private *cfi);
-
-static struct mtd_info *jedec_probe(struct map_info *map);
-
 static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
         struct cfi_private *cfi)
 {
@@ -1676,8 +1668,7 @@ static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
         return result.x[0] & mask;
 }
 
-static inline void jedec_reset(u32 base, struct map_info *map,
-                               struct cfi_private *cfi)
+static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
 {
         /* Reset */
 
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 23fab14f1637..b44292abd9f7 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -9,7 +9,7 @@
  *
  * mtdparts=<mtddef>[;<mtddef]
  * <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@offset][<name>][ro]
+ * <partdef> := <size>[@offset][<name>][ro][lk]
  * <mtd-id> := unique name used in mapping driver/device (mtd->name)
 * <size> := standard linux memsize OR "-" to denote all remaining space
 * <name> := '(' NAME ')'
@@ -143,6 +143,13 @@ static struct mtd_partition * newpart(char *s,
                         s += 2;
                 }
 
+                /* if lk is found do NOT unlock the MTD partition*/
+                if (strncmp(s, "lk", 2) == 0)
+                {
+                        mask_flags |= MTD_POWERUP_LOCK;
+                        s += 2;
+                }
+
                 /* test if more partitions are following */
                 if (*s == ',')
                 {
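
The new 'lk' flag is appended after the partition name, optionally together with 'ro', and adds MTD_POWERUP_LOCK to that partition's mask_flags, so the power-up auto-unlock in add_mtd_device() (see the mtdcore.c hunk below) is skipped and that partition stays locked at boot. An illustrative command line — the mtd-id and sizes are made up, not taken from this patch:

        mtdparts=physmap-flash.0:512k(bootloader)rolk,128k(env)lk,-(rootfs)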
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4ea50a1dda85..99fd210feaec 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -323,7 +323,7 @@ static int flash_probe (void)
    /* put the flash back into command mode */
    write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
 
-   return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM));
+   return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
 }
 
 /*
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index d4bcd3f8c57c..49acd4171893 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -80,65 +80,6 @@ static int parse_obsolete_partitions(struct of_device *dev,
 
         return nr_parts;
 }
-
-static int __devinit parse_partitions(struct of_flash *info,
-                                      struct of_device *dev)
-{
-        const char *partname;
-        static const char *part_probe_types[]
-                = { "cmdlinepart", "RedBoot", NULL };
-        struct device_node *dp = dev->node, *pp;
-        int nr_parts, i;
-
-        /* First look for RedBoot table or partitions on the command
-         * line, these take precedence over device tree information */
-        nr_parts = parse_mtd_partitions(info->mtd, part_probe_types,
-                                        &info->parts, 0);
-        if (nr_parts > 0)
-                return nr_parts;
-
-        /* First count the subnodes */
-        nr_parts = 0;
-        for (pp = of_get_next_child(dp, NULL); pp;
-             pp = of_get_next_child(dp, pp))
-                nr_parts++;
-
-        if (nr_parts == 0)
-                return parse_obsolete_partitions(dev, info, dp);
-
-        info->parts = kzalloc(nr_parts * sizeof(*info->parts),
-                              GFP_KERNEL);
-        if (!info->parts)
-                return -ENOMEM;
-
-        for (pp = of_get_next_child(dp, NULL), i = 0; pp;
-             pp = of_get_next_child(dp, pp), i++) {
-                const u32 *reg;
-                int len;
-
-                reg = of_get_property(pp, "reg", &len);
-                if (!reg || (len != 2*sizeof(u32))) {
-                        of_node_put(pp);
-                        dev_err(&dev->dev, "Invalid 'reg' on %s\n",
-                                dp->full_name);
-                        kfree(info->parts);
-                        info->parts = NULL;
-                        return -EINVAL;
-                }
-                info->parts[i].offset = reg[0];
-                info->parts[i].size = reg[1];
-
-                partname = of_get_property(pp, "label", &len);
-                if (!partname)
-                        partname = of_get_property(pp, "name", &len);
-                info->parts[i].name = (char *)partname;
-
-                if (of_get_property(pp, "read-only", &len))
-                        info->parts[i].mask_flags = MTD_WRITEABLE;
-        }
-
-        return nr_parts;
-}
 #else /* MTD_PARTITIONS */
 #define OF_FLASH_PARTS(info) (0)
 #define parse_partitions(info, dev) (0)
@@ -213,6 +154,10 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
 static int __devinit of_flash_probe(struct of_device *dev,
                                     const struct of_device_id *match)
 {
+#ifdef CONFIG_MTD_PARTITIONS
+        static const char *part_probe_types[]
+                = { "cmdlinepart", "RedBoot", NULL };
+#endif
         struct device_node *dp = dev->node;
         struct resource res;
         struct of_flash *info;
@@ -275,13 +220,33 @@ static int __devinit of_flash_probe(struct of_device *dev,
         }
         info->mtd->owner = THIS_MODULE;
 
-        err = parse_partitions(info, dev);
+#ifdef CONFIG_MTD_PARTITIONS
+        /* First look for RedBoot table or partitions on the command
+         * line, these take precedence over device tree information */
+        err = parse_mtd_partitions(info->mtd, part_probe_types,
+                                   &info->parts, 0);
         if (err < 0)
-                goto err_out;
+                return err;
+
+#ifdef CONFIG_MTD_OF_PARTS
+        if (err == 0) {
+                err = of_mtd_parse_partitions(&dev->dev, info->mtd,
+                                              dp, &info->parts);
+                if (err < 0)
+                        return err;
+        }
+#endif
+
+        if (err == 0) {
+                err = parse_obsolete_partitions(dev, info, dp);
+                if (err < 0)
+                        return err;
+        }
 
         if (err > 0)
-                add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err);
+                add_mtd_partitions(info->mtd, info->parts, err);
         else
+#endif
                 add_mtd_device(info->mtd);
 
         return 0;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index b42553cd9af5..5d3ac512ce16 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -27,12 +27,10 @@ static void mtd_notify_add(struct mtd_info* mtd)
         if (!mtd)
                 return;
 
-        class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
-                            NULL, "mtd%d", mtd->index);
+        device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2), "mtd%d", mtd->index);
 
-        class_device_create(mtd_class, NULL,
-                            MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
-                            NULL, "mtd%dro", mtd->index);
+        device_create(mtd_class, NULL,
+                      MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1), "mtd%dro", mtd->index);
 }
 
 static void mtd_notify_remove(struct mtd_info* mtd)
@@ -40,8 +38,8 @@ static void mtd_notify_remove(struct mtd_info* mtd)
         if (!mtd)
                 return;
 
-        class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
-        class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
+        device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
+        device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
 }
 
 static struct mtd_notifier notifier = {
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6c2645e28371..f7e7890e5bc6 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -61,7 +61,7 @@ int add_mtd_device(struct mtd_info *mtd)
 
         /* Some chips always power up locked. Unlock them now */
         if ((mtd->flags & MTD_WRITEABLE)
-            && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
+            && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
                 if (mtd->unlock(mtd, 0, mtd->size))
                         printk(KERN_WARNING
                                "%s: unlock failed, "
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 20eaf294f620..34681bc91105 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,19 +28,24 @@
 #include <linux/workqueue.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/spinlock.h>
 #include <linux/mtd/mtd.h>
 
 #define OOPS_PAGE_SIZE 4096
 
-static struct mtdoops_context {
+struct mtdoops_context {
         int mtd_index;
-        struct work_struct work;
+        struct work_struct work_erase;
+        struct work_struct work_write;
         struct mtd_info *mtd;
         int oops_pages;
         int nextpage;
         int nextcount;
 
         void *oops_buf;
+
+        /* writecount and disabling ready are spin lock protected */
+        spinlock_t writecount_lock;
         int ready;
         int writecount;
 } oops_cxt;
@@ -62,10 +67,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
         erase.mtd = mtd;
         erase.callback = mtdoops_erase_callback;
         erase.addr = offset;
-        if (mtd->erasesize < OOPS_PAGE_SIZE)
-                erase.len = OOPS_PAGE_SIZE;
-        else
-                erase.len = mtd->erasesize;
+        erase.len = mtd->erasesize;
         erase.priv = (u_long)&wait_q;
 
         set_current_state(TASK_INTERRUPTIBLE);
@@ -87,7 +89,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
         return 0;
 }
 
-static int mtdoops_inc_counter(struct mtdoops_context *cxt)
+static void mtdoops_inc_counter(struct mtdoops_context *cxt)
 {
         struct mtd_info *mtd = cxt->mtd;
         size_t retlen;
@@ -103,25 +105,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
 
         ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
                         &retlen, (u_char *) &count);
-        if ((retlen != 4) || (ret < 0)) {
+        if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
                 printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
                                 ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
                                 retlen, ret);
-                return 1;
+                schedule_work(&cxt->work_erase);
+                return;
         }
 
         /* See if we need to erase the next block */
-        if (count != 0xffffffff)
-                return 1;
+        if (count != 0xffffffff) {
+                schedule_work(&cxt->work_erase);
+                return;
+        }
 
         printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
                cxt->nextpage, cxt->nextcount);
         cxt->ready = 1;
-        return 0;
 }
 
-static void mtdoops_prepare(struct mtdoops_context *cxt)
+/* Scheduled work - when we can't proceed without erasing a block */
+static void mtdoops_workfunc_erase(struct work_struct *work)
 {
+        struct mtdoops_context *cxt =
+                container_of(work, struct mtdoops_context, work_erase);
         struct mtd_info *mtd = cxt->mtd;
         int i = 0, j, ret, mod;
 
@@ -136,8 +143,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
                 cxt->nextpage = 0;
         }
 
-        while (mtd->block_isbad &&
-               mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
+        while (mtd->block_isbad) {
+                ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+                if (!ret)
+                        break;
+                if (ret < 0) {
+                        printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+                        return;
+                }
 badblock:
                 printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
                         cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +167,61 @@ badblock:
         for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
                 ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
 
-        if (ret < 0) {
-                if (mtd->block_markbad)
-                        mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
-                goto badblock;
+        if (ret >= 0) {
+                printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+                cxt->ready = 1;
+                return;
         }
 
-        printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
-
-        cxt->ready = 1;
+        if (mtd->block_markbad && (ret == -EIO)) {
+                ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+                if (ret < 0) {
+                        printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+                        return;
+                }
+        }
+        goto badblock;
 }
 
-static void mtdoops_workfunc(struct work_struct *work)
+static void mtdoops_workfunc_write(struct work_struct *work)
 {
         struct mtdoops_context *cxt =
-                container_of(work, struct mtdoops_context, work);
+                container_of(work, struct mtdoops_context, work_write);
+        struct mtd_info *mtd = cxt->mtd;
+        size_t retlen;
+        int ret;
 
-        mtdoops_prepare(cxt);
-}
+        if (cxt->writecount < OOPS_PAGE_SIZE)
+                memset(cxt->oops_buf + cxt->writecount, 0xff,
+                       OOPS_PAGE_SIZE - cxt->writecount);
+
+        ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+                        OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+
+        cxt->writecount = 0;
+
+        if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
+                printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
+                        cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
+
+        mtdoops_inc_counter(cxt);
+}
 
-static int find_next_position(struct mtdoops_context *cxt)
+static void find_next_position(struct mtdoops_context *cxt)
 {
         struct mtd_info *mtd = cxt->mtd;
-        int page, maxpos = 0;
+        int ret, page, maxpos = 0;
         u32 count, maxcount = 0xffffffff;
         size_t retlen;
 
         for (page = 0; page < cxt->oops_pages; page++) {
-                mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+                ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+                if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
+                        printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
+                                ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+                        continue;
+                }
+
                 if (count == 0xffffffff)
                         continue;
                 if (maxcount == 0xffffffff) {
@@ -205,20 +245,19 @@ static int find_next_position(struct mtdoops_context *cxt)
                 cxt->ready = 1;
                 printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
                        cxt->nextpage, cxt->nextcount);
-                return 0;
+                return;
         }
 
         cxt->nextpage = maxpos;
         cxt->nextcount = maxcount;
 
-        return mtdoops_inc_counter(cxt);
+        mtdoops_inc_counter(cxt);
 }
 
 
 static void mtdoops_notify_add(struct mtd_info *mtd)
 {
         struct mtdoops_context *cxt = &oops_cxt;
-        int ret;
 
         if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
                 return;
@@ -229,14 +268,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
                 return;
         }
 
+        if (mtd->erasesize < OOPS_PAGE_SIZE) {
+                printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
+                        mtd->index);
+                return;
+        }
+
         cxt->mtd = mtd;
         cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
 
-        ret = find_next_position(cxt);
-        if (ret == 1)
-                mtdoops_prepare(cxt);
+        find_next_position(cxt);
 
-        printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
+        printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
 }
 
 static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -254,31 +297,24 @@ static void mtdoops_console_sync(void)
 {
         struct mtdoops_context *cxt = &oops_cxt;
         struct mtd_info *mtd = cxt->mtd;
-        size_t retlen;
-        int ret;
+        unsigned long flags;
 
-        if (!cxt->ready || !mtd)
+        if (!cxt->ready || !mtd || cxt->writecount == 0)
                 return;
 
-        if (cxt->writecount == 0)
+        /*
+         * Once ready is 0 and we've held the lock no further writes to the
+         * buffer will happen
+         */
+        spin_lock_irqsave(&cxt->writecount_lock, flags);
+        if (!cxt->ready) {
+                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
                 return;
-
-        if (cxt->writecount < OOPS_PAGE_SIZE)
-                memset(cxt->oops_buf + cxt->writecount, 0xff,
-                       OOPS_PAGE_SIZE - cxt->writecount);
-
-        ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
-                        OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+        }
         cxt->ready = 0;
-        cxt->writecount = 0;
+        spin_unlock_irqrestore(&cxt->writecount_lock, flags);
 
-        if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
-                printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
-                        cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
-
-        ret = mtdoops_inc_counter(cxt);
-        if (ret == 1)
-                schedule_work(&cxt->work);
+        schedule_work(&cxt->work_write);
 }
 
 static void
@@ -286,6 +322,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 {
         struct mtdoops_context *cxt = co->data;
         struct mtd_info *mtd = cxt->mtd;
+        unsigned long flags;
 
         if (!oops_in_progress) {
                 mtdoops_console_sync();
@@ -295,6 +332,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
         if (!cxt->ready || !mtd)
                 return;
 
+        /* Locking on writecount ensures sequential writes to the buffer */
+        spin_lock_irqsave(&cxt->writecount_lock, flags);
+
+        /* Check ready status didn't change whilst waiting for the lock */
+        if (!cxt->ready)
+                return;
+
         if (cxt->writecount == 0) {
                 u32 *stamp = cxt->oops_buf;
                 *stamp = cxt->nextcount;
@@ -306,6 +350,11 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
 
         memcpy(cxt->oops_buf + cxt->writecount, s, count);
         cxt->writecount += count;
+
+        spin_unlock_irqrestore(&cxt->writecount_lock, flags);
+
+        if (cxt->writecount == OOPS_PAGE_SIZE)
+                mtdoops_console_sync();
 }
 
 static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -331,7 +380,6 @@ static struct console mtdoops_console = {
         .write		= mtdoops_console_write,
         .setup		= mtdoops_console_setup,
         .unblank	= mtdoops_console_sync,
-        .flags		= CON_PRINTBUFFER,
         .index		= -1,
         .data		= &oops_cxt,
 };
@@ -344,11 +392,12 @@ static int __init mtdoops_console_init(void)
         cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
 
         if (!cxt->oops_buf) {
-                printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
+                printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
                 return -ENOMEM;
         }
 
-        INIT_WORK(&cxt->work, mtdoops_workfunc);
+        INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
+        INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
 
         register_console(&mtdoops_console);
         register_mtd_user(&mtdoops_notifier);
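
With these changes each OOPS_PAGE_SIZE (4 KiB) record still begins with a 4-byte sequence counter stamped by mtdoops_console_write(); erased pages read back as 0xffffffff, and find_next_position() simply picks the page with the highest counter. A rough post-mortem reader for a raw dump of the oops partition could therefore look like the sketch below — this tool is hypothetical and not part of the patch:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define OOPS_PAGE_SIZE 4096

        /* List the used mtdoops records in a raw image of the oops partition
         * (e.g. obtained with nanddump or dd from /dev/mtdN). */
        int main(int argc, char **argv)
        {
                unsigned char page[OOPS_PAGE_SIZE];
                long offset = 0;
                uint32_t count;
                FILE *f;

                if (argc != 2) {
                        fprintf(stderr, "usage: %s <mtdoops-image>\n", argv[0]);
                        return 1;
                }
                f = fopen(argv[1], "rb");
                if (!f) {
                        perror("fopen");
                        return 1;
                }
                while (fread(page, 1, OOPS_PAGE_SIZE, f) == OOPS_PAGE_SIZE) {
                        /* first 4 bytes: counter written by mtdoops_console_write() */
                        memcpy(&count, page, 4);
                        if (count != 0xffffffff)
                                printf("record %u at offset 0x%lx\n",
                                       (unsigned int)count, offset);
                        offset += OOPS_PAGE_SIZE;
                }
                fclose(f);
                return 0;
        }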
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index b2a5672df6e0..c9fb2acf4056 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -156,14 +156,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
         }
 
 #ifdef CONFIG_MTD_PARTITIONS
-        if (host->board->partition_info)
-                partitions = host->board->partition_info(mtd->size, &num_partitions);
 #ifdef CONFIG_MTD_CMDLINE_PARTS
-        else {
-                mtd->name = "at91_nand";
-                num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
-        }
+        mtd->name = "at91_nand";
+        num_partitions = parse_mtd_partitions(mtd, part_probes,
+                                              &partitions, 0);
 #endif
+        if (num_partitions <= 0 && host->board->partition_info)
+                partitions = host->board->partition_info(mtd->size,
+                                                         &num_partitions);
 
         if ((!partitions) || (num_partitions == 0)) {
                 printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
new file mode 100644
index 000000000000..f86e06934cd8
--- /dev/null
+++ b/drivers/mtd/ofpart.c
@@ -0,0 +1,74 @@
+/*
+ * Flash partitions described by the OF (or flattened) device tree
+ *
+ * Copyright (C) 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ *   Copyright (C) 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+int __devinit of_mtd_parse_partitions(struct device *dev,
+                                      struct mtd_info *mtd,
+                                      struct device_node *node,
+                                      struct mtd_partition **pparts)
+{
+        const char *partname;
+        struct device_node *pp;
+        int nr_parts, i;
+
+        /* First count the subnodes */
+        pp = NULL;
+        nr_parts = 0;
+        while ((pp = of_get_next_child(node, pp)))
+                nr_parts++;
+
+        if (nr_parts == 0)
+                return 0;
+
+        *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
+        if (!*pparts)
+                return -ENOMEM;
+
+        pp = NULL;
+        i = 0;
+        while ((pp = of_get_next_child(node, pp))) {
+                const u32 *reg;
+                int len;
+
+                reg = of_get_property(pp, "reg", &len);
+                if (!reg || (len != 2 * sizeof(u32))) {
+                        of_node_put(pp);
+                        dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
+                        kfree(*pparts);
+                        *pparts = NULL;
+                        return -EINVAL;
+                }
+                (*pparts)[i].offset = reg[0];
+                (*pparts)[i].size = reg[1];
+
+                partname = of_get_property(pp, "label", &len);
+                if (!partname)
+                        partname = of_get_property(pp, "name", &len);
+                (*pparts)[i].name = (char *)partname;
+
+                if (of_get_property(pp, "read-only", &len))
+                        (*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+                i++;
+        }
+
+        return nr_parts;
+}
+EXPORT_SYMBOL(of_mtd_parse_partitions);
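
A rough sketch of how a map driver consumes this helper — essentially what the physmap_of.c hunk above does; the variable names and the surrounding error handling are illustrative only, not part of the patch:

        #ifdef CONFIG_MTD_OF_PARTS
                /* dp: the flash device_node whose children describe the partitions */
                nr_parts = of_mtd_parse_partitions(&dev->dev, info->mtd, dp, &parts);
                if (nr_parts < 0)
                        return nr_parts;
        #endif
                if (nr_parts > 0)
                        add_mtd_partitions(info->mtd, parts, nr_parts);
                else
                        add_mtd_device(info->mtd);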
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index a60a3a24c2a1..5ec13dc4705b 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -268,7 +268,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
         struct ubi_volume_desc *desc = file->private_data;
         struct ubi_volume *vol = desc->vol;
         struct ubi_device *ubi = vol->ubi;
-        int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0;
+        int lnum, off, len, tbuf_size, err = 0;
         size_t count_save = count;
         char *tbuf;
         uint64_t tmp;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c7b0afc9d280..c57e8eff9866 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -769,7 +769,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
  */
 static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
 {
-        long long ec;
+        long long uninitialized_var(ec);
         int err, bitflips = 0, vol_id, ec_corr = 0;
 
         dbg_bld("scan PEB %d", pnum);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 1142aabcfc8c..0d44ad95ab84 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -743,7 +743,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                 int cancel)
 {
         int err, put = 0, scrubbing = 0, protect = 0;
-        struct ubi_wl_prot_entry *pe;
+        struct ubi_wl_prot_entry *uninitialized_var(pe);
         struct ubi_wl_entry *e1, *e2;
         struct ubi_vid_hdr *vid_hdr;
 