diff options
Diffstat (limited to 'drivers/mtd')
38 files changed, 3056 insertions, 632 deletions
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 77414702cb00..b4567c35a322 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig | |||
@@ -33,14 +33,6 @@ config MTD_TESTS | |||
33 | should normally be compiled as kernel modules. The modules perform | 33 | should normally be compiled as kernel modules. The modules perform |
34 | various checks and verifications when loaded. | 34 | various checks and verifications when loaded. |
35 | 35 | ||
36 | config MTD_CONCAT | ||
37 | tristate "MTD concatenating support" | ||
38 | help | ||
39 | Support for concatenating several MTD devices into a single | ||
40 | (virtual) one. This allows you to have -for example- a JFFS(2) | ||
41 | file system spanning multiple physical flash chips. If unsure, | ||
42 | say 'Y'. | ||
43 | |||
44 | config MTD_PARTITIONS | 36 | config MTD_PARTITIONS |
45 | bool "MTD partitioning support" | 37 | bool "MTD partitioning support" |
46 | help | 38 | help |
@@ -333,6 +325,16 @@ config MTD_OOPS | |||
333 | To use, add console=ttyMTDx to the kernel command line, | 325 | To use, add console=ttyMTDx to the kernel command line, |
334 | where x is the MTD device number to use. | 326 | where x is the MTD device number to use. |
335 | 327 | ||
328 | config MTD_SWAP | ||
329 | tristate "Swap on MTD device support" | ||
330 | depends on MTD && SWAP | ||
331 | select MTD_BLKDEVS | ||
332 | help | ||
333 | Provides volatile block device driver on top of mtd partition | ||
334 | suitable for swapping. The mapping of written blocks is not saved. | ||
335 | The driver provides wear leveling by storing erase counter into the | ||
336 | OOB. | ||
337 | |||
336 | source "drivers/mtd/chips/Kconfig" | 338 | source "drivers/mtd/chips/Kconfig" |
337 | 339 | ||
338 | source "drivers/mtd/maps/Kconfig" | 340 | source "drivers/mtd/maps/Kconfig" |
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index d4e7f25b1ebb..d578095fb255 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile | |||
@@ -4,11 +4,10 @@ | |||
4 | 4 | ||
5 | # Core functionality. | 5 | # Core functionality. |
6 | obj-$(CONFIG_MTD) += mtd.o | 6 | obj-$(CONFIG_MTD) += mtd.o |
7 | mtd-y := mtdcore.o mtdsuper.o | 7 | mtd-y := mtdcore.o mtdsuper.o mtdconcat.o |
8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o | 8 | mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o |
9 | mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o | 9 | mtd-$(CONFIG_MTD_OF_PARTS) += ofpart.o |
10 | 10 | ||
11 | obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o | ||
12 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o | 11 | obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o |
13 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o | 12 | obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o |
14 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o | 13 | obj-$(CONFIG_MTD_AFS_PARTS) += afs.o |
@@ -26,6 +25,7 @@ obj-$(CONFIG_RFD_FTL) += rfd_ftl.o | |||
26 | obj-$(CONFIG_SSFDC) += ssfdc.o | 25 | obj-$(CONFIG_SSFDC) += ssfdc.o |
27 | obj-$(CONFIG_SM_FTL) += sm_ftl.o | 26 | obj-$(CONFIG_SM_FTL) += sm_ftl.o |
28 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o | 27 | obj-$(CONFIG_MTD_OOPS) += mtdoops.o |
28 | obj-$(CONFIG_MTD_SWAP) += mtdswap.o | ||
29 | 29 | ||
30 | nftl-objs := nftlcore.o nftlmount.o | 30 | nftl-objs := nftlcore.o nftlmount.o |
31 | inftl-objs := inftlcore.o inftlmount.o | 31 | inftl-objs := inftlcore.o inftlmount.o |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 4aaa88f8ab5f..092aef11120c 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
@@ -455,7 +455,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | |||
455 | mtd->flags = MTD_CAP_NORFLASH; | 455 | mtd->flags = MTD_CAP_NORFLASH; |
456 | mtd->name = map->name; | 456 | mtd->name = map->name; |
457 | mtd->writesize = 1; | 457 | mtd->writesize = 1; |
458 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 458 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
459 | 459 | ||
460 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; | 460 | mtd->reboot_notifier.notifier_call = cfi_intelext_reboot; |
461 | 461 | ||
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index f072fcfde04e..f9a5331e9445 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
@@ -349,6 +349,7 @@ static struct cfi_fixup cfi_fixup_table[] = { | |||
349 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, | 349 | { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri }, |
350 | #ifdef AMD_BOOTLOC_BUG | 350 | #ifdef AMD_BOOTLOC_BUG |
351 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, | 351 | { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock }, |
352 | { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock }, | ||
352 | { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, | 353 | { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock }, |
353 | #endif | 354 | #endif |
354 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, | 355 | { CFI_MFR_AMD, 0x0050, fixup_use_secsi }, |
@@ -440,7 +441,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
440 | mtd->flags = MTD_CAP_NORFLASH; | 441 | mtd->flags = MTD_CAP_NORFLASH; |
441 | mtd->name = map->name; | 442 | mtd->name = map->name; |
442 | mtd->writesize = 1; | 443 | mtd->writesize = 1; |
443 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 444 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
444 | 445 | ||
445 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", | 446 | DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n", |
446 | __func__, mtd->writebufsize); | 447 | __func__, mtd->writebufsize); |
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index c04b7658abe9..ed56ad3884fb 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c | |||
@@ -238,7 +238,7 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) | |||
238 | mtd->resume = cfi_staa_resume; | 238 | mtd->resume = cfi_staa_resume; |
239 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; | 239 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; |
240 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ | 240 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ |
241 | mtd->writebufsize = 1 << cfi->cfiq->MaxBufWriteSize; | 241 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
242 | map->fldrv = &cfi_staa_chipdrv; | 242 | map->fldrv = &cfi_staa_chipdrv; |
243 | __module_get(THIS_MODULE); | 243 | __module_get(THIS_MODULE); |
244 | mtd->name = map->name; | 244 | mtd->name = map->name; |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index e4eba6cc1b2e..3fb981d4bb51 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -655,7 +655,8 @@ static const struct spi_device_id m25p_ids[] = { | |||
655 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, | 655 | { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, |
656 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, | 656 | { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, |
657 | 657 | ||
658 | /* EON -- en25pxx */ | 658 | /* EON -- en25xxx */ |
659 | { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, | ||
659 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, | 660 | { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, |
660 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, | 661 | { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, |
661 | 662 | ||
@@ -728,6 +729,8 @@ static const struct spi_device_id m25p_ids[] = { | |||
728 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, | 729 | { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, |
729 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, | 730 | { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, |
730 | 731 | ||
732 | { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, | ||
733 | |||
731 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ | 734 | /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ |
732 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, | 735 | { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, |
733 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, | 736 | { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, |
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index 26a6e809013d..1483e18971ce 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c | |||
@@ -121,6 +121,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, | |||
121 | mtd->flags = MTD_CAP_RAM; | 121 | mtd->flags = MTD_CAP_RAM; |
122 | mtd->size = size; | 122 | mtd->size = size; |
123 | mtd->writesize = 1; | 123 | mtd->writesize = 1; |
124 | mtd->writebufsize = 64; /* Mimic CFI NOR flashes */ | ||
124 | mtd->erasesize = MTDRAM_ERASE_SIZE; | 125 | mtd->erasesize = MTDRAM_ERASE_SIZE; |
125 | mtd->priv = mapped_address; | 126 | mtd->priv = mapped_address; |
126 | 127 | ||
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 52393282eaf1..8d28fa02a5a2 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c | |||
@@ -117,6 +117,7 @@ static void unregister_devices(void) | |||
117 | list_for_each_entry_safe(this, safe, &phram_list, list) { | 117 | list_for_each_entry_safe(this, safe, &phram_list, list) { |
118 | del_mtd_device(&this->mtd); | 118 | del_mtd_device(&this->mtd); |
119 | iounmap(this->mtd.priv); | 119 | iounmap(this->mtd.priv); |
120 | kfree(this->mtd.name); | ||
120 | kfree(this); | 121 | kfree(this); |
121 | } | 122 | } |
122 | } | 123 | } |
@@ -275,6 +276,8 @@ static int phram_setup(const char *val, struct kernel_param *kp) | |||
275 | ret = register_device(name, start, len); | 276 | ret = register_device(name, start, len); |
276 | if (!ret) | 277 | if (!ret) |
277 | pr_info("%s device: %#x at %#x\n", name, len, start); | 278 | pr_info("%s device: %#x at %#x\n", name, len, start); |
279 | else | ||
280 | kfree(name); | ||
278 | 281 | ||
279 | return ret; | 282 | return ret; |
280 | } | 283 | } |
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 5d37d315fa98..44b1f46458ca 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig | |||
@@ -114,7 +114,7 @@ config MTD_SUN_UFLASH | |||
114 | 114 | ||
115 | config MTD_SC520CDP | 115 | config MTD_SC520CDP |
116 | tristate "CFI Flash device mapped on AMD SC520 CDP" | 116 | tristate "CFI Flash device mapped on AMD SC520 CDP" |
117 | depends on X86 && MTD_CFI && MTD_CONCAT | 117 | depends on X86 && MTD_CFI |
118 | help | 118 | help |
119 | The SC520 CDP board has two banks of CFI-compliant chips and one | 119 | The SC520 CDP board has two banks of CFI-compliant chips and one |
120 | Dual-in-line JEDEC chip. This 'mapping' driver supports that | 120 | Dual-in-line JEDEC chip. This 'mapping' driver supports that |
@@ -262,7 +262,7 @@ config MTD_BCM963XX | |||
262 | 262 | ||
263 | config MTD_DILNETPC | 263 | config MTD_DILNETPC |
264 | tristate "CFI Flash device mapped on DIL/Net PC" | 264 | tristate "CFI Flash device mapped on DIL/Net PC" |
265 | depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN | 265 | depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN |
266 | help | 266 | help |
267 | MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". | 267 | MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". |
268 | For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> | 268 | For details, see <http://www.ssv-embedded.de/ssv/pc104/p169.htm> |
@@ -552,4 +552,13 @@ config MTD_PISMO | |||
552 | 552 | ||
553 | When built as a module, it will be called pismo.ko | 553 | When built as a module, it will be called pismo.ko |
554 | 554 | ||
555 | config MTD_LATCH_ADDR | ||
556 | tristate "Latch-assisted Flash Chip Support" | ||
557 | depends on MTD_COMPLEX_MAPPINGS | ||
558 | help | ||
559 | Map driver which allows flashes to be partially physically addressed | ||
560 | and have the upper address lines set by a board specific code. | ||
561 | |||
562 | If compiled as a module, it will be called latch-addr-flash. | ||
563 | |||
555 | endmenu | 564 | endmenu |
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index c7869c7a6b18..08533bd5cba7 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile | |||
@@ -59,3 +59,4 @@ obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o | |||
59 | obj-$(CONFIG_MTD_VMU) += vmu-flash.o | 59 | obj-$(CONFIG_MTD_VMU) += vmu-flash.o |
60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o | 60 | obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o |
61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o | 61 | obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o |
62 | obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o | ||
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c index c09f4f57093e..e5f645b775ad 100644 --- a/drivers/mtd/maps/ceiva.c +++ b/drivers/mtd/maps/ceiva.c | |||
@@ -194,16 +194,10 @@ static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info | |||
194 | * We detected multiple devices. Concatenate | 194 | * We detected multiple devices. Concatenate |
195 | * them together. | 195 | * them together. |
196 | */ | 196 | */ |
197 | #ifdef CONFIG_MTD_CONCAT | ||
198 | *rmtd = mtd_concat_create(subdev, found, | 197 | *rmtd = mtd_concat_create(subdev, found, |
199 | "clps flash"); | 198 | "clps flash"); |
200 | if (*rmtd == NULL) | 199 | if (*rmtd == NULL) |
201 | ret = -ENXIO; | 200 | ret = -ENXIO; |
202 | #else | ||
203 | printk(KERN_ERR "clps flash: multiple devices " | ||
204 | "found but MTD concat support disabled.\n"); | ||
205 | ret = -ENXIO; | ||
206 | #endif | ||
207 | } | 201 | } |
208 | } | 202 | } |
209 | 203 | ||
diff --git a/drivers/mtd/maps/integrator-flash.c b/drivers/mtd/maps/integrator-flash.c index 2aac41bde8b3..e22ff5adbbf4 100644 --- a/drivers/mtd/maps/integrator-flash.c +++ b/drivers/mtd/maps/integrator-flash.c | |||
@@ -202,7 +202,6 @@ static int armflash_probe(struct platform_device *dev) | |||
202 | if (info->nr_subdev == 1) | 202 | if (info->nr_subdev == 1) |
203 | info->mtd = info->subdev[0].mtd; | 203 | info->mtd = info->subdev[0].mtd; |
204 | else if (info->nr_subdev > 1) { | 204 | else if (info->nr_subdev > 1) { |
205 | #ifdef CONFIG_MTD_CONCAT | ||
206 | struct mtd_info *cdev[info->nr_subdev]; | 205 | struct mtd_info *cdev[info->nr_subdev]; |
207 | 206 | ||
208 | /* | 207 | /* |
@@ -215,11 +214,6 @@ static int armflash_probe(struct platform_device *dev) | |||
215 | dev_name(&dev->dev)); | 214 | dev_name(&dev->dev)); |
216 | if (info->mtd == NULL) | 215 | if (info->mtd == NULL) |
217 | err = -ENXIO; | 216 | err = -ENXIO; |
218 | #else | ||
219 | printk(KERN_ERR "armflash: multiple devices found but " | ||
220 | "MTD concat support disabled.\n"); | ||
221 | err = -ENXIO; | ||
222 | #endif | ||
223 | } | 217 | } |
224 | 218 | ||
225 | if (err < 0) | 219 | if (err < 0) |
@@ -244,10 +238,8 @@ static int armflash_probe(struct platform_device *dev) | |||
244 | cleanup: | 238 | cleanup: |
245 | if (info->mtd) { | 239 | if (info->mtd) { |
246 | del_mtd_partitions(info->mtd); | 240 | del_mtd_partitions(info->mtd); |
247 | #ifdef CONFIG_MTD_CONCAT | ||
248 | if (info->mtd != info->subdev[0].mtd) | 241 | if (info->mtd != info->subdev[0].mtd) |
249 | mtd_concat_destroy(info->mtd); | 242 | mtd_concat_destroy(info->mtd); |
250 | #endif | ||
251 | } | 243 | } |
252 | kfree(info->parts); | 244 | kfree(info->parts); |
253 | subdev_err: | 245 | subdev_err: |
@@ -272,10 +264,8 @@ static int armflash_remove(struct platform_device *dev) | |||
272 | if (info) { | 264 | if (info) { |
273 | if (info->mtd) { | 265 | if (info->mtd) { |
274 | del_mtd_partitions(info->mtd); | 266 | del_mtd_partitions(info->mtd); |
275 | #ifdef CONFIG_MTD_CONCAT | ||
276 | if (info->mtd != info->subdev[0].mtd) | 267 | if (info->mtd != info->subdev[0].mtd) |
277 | mtd_concat_destroy(info->mtd); | 268 | mtd_concat_destroy(info->mtd); |
278 | #endif | ||
279 | } | 269 | } |
280 | kfree(info->parts); | 270 | kfree(info->parts); |
281 | 271 | ||
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c new file mode 100644 index 000000000000..ee2548085334 --- /dev/null +++ b/drivers/mtd/maps/latch-addr-flash.c | |||
@@ -0,0 +1,272 @@ | |||
1 | /* | ||
2 | * Interface for NOR flash driver whose high address lines are latched | ||
3 | * | ||
4 | * Copyright © 2000 Nicolas Pitre <nico@cam.org> | ||
5 | * Copyright © 2005-2008 Analog Devices Inc. | ||
6 | * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com> | ||
7 | * | ||
8 | * This file is licensed under the terms of the GNU General Public License | ||
9 | * version 2. This program is licensed "as is" without any warranty of any | ||
10 | * kind, whether express or implied. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/mtd/mtd.h> | ||
17 | #include <linux/mtd/map.h> | ||
18 | #include <linux/mtd/partitions.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/mtd/latch-addr-flash.h> | ||
21 | #include <linux/slab.h> | ||
22 | |||
23 | #define DRIVER_NAME "latch-addr-flash" | ||
24 | |||
25 | struct latch_addr_flash_info { | ||
26 | struct mtd_info *mtd; | ||
27 | struct map_info map; | ||
28 | struct resource *res; | ||
29 | |||
30 | void (*set_window)(unsigned long offset, void *data); | ||
31 | void *data; | ||
32 | |||
33 | /* cache; could be found out of res */ | ||
34 | unsigned long win_mask; | ||
35 | |||
36 | int nr_parts; | ||
37 | struct mtd_partition *parts; | ||
38 | |||
39 | spinlock_t lock; | ||
40 | }; | ||
41 | |||
42 | static map_word lf_read(struct map_info *map, unsigned long ofs) | ||
43 | { | ||
44 | struct latch_addr_flash_info *info; | ||
45 | map_word datum; | ||
46 | |||
47 | info = (struct latch_addr_flash_info *)map->map_priv_1; | ||
48 | |||
49 | spin_lock(&info->lock); | ||
50 | |||
51 | info->set_window(ofs, info->data); | ||
52 | datum = inline_map_read(map, info->win_mask & ofs); | ||
53 | |||
54 | spin_unlock(&info->lock); | ||
55 | |||
56 | return datum; | ||
57 | } | ||
58 | |||
59 | static void lf_write(struct map_info *map, map_word datum, unsigned long ofs) | ||
60 | { | ||
61 | struct latch_addr_flash_info *info; | ||
62 | |||
63 | info = (struct latch_addr_flash_info *)map->map_priv_1; | ||
64 | |||
65 | spin_lock(&info->lock); | ||
66 | |||
67 | info->set_window(ofs, info->data); | ||
68 | inline_map_write(map, datum, info->win_mask & ofs); | ||
69 | |||
70 | spin_unlock(&info->lock); | ||
71 | } | ||
72 | |||
73 | static void lf_copy_from(struct map_info *map, void *to, | ||
74 | unsigned long from, ssize_t len) | ||
75 | { | ||
76 | struct latch_addr_flash_info *info = | ||
77 | (struct latch_addr_flash_info *) map->map_priv_1; | ||
78 | unsigned n; | ||
79 | |||
80 | while (len > 0) { | ||
81 | n = info->win_mask + 1 - (from & info->win_mask); | ||
82 | if (n > len) | ||
83 | n = len; | ||
84 | |||
85 | spin_lock(&info->lock); | ||
86 | |||
87 | info->set_window(from, info->data); | ||
88 | memcpy_fromio(to, map->virt + (from & info->win_mask), n); | ||
89 | |||
90 | spin_unlock(&info->lock); | ||
91 | |||
92 | to += n; | ||
93 | from += n; | ||
94 | len -= n; | ||
95 | } | ||
96 | } | ||
97 | |||
98 | static char *rom_probe_types[] = { "cfi_probe", NULL }; | ||
99 | |||
100 | static char *part_probe_types[] = { "cmdlinepart", NULL }; | ||
101 | |||
102 | static int latch_addr_flash_remove(struct platform_device *dev) | ||
103 | { | ||
104 | struct latch_addr_flash_info *info; | ||
105 | struct latch_addr_flash_data *latch_addr_data; | ||
106 | |||
107 | info = platform_get_drvdata(dev); | ||
108 | if (info == NULL) | ||
109 | return 0; | ||
110 | platform_set_drvdata(dev, NULL); | ||
111 | |||
112 | latch_addr_data = dev->dev.platform_data; | ||
113 | |||
114 | if (info->mtd != NULL) { | ||
115 | if (mtd_has_partitions()) { | ||
116 | if (info->nr_parts) { | ||
117 | del_mtd_partitions(info->mtd); | ||
118 | kfree(info->parts); | ||
119 | } else if (latch_addr_data->nr_parts) { | ||
120 | del_mtd_partitions(info->mtd); | ||
121 | } else { | ||
122 | del_mtd_device(info->mtd); | ||
123 | } | ||
124 | } else { | ||
125 | del_mtd_device(info->mtd); | ||
126 | } | ||
127 | map_destroy(info->mtd); | ||
128 | } | ||
129 | |||
130 | if (info->map.virt != NULL) | ||
131 | iounmap(info->map.virt); | ||
132 | |||
133 | if (info->res != NULL) | ||
134 | release_mem_region(info->res->start, resource_size(info->res)); | ||
135 | |||
136 | kfree(info); | ||
137 | |||
138 | if (latch_addr_data->done) | ||
139 | latch_addr_data->done(latch_addr_data->data); | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int __devinit latch_addr_flash_probe(struct platform_device *dev) | ||
145 | { | ||
146 | struct latch_addr_flash_data *latch_addr_data; | ||
147 | struct latch_addr_flash_info *info; | ||
148 | resource_size_t win_base = dev->resource->start; | ||
149 | resource_size_t win_size = resource_size(dev->resource); | ||
150 | char **probe_type; | ||
151 | int chipsel; | ||
152 | int err; | ||
153 | |||
154 | latch_addr_data = dev->dev.platform_data; | ||
155 | if (latch_addr_data == NULL) | ||
156 | return -ENODEV; | ||
157 | |||
158 | pr_notice("latch-addr platform flash device: %#llx byte " | ||
159 | "window at %#.8llx\n", | ||
160 | (unsigned long long)win_size, (unsigned long long)win_base); | ||
161 | |||
162 | chipsel = dev->id; | ||
163 | |||
164 | if (latch_addr_data->init) { | ||
165 | err = latch_addr_data->init(latch_addr_data->data, chipsel); | ||
166 | if (err != 0) | ||
167 | return err; | ||
168 | } | ||
169 | |||
170 | info = kzalloc(sizeof(struct latch_addr_flash_info), GFP_KERNEL); | ||
171 | if (info == NULL) { | ||
172 | err = -ENOMEM; | ||
173 | goto done; | ||
174 | } | ||
175 | |||
176 | platform_set_drvdata(dev, info); | ||
177 | |||
178 | info->res = request_mem_region(win_base, win_size, DRIVER_NAME); | ||
179 | if (info->res == NULL) { | ||
180 | dev_err(&dev->dev, "Could not reserve memory region\n"); | ||
181 | err = -EBUSY; | ||
182 | goto free_info; | ||
183 | } | ||
184 | |||
185 | info->map.name = DRIVER_NAME; | ||
186 | info->map.size = latch_addr_data->size; | ||
187 | info->map.bankwidth = latch_addr_data->width; | ||
188 | |||
189 | info->map.phys = NO_XIP; | ||
190 | info->map.virt = ioremap(win_base, win_size); | ||
191 | if (!info->map.virt) { | ||
192 | err = -ENOMEM; | ||
193 | goto free_res; | ||
194 | } | ||
195 | |||
196 | info->map.map_priv_1 = (unsigned long)info; | ||
197 | |||
198 | info->map.read = lf_read; | ||
199 | info->map.copy_from = lf_copy_from; | ||
200 | info->map.write = lf_write; | ||
201 | info->set_window = latch_addr_data->set_window; | ||
202 | info->data = latch_addr_data->data; | ||
203 | info->win_mask = win_size - 1; | ||
204 | |||
205 | spin_lock_init(&info->lock); | ||
206 | |||
207 | for (probe_type = rom_probe_types; !info->mtd && *probe_type; | ||
208 | probe_type++) | ||
209 | info->mtd = do_map_probe(*probe_type, &info->map); | ||
210 | |||
211 | if (info->mtd == NULL) { | ||
212 | dev_err(&dev->dev, "map_probe failed\n"); | ||
213 | err = -ENODEV; | ||
214 | goto iounmap; | ||
215 | } | ||
216 | info->mtd->owner = THIS_MODULE; | ||
217 | |||
218 | if (mtd_has_partitions()) { | ||
219 | |||
220 | err = parse_mtd_partitions(info->mtd, | ||
221 | (const char **)part_probe_types, | ||
222 | &info->parts, 0); | ||
223 | if (err > 0) { | ||
224 | add_mtd_partitions(info->mtd, info->parts, err); | ||
225 | return 0; | ||
226 | } | ||
227 | if (latch_addr_data->nr_parts) { | ||
228 | pr_notice("Using latch-addr-flash partition information\n"); | ||
229 | add_mtd_partitions(info->mtd, latch_addr_data->parts, | ||
230 | latch_addr_data->nr_parts); | ||
231 | return 0; | ||
232 | } | ||
233 | } | ||
234 | add_mtd_device(info->mtd); | ||
235 | return 0; | ||
236 | |||
237 | iounmap: | ||
238 | iounmap(info->map.virt); | ||
239 | free_res: | ||
240 | release_mem_region(info->res->start, resource_size(info->res)); | ||
241 | free_info: | ||
242 | kfree(info); | ||
243 | done: | ||
244 | if (latch_addr_data->done) | ||
245 | latch_addr_data->done(latch_addr_data->data); | ||
246 | return err; | ||
247 | } | ||
248 | |||
249 | static struct platform_driver latch_addr_flash_driver = { | ||
250 | .probe = latch_addr_flash_probe, | ||
251 | .remove = __devexit_p(latch_addr_flash_remove), | ||
252 | .driver = { | ||
253 | .name = DRIVER_NAME, | ||
254 | }, | ||
255 | }; | ||
256 | |||
257 | static int __init latch_addr_flash_init(void) | ||
258 | { | ||
259 | return platform_driver_register(&latch_addr_flash_driver); | ||
260 | } | ||
261 | module_init(latch_addr_flash_init); | ||
262 | |||
263 | static void __exit latch_addr_flash_exit(void) | ||
264 | { | ||
265 | platform_driver_unregister(&latch_addr_flash_driver); | ||
266 | } | ||
267 | module_exit(latch_addr_flash_exit); | ||
268 | |||
269 | MODULE_AUTHOR("David Griego <dgriego@mvista.com>"); | ||
270 | MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper " | ||
271 | "address lines being set board specifically"); | ||
272 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index 4c18b98a3110..7522df4f71f1 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -59,10 +59,8 @@ static int physmap_flash_remove(struct platform_device *dev) | |||
59 | #else | 59 | #else |
60 | del_mtd_device(info->cmtd); | 60 | del_mtd_device(info->cmtd); |
61 | #endif | 61 | #endif |
62 | #ifdef CONFIG_MTD_CONCAT | ||
63 | if (info->cmtd != info->mtd[0]) | 62 | if (info->cmtd != info->mtd[0]) |
64 | mtd_concat_destroy(info->cmtd); | 63 | mtd_concat_destroy(info->cmtd); |
65 | #endif | ||
66 | } | 64 | } |
67 | 65 | ||
68 | for (i = 0; i < MAX_RESOURCES; i++) { | 66 | for (i = 0; i < MAX_RESOURCES; i++) { |
@@ -159,15 +157,9 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
159 | /* | 157 | /* |
160 | * We detected multiple devices. Concatenate them together. | 158 | * We detected multiple devices. Concatenate them together. |
161 | */ | 159 | */ |
162 | #ifdef CONFIG_MTD_CONCAT | ||
163 | info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); | 160 | info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev)); |
164 | if (info->cmtd == NULL) | 161 | if (info->cmtd == NULL) |
165 | err = -ENXIO; | 162 | err = -ENXIO; |
166 | #else | ||
167 | printk(KERN_ERR "physmap-flash: multiple devices " | ||
168 | "found but MTD concat support disabled.\n"); | ||
169 | err = -ENXIO; | ||
170 | #endif | ||
171 | } | 163 | } |
172 | if (err) | 164 | if (err) |
173 | goto err_out; | 165 | goto err_out; |
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 3db0cb083d31..bd483f0c57e1 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c | |||
@@ -104,12 +104,10 @@ static int of_flash_remove(struct platform_device *dev) | |||
104 | return 0; | 104 | return 0; |
105 | dev_set_drvdata(&dev->dev, NULL); | 105 | dev_set_drvdata(&dev->dev, NULL); |
106 | 106 | ||
107 | #ifdef CONFIG_MTD_CONCAT | ||
108 | if (info->cmtd != info->list[0].mtd) { | 107 | if (info->cmtd != info->list[0].mtd) { |
109 | del_mtd_device(info->cmtd); | 108 | del_mtd_device(info->cmtd); |
110 | mtd_concat_destroy(info->cmtd); | 109 | mtd_concat_destroy(info->cmtd); |
111 | } | 110 | } |
112 | #endif | ||
113 | 111 | ||
114 | if (info->cmtd) { | 112 | if (info->cmtd) { |
115 | if (OF_FLASH_PARTS(info)) { | 113 | if (OF_FLASH_PARTS(info)) { |
@@ -337,16 +335,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) | |||
337 | /* | 335 | /* |
338 | * We detected multiple devices. Concatenate them together. | 336 | * We detected multiple devices. Concatenate them together. |
339 | */ | 337 | */ |
340 | #ifdef CONFIG_MTD_CONCAT | ||
341 | info->cmtd = mtd_concat_create(mtd_list, info->list_size, | 338 | info->cmtd = mtd_concat_create(mtd_list, info->list_size, |
342 | dev_name(&dev->dev)); | 339 | dev_name(&dev->dev)); |
343 | if (info->cmtd == NULL) | 340 | if (info->cmtd == NULL) |
344 | err = -ENXIO; | 341 | err = -ENXIO; |
345 | #else | ||
346 | printk(KERN_ERR "physmap_of: multiple devices " | ||
347 | "found but MTD concat support disabled.\n"); | ||
348 | err = -ENXIO; | ||
349 | #endif | ||
350 | } | 342 | } |
351 | if (err) | 343 | if (err) |
352 | goto err_out; | 344 | goto err_out; |
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index f3af87e08ecd..da875908ea8e 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c | |||
@@ -232,10 +232,8 @@ static void sa1100_destroy(struct sa_info *info, struct flash_platform_data *pla | |||
232 | else | 232 | else |
233 | del_mtd_partitions(info->mtd); | 233 | del_mtd_partitions(info->mtd); |
234 | #endif | 234 | #endif |
235 | #ifdef CONFIG_MTD_CONCAT | ||
236 | if (info->mtd != info->subdev[0].mtd) | 235 | if (info->mtd != info->subdev[0].mtd) |
237 | mtd_concat_destroy(info->mtd); | 236 | mtd_concat_destroy(info->mtd); |
238 | #endif | ||
239 | } | 237 | } |
240 | 238 | ||
241 | kfree(info->parts); | 239 | kfree(info->parts); |
@@ -321,7 +319,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) | |||
321 | info->mtd = info->subdev[0].mtd; | 319 | info->mtd = info->subdev[0].mtd; |
322 | ret = 0; | 320 | ret = 0; |
323 | } else if (info->num_subdev > 1) { | 321 | } else if (info->num_subdev > 1) { |
324 | #ifdef CONFIG_MTD_CONCAT | ||
325 | struct mtd_info *cdev[nr]; | 322 | struct mtd_info *cdev[nr]; |
326 | /* | 323 | /* |
327 | * We detected multiple devices. Concatenate them together. | 324 | * We detected multiple devices. Concatenate them together. |
@@ -333,11 +330,6 @@ sa1100_setup_mtd(struct platform_device *pdev, struct flash_platform_data *plat) | |||
333 | plat->name); | 330 | plat->name); |
334 | if (info->mtd == NULL) | 331 | if (info->mtd == NULL) |
335 | ret = -ENXIO; | 332 | ret = -ENXIO; |
336 | #else | ||
337 | printk(KERN_ERR "SA1100 flash: multiple devices " | ||
338 | "found but MTD concat support disabled.\n"); | ||
339 | ret = -ENXIO; | ||
340 | #endif | ||
341 | } | 333 | } |
342 | 334 | ||
343 | if (ret == 0) | 335 | if (ret == 0) |
diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c index e2147bf11c88..e02dfa9d4ddd 100644 --- a/drivers/mtd/maps/ts5500_flash.c +++ b/drivers/mtd/maps/ts5500_flash.c | |||
@@ -94,7 +94,6 @@ static int __init init_ts5500_map(void) | |||
94 | return 0; | 94 | return 0; |
95 | 95 | ||
96 | err1: | 96 | err1: |
97 | map_destroy(mymtd); | ||
98 | iounmap(ts5500_map.virt); | 97 | iounmap(ts5500_map.virt); |
99 | err2: | 98 | err2: |
100 | return rc; | 99 | return rc; |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index e0a2373bf0e2..a534e1f0c348 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -40,7 +40,7 @@ | |||
40 | static LIST_HEAD(blktrans_majors); | 40 | static LIST_HEAD(blktrans_majors); |
41 | static DEFINE_MUTEX(blktrans_ref_mutex); | 41 | static DEFINE_MUTEX(blktrans_ref_mutex); |
42 | 42 | ||
43 | void blktrans_dev_release(struct kref *kref) | 43 | static void blktrans_dev_release(struct kref *kref) |
44 | { | 44 | { |
45 | struct mtd_blktrans_dev *dev = | 45 | struct mtd_blktrans_dev *dev = |
46 | container_of(kref, struct mtd_blktrans_dev, ref); | 46 | container_of(kref, struct mtd_blktrans_dev, ref); |
@@ -67,7 +67,7 @@ unlock: | |||
67 | return dev; | 67 | return dev; |
68 | } | 68 | } |
69 | 69 | ||
70 | void blktrans_dev_put(struct mtd_blktrans_dev *dev) | 70 | static void blktrans_dev_put(struct mtd_blktrans_dev *dev) |
71 | { | 71 | { |
72 | mutex_lock(&blktrans_ref_mutex); | 72 | mutex_lock(&blktrans_ref_mutex); |
73 | kref_put(&dev->ref, blktrans_dev_release); | 73 | kref_put(&dev->ref, blktrans_dev_release); |
@@ -119,18 +119,43 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
122 | int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev) | ||
123 | { | ||
124 | if (kthread_should_stop()) | ||
125 | return 1; | ||
126 | |||
127 | return dev->bg_stop; | ||
128 | } | ||
129 | EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background); | ||
130 | |||
122 | static int mtd_blktrans_thread(void *arg) | 131 | static int mtd_blktrans_thread(void *arg) |
123 | { | 132 | { |
124 | struct mtd_blktrans_dev *dev = arg; | 133 | struct mtd_blktrans_dev *dev = arg; |
134 | struct mtd_blktrans_ops *tr = dev->tr; | ||
125 | struct request_queue *rq = dev->rq; | 135 | struct request_queue *rq = dev->rq; |
126 | struct request *req = NULL; | 136 | struct request *req = NULL; |
137 | int background_done = 0; | ||
127 | 138 | ||
128 | spin_lock_irq(rq->queue_lock); | 139 | spin_lock_irq(rq->queue_lock); |
129 | 140 | ||
130 | while (!kthread_should_stop()) { | 141 | while (!kthread_should_stop()) { |
131 | int res; | 142 | int res; |
132 | 143 | ||
144 | dev->bg_stop = false; | ||
133 | if (!req && !(req = blk_fetch_request(rq))) { | 145 | if (!req && !(req = blk_fetch_request(rq))) { |
146 | if (tr->background && !background_done) { | ||
147 | spin_unlock_irq(rq->queue_lock); | ||
148 | mutex_lock(&dev->lock); | ||
149 | tr->background(dev); | ||
150 | mutex_unlock(&dev->lock); | ||
151 | spin_lock_irq(rq->queue_lock); | ||
152 | /* | ||
153 | * Do background processing just once per idle | ||
154 | * period. | ||
155 | */ | ||
156 | background_done = !dev->bg_stop; | ||
157 | continue; | ||
158 | } | ||
134 | set_current_state(TASK_INTERRUPTIBLE); | 159 | set_current_state(TASK_INTERRUPTIBLE); |
135 | 160 | ||
136 | if (kthread_should_stop()) | 161 | if (kthread_should_stop()) |
@@ -152,6 +177,8 @@ static int mtd_blktrans_thread(void *arg) | |||
152 | 177 | ||
153 | if (!__blk_end_request_cur(req, res)) | 178 | if (!__blk_end_request_cur(req, res)) |
154 | req = NULL; | 179 | req = NULL; |
180 | |||
181 | background_done = 0; | ||
155 | } | 182 | } |
156 | 183 | ||
157 | if (req) | 184 | if (req) |
@@ -172,8 +199,10 @@ static void mtd_blktrans_request(struct request_queue *rq) | |||
172 | if (!dev) | 199 | if (!dev) |
173 | while ((req = blk_fetch_request(rq)) != NULL) | 200 | while ((req = blk_fetch_request(rq)) != NULL) |
174 | __blk_end_request_all(req, -ENODEV); | 201 | __blk_end_request_all(req, -ENODEV); |
175 | else | 202 | else { |
203 | dev->bg_stop = true; | ||
176 | wake_up_process(dev->thread); | 204 | wake_up_process(dev->thread); |
205 | } | ||
177 | } | 206 | } |
178 | 207 | ||
179 | static int blktrans_open(struct block_device *bdev, fmode_t mode) | 208 | static int blktrans_open(struct block_device *bdev, fmode_t mode) |
@@ -379,9 +408,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) | |||
379 | new->rq->queuedata = new; | 408 | new->rq->queuedata = new; |
380 | blk_queue_logical_block_size(new->rq, tr->blksize); | 409 | blk_queue_logical_block_size(new->rq, tr->blksize); |
381 | 410 | ||
382 | if (tr->discard) | 411 | if (tr->discard) { |
383 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, | 412 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); |
384 | new->rq); | 413 | new->rq->limits.max_discard_sectors = UINT_MAX; |
414 | } | ||
385 | 415 | ||
386 | gd->queue = new->rq; | 416 | gd->queue = new->rq; |
387 | 417 | ||
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 5f5777bd3f75..5060e608ea5d 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -750,6 +750,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
750 | struct mtd_concat *concat; | 750 | struct mtd_concat *concat; |
751 | uint32_t max_erasesize, curr_erasesize; | 751 | uint32_t max_erasesize, curr_erasesize; |
752 | int num_erase_region; | 752 | int num_erase_region; |
753 | int max_writebufsize = 0; | ||
753 | 754 | ||
754 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); | 755 | printk(KERN_NOTICE "Concatenating MTD devices:\n"); |
755 | for (i = 0; i < num_devs; i++) | 756 | for (i = 0; i < num_devs; i++) |
@@ -776,7 +777,12 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
776 | concat->mtd.size = subdev[0]->size; | 777 | concat->mtd.size = subdev[0]->size; |
777 | concat->mtd.erasesize = subdev[0]->erasesize; | 778 | concat->mtd.erasesize = subdev[0]->erasesize; |
778 | concat->mtd.writesize = subdev[0]->writesize; | 779 | concat->mtd.writesize = subdev[0]->writesize; |
779 | concat->mtd.writebufsize = subdev[0]->writebufsize; | 780 | |
781 | for (i = 0; i < num_devs; i++) | ||
782 | if (max_writebufsize < subdev[i]->writebufsize) | ||
783 | max_writebufsize = subdev[i]->writebufsize; | ||
784 | concat->mtd.writebufsize = max_writebufsize; | ||
785 | |||
780 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; | 786 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; |
781 | concat->mtd.oobsize = subdev[0]->oobsize; | 787 | concat->mtd.oobsize = subdev[0]->oobsize; |
782 | concat->mtd.oobavail = subdev[0]->oobavail; | 788 | concat->mtd.oobavail = subdev[0]->oobavail; |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 527cebf58da4..da69bc8a5a7d 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -43,7 +43,7 @@ | |||
43 | * backing device capabilities for non-mappable devices (such as NAND flash) | 43 | * backing device capabilities for non-mappable devices (such as NAND flash) |
44 | * - permits private mappings, copies are taken of the data | 44 | * - permits private mappings, copies are taken of the data |
45 | */ | 45 | */ |
46 | struct backing_dev_info mtd_bdi_unmappable = { | 46 | static struct backing_dev_info mtd_bdi_unmappable = { |
47 | .capabilities = BDI_CAP_MAP_COPY, | 47 | .capabilities = BDI_CAP_MAP_COPY, |
48 | }; | 48 | }; |
49 | 49 | ||
@@ -52,7 +52,7 @@ struct backing_dev_info mtd_bdi_unmappable = { | |||
52 | * - permits private mappings, copies are taken of the data | 52 | * - permits private mappings, copies are taken of the data |
53 | * - permits non-writable shared mappings | 53 | * - permits non-writable shared mappings |
54 | */ | 54 | */ |
55 | struct backing_dev_info mtd_bdi_ro_mappable = { | 55 | static struct backing_dev_info mtd_bdi_ro_mappable = { |
56 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | 56 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | |
57 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), | 57 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP), |
58 | }; | 58 | }; |
@@ -62,7 +62,7 @@ struct backing_dev_info mtd_bdi_ro_mappable = { | |||
62 | * - permits private mappings, copies are taken of the data | 62 | * - permits private mappings, copies are taken of the data |
63 | * - permits non-writable shared mappings | 63 | * - permits non-writable shared mappings |
64 | */ | 64 | */ |
65 | struct backing_dev_info mtd_bdi_rw_mappable = { | 65 | static struct backing_dev_info mtd_bdi_rw_mappable = { |
66 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | | 66 | .capabilities = (BDI_CAP_MAP_COPY | BDI_CAP_MAP_DIRECT | |
67 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | | 67 | BDI_CAP_EXEC_MAP | BDI_CAP_READ_MAP | |
68 | BDI_CAP_WRITE_MAP), | 68 | BDI_CAP_WRITE_MAP), |
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c new file mode 100644 index 000000000000..237913c5c92c --- /dev/null +++ b/drivers/mtd/mtdswap.c | |||
@@ -0,0 +1,1587 @@ | |||
1 | /* | ||
2 | * Swap block device support for MTDs | ||
3 | * Turns an MTD device into a swap device with block wear leveling | ||
4 | * | ||
5 | * Copyright © 2007,2011 Nokia Corporation. All rights reserved. | ||
6 | * | ||
7 | * Authors: Jarkko Lavinen <jarkko.lavinen@nokia.com> | ||
8 | * | ||
9 | * Based on Richard Purdie's earlier implementation in 2007. Background | ||
10 | * support and lock-less operation written by Adrian Hunter. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * version 2 as published by the Free Software Foundation. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, but | ||
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
19 | * General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
24 | * 02110-1301 USA | ||
25 | */ | ||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/mtd/mtd.h> | ||
30 | #include <linux/mtd/blktrans.h> | ||
31 | #include <linux/rbtree.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/vmalloc.h> | ||
35 | #include <linux/genhd.h> | ||
36 | #include <linux/swap.h> | ||
37 | #include <linux/debugfs.h> | ||
38 | #include <linux/seq_file.h> | ||
39 | #include <linux/device.h> | ||
40 | #include <linux/math64.h> | ||
41 | |||
42 | #define MTDSWAP_PREFIX "mtdswap" | ||
43 | |||
44 | /* | ||
45 | * The number of free eraseblocks when GC should stop | ||
46 | */ | ||
47 | #define CLEAN_BLOCK_THRESHOLD 20 | ||
48 | |||
49 | /* | ||
50 | * Number of free eraseblocks below which GC can also collect low frag | ||
51 | * blocks. | ||
52 | */ | ||
53 | #define LOW_FRAG_GC_TRESHOLD 5 | ||
54 | |||
55 | /* | ||
56 | * Wear level cost amortization. We want to do wear leveling on the background | ||
57 | * without disturbing gc too much. This is made by defining max GC frequency. | ||
58 | * Frequency value 6 means 1/6 of the GC passes will pick an erase block based | ||
59 | * on the biggest wear difference rather than the biggest dirtiness. | ||
60 | * | ||
61 | * The lower freq2 should be chosen so that it makes sure the maximum erase | ||
62 | * difference will decrease even if a malicious application is deliberately | ||
63 | * trying to make erase differences large. | ||
64 | */ | ||
65 | #define MAX_ERASE_DIFF 4000 | ||
66 | #define COLLECT_NONDIRTY_BASE MAX_ERASE_DIFF | ||
67 | #define COLLECT_NONDIRTY_FREQ1 6 | ||
68 | #define COLLECT_NONDIRTY_FREQ2 4 | ||
69 | |||
70 | #define PAGE_UNDEF UINT_MAX | ||
71 | #define BLOCK_UNDEF UINT_MAX | ||
72 | #define BLOCK_ERROR (UINT_MAX - 1) | ||
73 | #define BLOCK_MAX (UINT_MAX - 2) | ||
74 | |||
75 | #define EBLOCK_BAD (1 << 0) | ||
76 | #define EBLOCK_NOMAGIC (1 << 1) | ||
77 | #define EBLOCK_BITFLIP (1 << 2) | ||
78 | #define EBLOCK_FAILED (1 << 3) | ||
79 | #define EBLOCK_READERR (1 << 4) | ||
80 | #define EBLOCK_IDX_SHIFT 5 | ||
81 | |||
82 | struct swap_eb { | ||
83 | struct rb_node rb; | ||
84 | struct rb_root *root; | ||
85 | |||
86 | unsigned int flags; | ||
87 | unsigned int active_count; | ||
88 | unsigned int erase_count; | ||
89 | unsigned int pad; /* speeds up pointer decremtnt */ | ||
90 | }; | ||
91 | |||
92 | #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \ | ||
93 | rb)->erase_count) | ||
94 | #define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \ | ||
95 | rb)->erase_count) | ||
96 | |||
97 | struct mtdswap_tree { | ||
98 | struct rb_root root; | ||
99 | unsigned int count; | ||
100 | }; | ||
101 | |||
102 | enum { | ||
103 | MTDSWAP_CLEAN, | ||
104 | MTDSWAP_USED, | ||
105 | MTDSWAP_LOWFRAG, | ||
106 | MTDSWAP_HIFRAG, | ||
107 | MTDSWAP_DIRTY, | ||
108 | MTDSWAP_BITFLIP, | ||
109 | MTDSWAP_FAILING, | ||
110 | MTDSWAP_TREE_CNT, | ||
111 | }; | ||
112 | |||
113 | struct mtdswap_dev { | ||
114 | struct mtd_blktrans_dev *mbd_dev; | ||
115 | struct mtd_info *mtd; | ||
116 | struct device *dev; | ||
117 | |||
118 | unsigned int *page_data; | ||
119 | unsigned int *revmap; | ||
120 | |||
121 | unsigned int eblks; | ||
122 | unsigned int spare_eblks; | ||
123 | unsigned int pages_per_eblk; | ||
124 | unsigned int max_erase_count; | ||
125 | struct swap_eb *eb_data; | ||
126 | |||
127 | struct mtdswap_tree trees[MTDSWAP_TREE_CNT]; | ||
128 | |||
129 | unsigned long long sect_read_count; | ||
130 | unsigned long long sect_write_count; | ||
131 | unsigned long long mtd_write_count; | ||
132 | unsigned long long mtd_read_count; | ||
133 | unsigned long long discard_count; | ||
134 | unsigned long long discard_page_count; | ||
135 | |||
136 | unsigned int curr_write_pos; | ||
137 | struct swap_eb *curr_write; | ||
138 | |||
139 | char *page_buf; | ||
140 | char *oob_buf; | ||
141 | |||
142 | struct dentry *debugfs_root; | ||
143 | }; | ||
144 | |||
145 | struct mtdswap_oobdata { | ||
146 | __le16 magic; | ||
147 | __le32 count; | ||
148 | } __attribute__((packed)); | ||
149 | |||
150 | #define MTDSWAP_MAGIC_CLEAN 0x2095 | ||
151 | #define MTDSWAP_MAGIC_DIRTY (MTDSWAP_MAGIC_CLEAN + 1) | ||
152 | #define MTDSWAP_TYPE_CLEAN 0 | ||
153 | #define MTDSWAP_TYPE_DIRTY 1 | ||
154 | #define MTDSWAP_OOBSIZE sizeof(struct mtdswap_oobdata) | ||
155 | |||
156 | #define MTDSWAP_ERASE_RETRIES 3 /* Before marking erase block bad */ | ||
157 | #define MTDSWAP_IO_RETRIES 3 | ||
158 | |||
159 | enum { | ||
160 | MTDSWAP_SCANNED_CLEAN, | ||
161 | MTDSWAP_SCANNED_DIRTY, | ||
162 | MTDSWAP_SCANNED_BITFLIP, | ||
163 | MTDSWAP_SCANNED_BAD, | ||
164 | }; | ||
165 | |||
166 | /* | ||
167 | * In the worst case mtdswap_writesect() has allocated the last clean | ||
168 | * page from the current block and is then pre-empted by the GC | ||
169 | * thread. The thread can consume a full erase block when moving a | ||
170 | * block. | ||
171 | */ | ||
172 | #define MIN_SPARE_EBLOCKS 2 | ||
173 | #define MIN_ERASE_BLOCKS (MIN_SPARE_EBLOCKS + 1) | ||
174 | |||
175 | #define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root) | ||
176 | #define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL) | ||
177 | #define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name)) | ||
178 | #define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count) | ||
179 | |||
180 | #define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv) | ||
181 | |||
182 | static char partitions[128] = ""; | ||
183 | module_param_string(partitions, partitions, sizeof(partitions), 0444); | ||
184 | MODULE_PARM_DESC(partitions, "MTD partition numbers to use as swap " | ||
185 | "partitions=\"1,3,5\""); | ||
186 | |||
187 | static unsigned int spare_eblocks = 10; | ||
188 | module_param(spare_eblocks, uint, 0444); | ||
189 | MODULE_PARM_DESC(spare_eblocks, "Percentage of spare erase blocks for " | ||
190 | "garbage collection (default 10%)"); | ||
191 | |||
192 | static bool header; /* false */ | ||
193 | module_param(header, bool, 0444); | ||
194 | MODULE_PARM_DESC(header, | ||
195 | "Include builtin swap header (default 0, without header)"); | ||
196 | |||
197 | static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background); | ||
198 | |||
199 | static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb) | ||
200 | { | ||
201 | return (loff_t)(eb - d->eb_data) * d->mtd->erasesize; | ||
202 | } | ||
203 | |||
204 | static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb) | ||
205 | { | ||
206 | unsigned int oldidx; | ||
207 | struct mtdswap_tree *tp; | ||
208 | |||
209 | if (eb->root) { | ||
210 | tp = container_of(eb->root, struct mtdswap_tree, root); | ||
211 | oldidx = tp - &d->trees[0]; | ||
212 | |||
213 | d->trees[oldidx].count--; | ||
214 | rb_erase(&eb->rb, eb->root); | ||
215 | } | ||
216 | } | ||
217 | |||
218 | static void __mtdswap_rb_add(struct rb_root *root, struct swap_eb *eb) | ||
219 | { | ||
220 | struct rb_node **p, *parent = NULL; | ||
221 | struct swap_eb *cur; | ||
222 | |||
223 | p = &root->rb_node; | ||
224 | while (*p) { | ||
225 | parent = *p; | ||
226 | cur = rb_entry(parent, struct swap_eb, rb); | ||
227 | if (eb->erase_count > cur->erase_count) | ||
228 | p = &(*p)->rb_right; | ||
229 | else | ||
230 | p = &(*p)->rb_left; | ||
231 | } | ||
232 | |||
233 | rb_link_node(&eb->rb, parent, p); | ||
234 | rb_insert_color(&eb->rb, root); | ||
235 | } | ||
236 | |||
237 | static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx) | ||
238 | { | ||
239 | struct rb_root *root; | ||
240 | |||
241 | if (eb->root == &d->trees[idx].root) | ||
242 | return; | ||
243 | |||
244 | mtdswap_eb_detach(d, eb); | ||
245 | root = &d->trees[idx].root; | ||
246 | __mtdswap_rb_add(root, eb); | ||
247 | eb->root = root; | ||
248 | d->trees[idx].count++; | ||
249 | } | ||
250 | |||
251 | static struct rb_node *mtdswap_rb_index(struct rb_root *root, unsigned int idx) | ||
252 | { | ||
253 | struct rb_node *p; | ||
254 | unsigned int i; | ||
255 | |||
256 | p = rb_first(root); | ||
257 | i = 0; | ||
258 | while (i < idx && p) { | ||
259 | p = rb_next(p); | ||
260 | i++; | ||
261 | } | ||
262 | |||
263 | return p; | ||
264 | } | ||
265 | |||
266 | static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb) | ||
267 | { | ||
268 | int ret; | ||
269 | loff_t offset; | ||
270 | |||
271 | d->spare_eblks--; | ||
272 | eb->flags |= EBLOCK_BAD; | ||
273 | mtdswap_eb_detach(d, eb); | ||
274 | eb->root = NULL; | ||
275 | |||
276 | /* badblocks not supported */ | ||
277 | if (!d->mtd->block_markbad) | ||
278 | return 1; | ||
279 | |||
280 | offset = mtdswap_eb_offset(d, eb); | ||
281 | dev_warn(d->dev, "Marking bad block at %08llx\n", offset); | ||
282 | ret = d->mtd->block_markbad(d->mtd, offset); | ||
283 | |||
284 | if (ret) { | ||
285 | dev_warn(d->dev, "Mark block bad failed for block at %08llx " | ||
286 | "error %d\n", offset, ret); | ||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | return 1; | ||
291 | |||
292 | } | ||
293 | |||
294 | static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb) | ||
295 | { | ||
296 | unsigned int marked = eb->flags & EBLOCK_FAILED; | ||
297 | struct swap_eb *curr_write = d->curr_write; | ||
298 | |||
299 | eb->flags |= EBLOCK_FAILED; | ||
300 | if (curr_write == eb) { | ||
301 | d->curr_write = NULL; | ||
302 | |||
303 | if (!marked && d->curr_write_pos != 0) { | ||
304 | mtdswap_rb_add(d, eb, MTDSWAP_FAILING); | ||
305 | return 0; | ||
306 | } | ||
307 | } | ||
308 | |||
309 | return mtdswap_handle_badblock(d, eb); | ||
310 | } | ||
311 | |||
312 | static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from, | ||
313 | struct mtd_oob_ops *ops) | ||
314 | { | ||
315 | int ret = d->mtd->read_oob(d->mtd, from, ops); | ||
316 | |||
317 | if (ret == -EUCLEAN) | ||
318 | return ret; | ||
319 | |||
320 | if (ret) { | ||
321 | dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n", | ||
322 | ret, from); | ||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | if (ops->oobretlen < ops->ooblen) { | ||
327 | dev_warn(d->dev, "Read OOB return short read (%zd bytes not " | ||
328 | "%zd) for block at %08llx\n", | ||
329 | ops->oobretlen, ops->ooblen, from); | ||
330 | return -EIO; | ||
331 | } | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb) | ||
337 | { | ||
338 | struct mtdswap_oobdata *data, *data2; | ||
339 | int ret; | ||
340 | loff_t offset; | ||
341 | struct mtd_oob_ops ops; | ||
342 | |||
343 | offset = mtdswap_eb_offset(d, eb); | ||
344 | |||
345 | /* Check first if the block is bad. */ | ||
346 | if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset)) | ||
347 | return MTDSWAP_SCANNED_BAD; | ||
348 | |||
349 | ops.ooblen = 2 * d->mtd->ecclayout->oobavail; | ||
350 | ops.oobbuf = d->oob_buf; | ||
351 | ops.ooboffs = 0; | ||
352 | ops.datbuf = NULL; | ||
353 | ops.mode = MTD_OOB_AUTO; | ||
354 | |||
355 | ret = mtdswap_read_oob(d, offset, &ops); | ||
356 | |||
357 | if (ret && ret != -EUCLEAN) | ||
358 | return ret; | ||
359 | |||
360 | data = (struct mtdswap_oobdata *)d->oob_buf; | ||
361 | data2 = (struct mtdswap_oobdata *) | ||
362 | (d->oob_buf + d->mtd->ecclayout->oobavail); | ||
363 | |||
364 | if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) { | ||
365 | eb->erase_count = le32_to_cpu(data->count); | ||
366 | if (ret == -EUCLEAN) | ||
367 | ret = MTDSWAP_SCANNED_BITFLIP; | ||
368 | else { | ||
369 | if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY) | ||
370 | ret = MTDSWAP_SCANNED_DIRTY; | ||
371 | else | ||
372 | ret = MTDSWAP_SCANNED_CLEAN; | ||
373 | } | ||
374 | } else { | ||
375 | eb->flags |= EBLOCK_NOMAGIC; | ||
376 | ret = MTDSWAP_SCANNED_DIRTY; | ||
377 | } | ||
378 | |||
379 | return ret; | ||
380 | } | ||
381 | |||
382 | static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb, | ||
383 | u16 marker) | ||
384 | { | ||
385 | struct mtdswap_oobdata n; | ||
386 | int ret; | ||
387 | loff_t offset; | ||
388 | struct mtd_oob_ops ops; | ||
389 | |||
390 | ops.ooboffs = 0; | ||
391 | ops.oobbuf = (uint8_t *)&n; | ||
392 | ops.mode = MTD_OOB_AUTO; | ||
393 | ops.datbuf = NULL; | ||
394 | |||
395 | if (marker == MTDSWAP_TYPE_CLEAN) { | ||
396 | n.magic = cpu_to_le16(MTDSWAP_MAGIC_CLEAN); | ||
397 | n.count = cpu_to_le32(eb->erase_count); | ||
398 | ops.ooblen = MTDSWAP_OOBSIZE; | ||
399 | offset = mtdswap_eb_offset(d, eb); | ||
400 | } else { | ||
401 | n.magic = cpu_to_le16(MTDSWAP_MAGIC_DIRTY); | ||
402 | ops.ooblen = sizeof(n.magic); | ||
403 | offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize; | ||
404 | } | ||
405 | |||
406 | ret = d->mtd->write_oob(d->mtd, offset , &ops); | ||
407 | |||
408 | if (ret) { | ||
409 | dev_warn(d->dev, "Write OOB failed for block at %08llx " | ||
410 | "error %d\n", offset, ret); | ||
411 | if (ret == -EIO || ret == -EBADMSG) | ||
412 | mtdswap_handle_write_error(d, eb); | ||
413 | return ret; | ||
414 | } | ||
415 | |||
416 | if (ops.oobretlen != ops.ooblen) { | ||
417 | dev_warn(d->dev, "Short OOB write for block at %08llx: " | ||
418 | "%zd not %zd\n", | ||
419 | offset, ops.oobretlen, ops.ooblen); | ||
420 | return ret; | ||
421 | } | ||
422 | |||
423 | return 0; | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * Are there any erase blocks without MAGIC_CLEAN header, presumably | ||
428 | * because power was cut off after erase but before header write? We | ||
429 | * need to guestimate the erase count. | ||
430 | */ | ||
431 | static void mtdswap_check_counts(struct mtdswap_dev *d) | ||
432 | { | ||
433 | struct rb_root hist_root = RB_ROOT; | ||
434 | struct rb_node *medrb; | ||
435 | struct swap_eb *eb; | ||
436 | unsigned int i, cnt, median; | ||
437 | |||
438 | cnt = 0; | ||
439 | for (i = 0; i < d->eblks; i++) { | ||
440 | eb = d->eb_data + i; | ||
441 | |||
442 | if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) | ||
443 | continue; | ||
444 | |||
445 | __mtdswap_rb_add(&hist_root, eb); | ||
446 | cnt++; | ||
447 | } | ||
448 | |||
449 | if (cnt == 0) | ||
450 | return; | ||
451 | |||
452 | medrb = mtdswap_rb_index(&hist_root, cnt / 2); | ||
453 | median = rb_entry(medrb, struct swap_eb, rb)->erase_count; | ||
454 | |||
455 | d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root); | ||
456 | |||
457 | for (i = 0; i < d->eblks; i++) { | ||
458 | eb = d->eb_data + i; | ||
459 | |||
460 | if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR)) | ||
461 | eb->erase_count = median; | ||
462 | |||
463 | if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR)) | ||
464 | continue; | ||
465 | |||
466 | rb_erase(&eb->rb, &hist_root); | ||
467 | } | ||
468 | } | ||
469 | |||
470 | static void mtdswap_scan_eblks(struct mtdswap_dev *d) | ||
471 | { | ||
472 | int status; | ||
473 | unsigned int i, idx; | ||
474 | struct swap_eb *eb; | ||
475 | |||
476 | for (i = 0; i < d->eblks; i++) { | ||
477 | eb = d->eb_data + i; | ||
478 | |||
479 | status = mtdswap_read_markers(d, eb); | ||
480 | if (status < 0) | ||
481 | eb->flags |= EBLOCK_READERR; | ||
482 | else if (status == MTDSWAP_SCANNED_BAD) { | ||
483 | eb->flags |= EBLOCK_BAD; | ||
484 | continue; | ||
485 | } | ||
486 | |||
487 | switch (status) { | ||
488 | case MTDSWAP_SCANNED_CLEAN: | ||
489 | idx = MTDSWAP_CLEAN; | ||
490 | break; | ||
491 | case MTDSWAP_SCANNED_DIRTY: | ||
492 | case MTDSWAP_SCANNED_BITFLIP: | ||
493 | idx = MTDSWAP_DIRTY; | ||
494 | break; | ||
495 | default: | ||
496 | idx = MTDSWAP_FAILING; | ||
497 | } | ||
498 | |||
499 | eb->flags |= (idx << EBLOCK_IDX_SHIFT); | ||
500 | } | ||
501 | |||
502 | mtdswap_check_counts(d); | ||
503 | |||
504 | for (i = 0; i < d->eblks; i++) { | ||
505 | eb = d->eb_data + i; | ||
506 | |||
507 | if (eb->flags & EBLOCK_BAD) | ||
508 | continue; | ||
509 | |||
510 | idx = eb->flags >> EBLOCK_IDX_SHIFT; | ||
511 | mtdswap_rb_add(d, eb, idx); | ||
512 | } | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * Place eblk into a tree corresponding to its number of active blocks | ||
517 | * it contains. | ||
518 | */ | ||
519 | static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb) | ||
520 | { | ||
521 | unsigned int weight = eb->active_count; | ||
522 | unsigned int maxweight = d->pages_per_eblk; | ||
523 | |||
524 | if (eb == d->curr_write) | ||
525 | return; | ||
526 | |||
527 | if (eb->flags & EBLOCK_BITFLIP) | ||
528 | mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP); | ||
529 | else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED)) | ||
530 | mtdswap_rb_add(d, eb, MTDSWAP_FAILING); | ||
531 | if (weight == maxweight) | ||
532 | mtdswap_rb_add(d, eb, MTDSWAP_USED); | ||
533 | else if (weight == 0) | ||
534 | mtdswap_rb_add(d, eb, MTDSWAP_DIRTY); | ||
535 | else if (weight > (maxweight/2)) | ||
536 | mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG); | ||
537 | else | ||
538 | mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG); | ||
539 | } | ||
540 | |||
541 | |||
542 | static void mtdswap_erase_callback(struct erase_info *done) | ||
543 | { | ||
544 | wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv; | ||
545 | wake_up(wait_q); | ||
546 | } | ||
547 | |||
548 | static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb) | ||
549 | { | ||
550 | struct mtd_info *mtd = d->mtd; | ||
551 | struct erase_info erase; | ||
552 | wait_queue_head_t wq; | ||
553 | unsigned int retries = 0; | ||
554 | int ret; | ||
555 | |||
556 | eb->erase_count++; | ||
557 | if (eb->erase_count > d->max_erase_count) | ||
558 | d->max_erase_count = eb->erase_count; | ||
559 | |||
560 | retry: | ||
561 | init_waitqueue_head(&wq); | ||
562 | memset(&erase, 0, sizeof(struct erase_info)); | ||
563 | |||
564 | erase.mtd = mtd; | ||
565 | erase.callback = mtdswap_erase_callback; | ||
566 | erase.addr = mtdswap_eb_offset(d, eb); | ||
567 | erase.len = mtd->erasesize; | ||
568 | erase.priv = (u_long)&wq; | ||
569 | |||
570 | ret = mtd->erase(mtd, &erase); | ||
571 | if (ret) { | ||
572 | if (retries++ < MTDSWAP_ERASE_RETRIES) { | ||
573 | dev_warn(d->dev, | ||
574 | "erase of erase block %#llx on %s failed", | ||
575 | erase.addr, mtd->name); | ||
576 | yield(); | ||
577 | goto retry; | ||
578 | } | ||
579 | |||
580 | dev_err(d->dev, "Cannot erase erase block %#llx on %s\n", | ||
581 | erase.addr, mtd->name); | ||
582 | |||
583 | mtdswap_handle_badblock(d, eb); | ||
584 | return -EIO; | ||
585 | } | ||
586 | |||
587 | ret = wait_event_interruptible(wq, erase.state == MTD_ERASE_DONE || | ||
588 | erase.state == MTD_ERASE_FAILED); | ||
589 | if (ret) { | ||
590 | dev_err(d->dev, "Interrupted erase block %#llx erassure on %s", | ||
591 | erase.addr, mtd->name); | ||
592 | return -EINTR; | ||
593 | } | ||
594 | |||
595 | if (erase.state == MTD_ERASE_FAILED) { | ||
596 | if (retries++ < MTDSWAP_ERASE_RETRIES) { | ||
597 | dev_warn(d->dev, | ||
598 | "erase of erase block %#llx on %s failed", | ||
599 | erase.addr, mtd->name); | ||
600 | yield(); | ||
601 | goto retry; | ||
602 | } | ||
603 | |||
604 | mtdswap_handle_badblock(d, eb); | ||
605 | return -EIO; | ||
606 | } | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page, | ||
612 | unsigned int *block) | ||
613 | { | ||
614 | int ret; | ||
615 | struct swap_eb *old_eb = d->curr_write; | ||
616 | struct rb_root *clean_root; | ||
617 | struct swap_eb *eb; | ||
618 | |||
619 | if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) { | ||
620 | do { | ||
621 | if (TREE_EMPTY(d, CLEAN)) | ||
622 | return -ENOSPC; | ||
623 | |||
624 | clean_root = TREE_ROOT(d, CLEAN); | ||
625 | eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); | ||
626 | rb_erase(&eb->rb, clean_root); | ||
627 | eb->root = NULL; | ||
628 | TREE_COUNT(d, CLEAN)--; | ||
629 | |||
630 | ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY); | ||
631 | } while (ret == -EIO || ret == -EBADMSG); | ||
632 | |||
633 | if (ret) | ||
634 | return ret; | ||
635 | |||
636 | d->curr_write_pos = 0; | ||
637 | d->curr_write = eb; | ||
638 | if (old_eb) | ||
639 | mtdswap_store_eb(d, old_eb); | ||
640 | } | ||
641 | |||
642 | *block = (d->curr_write - d->eb_data) * d->pages_per_eblk + | ||
643 | d->curr_write_pos; | ||
644 | |||
645 | d->curr_write->active_count++; | ||
646 | d->revmap[*block] = page; | ||
647 | d->curr_write_pos++; | ||
648 | |||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d) | ||
653 | { | ||
654 | return TREE_COUNT(d, CLEAN) * d->pages_per_eblk + | ||
655 | d->pages_per_eblk - d->curr_write_pos; | ||
656 | } | ||
657 | |||
658 | static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d) | ||
659 | { | ||
660 | return mtdswap_free_page_cnt(d) > d->pages_per_eblk; | ||
661 | } | ||
662 | |||
663 | static int mtdswap_write_block(struct mtdswap_dev *d, char *buf, | ||
664 | unsigned int page, unsigned int *bp, int gc_context) | ||
665 | { | ||
666 | struct mtd_info *mtd = d->mtd; | ||
667 | struct swap_eb *eb; | ||
668 | size_t retlen; | ||
669 | loff_t writepos; | ||
670 | int ret; | ||
671 | |||
672 | retry: | ||
673 | if (!gc_context) | ||
674 | while (!mtdswap_enough_free_pages(d)) | ||
675 | if (mtdswap_gc(d, 0) > 0) | ||
676 | return -ENOSPC; | ||
677 | |||
678 | ret = mtdswap_map_free_block(d, page, bp); | ||
679 | eb = d->eb_data + (*bp / d->pages_per_eblk); | ||
680 | |||
681 | if (ret == -EIO || ret == -EBADMSG) { | ||
682 | d->curr_write = NULL; | ||
683 | eb->active_count--; | ||
684 | d->revmap[*bp] = PAGE_UNDEF; | ||
685 | goto retry; | ||
686 | } | ||
687 | |||
688 | if (ret < 0) | ||
689 | return ret; | ||
690 | |||
691 | writepos = (loff_t)*bp << PAGE_SHIFT; | ||
692 | ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf); | ||
693 | if (ret == -EIO || ret == -EBADMSG) { | ||
694 | d->curr_write_pos--; | ||
695 | eb->active_count--; | ||
696 | d->revmap[*bp] = PAGE_UNDEF; | ||
697 | mtdswap_handle_write_error(d, eb); | ||
698 | goto retry; | ||
699 | } | ||
700 | |||
701 | if (ret < 0) { | ||
702 | dev_err(d->dev, "Write to MTD device failed: %d (%zd written)", | ||
703 | ret, retlen); | ||
704 | goto err; | ||
705 | } | ||
706 | |||
707 | if (retlen != PAGE_SIZE) { | ||
708 | dev_err(d->dev, "Short write to MTD device: %zd written", | ||
709 | retlen); | ||
710 | ret = -EIO; | ||
711 | goto err; | ||
712 | } | ||
713 | |||
714 | return ret; | ||
715 | |||
716 | err: | ||
717 | d->curr_write_pos--; | ||
718 | eb->active_count--; | ||
719 | d->revmap[*bp] = PAGE_UNDEF; | ||
720 | |||
721 | return ret; | ||
722 | } | ||
723 | |||
/*
 * Relocate the swap page stored in physical block @oldblock to a fresh
 * block, reported via @newblock. Used by garbage collection.
 *
 * Reads are retried up to MTDSWAP_IO_RETRIES times; -EUCLEAN (corrected
 * bitflips) is tolerated. On permanent read failure the logical page is
 * marked BLOCK_ERROR and the old mapping dropped.
 */
static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
			unsigned int *newblock)
{
	struct mtd_info *mtd = d->mtd;
	struct swap_eb *eb, *oldeb;
	int ret;
	size_t retlen;
	unsigned int page, retries;
	loff_t readpos;

	page = d->revmap[oldblock];
	readpos = (loff_t) oldblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);

	if (ret < 0 && ret != -EUCLEAN) {
		/* Remember that the source block is unreliable, then retry */
		oldeb = d->eb_data + oldblock / d->pages_per_eblk;
		oldeb->flags |= EBLOCK_READERR;

		dev_err(d->dev, "Read Error: %d (block %u)\n", ret,
			oldblock);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		goto read_error;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
			oldblock);
		ret = -EIO;
		goto read_error;
	}

	/* gc_context = 1: do not recurse into GC while already collecting */
	ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);
	if (ret < 0) {
		d->page_data[page] = BLOCK_ERROR;
		dev_err(d->dev, "Write error: %d\n", ret);
		return ret;
	}

	/* NOTE(review): this first eb assignment is dead — overwritten below */
	eb = d->eb_data + *newblock / d->pages_per_eblk;
	d->page_data[page] = *newblock;
	d->revmap[oldblock] = PAGE_UNDEF;
	eb = d->eb_data + oldblock / d->pages_per_eblk;
	eb->active_count--;

	return 0;

read_error:
	d->page_data[page] = BLOCK_ERROR;
	d->revmap[oldblock] = PAGE_UNDEF;
	return ret;
}
781 | |||
/*
 * Move every mapped page out of eraseblock @eb so it can be erased.
 * Returns 0 on success, -ENOSPC if the spare-block reserve is exhausted
 * mid-way, or the first per-page move error encountered otherwise.
 */
static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)
{
	unsigned int i, block, eblk_base, newblock;
	int ret, errcode;

	errcode = 0;
	eblk_base = (eb - d->eb_data) * d->pages_per_eblk;

	for (i = 0; i < d->pages_per_eblk; i++) {
		if (d->spare_eblks < MIN_SPARE_EBLOCKS)
			return -ENOSPC;

		block = eblk_base + i;
		/* Skip slots that hold no live logical page */
		if (d->revmap[block] == PAGE_UNDEF)
			continue;

		ret = mtdswap_move_block(d, block, &newblock);
		/* Keep moving the remaining pages, but remember the first error */
		if (ret < 0 && !errcode)
			errcode = ret;
	}

	return errcode;
}
805 | |||
806 | static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d) | ||
807 | { | ||
808 | int idx, stopat; | ||
809 | |||
810 | if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD) | ||
811 | stopat = MTDSWAP_LOWFRAG; | ||
812 | else | ||
813 | stopat = MTDSWAP_HIFRAG; | ||
814 | |||
815 | for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--) | ||
816 | if (d->trees[idx].root.rb_node != NULL) | ||
817 | return idx; | ||
818 | |||
819 | return -1; | ||
820 | } | ||
821 | |||
822 | static int mtdswap_wlfreq(unsigned int maxdiff) | ||
823 | { | ||
824 | unsigned int h, x, y, dist, base; | ||
825 | |||
826 | /* | ||
827 | * Calculate linear ramp down from f1 to f2 when maxdiff goes from | ||
828 | * MAX_ERASE_DIFF to MAX_ERASE_DIFF + COLLECT_NONDIRTY_BASE. Similar | ||
829 | * to triangle with height f1 - f1 and width COLLECT_NONDIRTY_BASE. | ||
830 | */ | ||
831 | |||
832 | dist = maxdiff - MAX_ERASE_DIFF; | ||
833 | if (dist > COLLECT_NONDIRTY_BASE) | ||
834 | dist = COLLECT_NONDIRTY_BASE; | ||
835 | |||
836 | /* | ||
837 | * Modelling the slop as right angular triangle with base | ||
838 | * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is | ||
839 | * equal to the ratio h/base. | ||
840 | */ | ||
841 | h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2; | ||
842 | base = COLLECT_NONDIRTY_BASE; | ||
843 | |||
844 | x = dist - base; | ||
845 | y = (x * h + base / 2) / base; | ||
846 | |||
847 | return COLLECT_NONDIRTY_FREQ2 + y; | ||
848 | } | ||
849 | |||
/*
 * Rate-limited wear-levelling pick: return the index of the tree whose
 * least-erased block lags d->max_erase_count the most, but only if the
 * gap exceeds MAX_ERASE_DIFF and enough calls have elapsed (frequency
 * given by mtdswap_wlfreq()). Returns -1 when wear levelling should not
 * run this time.
 */
static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
{
	/* Calls since the last WL pick; assumes callers are serialized */
	static unsigned int pick_cnt;
	/* idx = -1 wraps to UINT_MAX here and converts back to -1 on return */
	unsigned int i, idx = -1, wear, max;
	struct rb_root *root;

	max = 0;
	for (i = 0; i <= MTDSWAP_DIRTY; i++) {
		root = &d->trees[i].root;
		if (root->rb_node == NULL)
			continue;

		/* Wear gap of this tree's least-erased block */
		wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);
		if (wear > max) {
			max = wear;
			idx = i;
		}
	}

	if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {
		pick_cnt = 0;
		return idx;
	}

	pick_cnt++;
	return -1;
}
877 | |||
878 | static int mtdswap_choose_gc_tree(struct mtdswap_dev *d, | ||
879 | unsigned int background) | ||
880 | { | ||
881 | int idx; | ||
882 | |||
883 | if (TREE_NONEMPTY(d, FAILING) && | ||
884 | (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY)))) | ||
885 | return MTDSWAP_FAILING; | ||
886 | |||
887 | idx = mtdswap_choose_wl_tree(d); | ||
888 | if (idx >= MTDSWAP_CLEAN) | ||
889 | return idx; | ||
890 | |||
891 | return __mtdswap_choose_gc_tree(d); | ||
892 | } | ||
893 | |||
/*
 * Select and detach an eraseblock for garbage collection. In background
 * mode, GC stops early (returns NULL) when plenty of clean blocks exist
 * and nothing is dirty or failing. The chosen block is removed from its
 * tree, so the caller owns it afterwards. Returns NULL when there is
 * nothing worth collecting.
 */
static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,
				unsigned int background)
{
	struct rb_root *rp = NULL;
	struct swap_eb *eb = NULL;
	int idx;

	if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
		TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))
		return NULL;

	idx = mtdswap_choose_gc_tree(d, background);
	if (idx < 0)
		return NULL;

	/* idx >= 0 guarantees the chosen tree is non-empty */
	rp = &d->trees[idx].root;
	eb = rb_entry(rb_first(rp), struct swap_eb, rb);

	rb_erase(&eb->rb, rp);
	eb->root = NULL;
	d->trees[idx].count--;
	return eb;
}
917 | |||
/*
 * Alternating torture-test fill patterns: 0x55555555 for odd rounds,
 * 0xAAAAAAAA for even rounds, so consecutive passes flip every bit.
 */
static unsigned int mtdswap_test_patt(unsigned int i)
{
	static const unsigned int patt[2] = { 0xAAAAAAAA, 0x55555555 };

	return patt[i & 1];
}
922 | |||
/*
 * Torture-test an eraseblock that previously saw read errors: write an
 * alternating bit pattern to the data area and free OOB bytes of every
 * MTD page, read everything back and compare, erasing the block between
 * the two passes. Returns 1 if the block survives (READERR cleared),
 * 0 on any failure, in which case the block is retired via
 * mtdswap_handle_badblock().
 */
static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
					struct swap_eb *eb)
{
	struct mtd_info *mtd = d->mtd;
	unsigned int test, i, j, patt, mtd_pages;
	loff_t base, pos;
	unsigned int *p1 = (unsigned int *)d->page_buf;
	unsigned char *p2 = (unsigned char *)d->oob_buf;
	struct mtd_oob_ops ops;
	int ret;

	ops.mode = MTD_OOB_AUTO;
	ops.len = mtd->writesize;
	ops.ooblen = mtd->ecclayout->oobavail;
	ops.ooboffs = 0;
	ops.datbuf = d->page_buf;
	ops.oobbuf = d->oob_buf;
	base = mtdswap_eb_offset(d, eb);
	/* Number of MTD write pages in one eraseblock */
	mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;

	for (test = 0; test < 2; test++) {
		/* Pass 1: fill every page + OOB with the test pattern */
		pos = base;
		for (i = 0; i < mtd_pages; i++) {
			patt = mtdswap_test_patt(test + i);
			memset(d->page_buf, patt, mtd->writesize);
			memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
			ret = mtd->write_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			pos += mtd->writesize;
		}

		/* Pass 2: read back and verify data and OOB byte by byte */
		pos = base;
		for (i = 0; i < mtd_pages; i++) {
			ret = mtd->read_oob(mtd, pos, &ops);
			if (ret)
				goto error;

			patt = mtdswap_test_patt(test + i);
			for (j = 0; j < mtd->writesize/sizeof(int); j++)
				if (p1[j] != patt)
					goto error;

			for (j = 0; j < mtd->ecclayout->oobavail; j++)
				if (p2[j] != (unsigned char)patt)
					goto error;

			pos += mtd->writesize;
		}

		/* Erase between rounds so the inverted pattern can be written */
		ret = mtdswap_erase_block(d, eb);
		if (ret)
			goto error;
	}

	eb->flags &= ~EBLOCK_READERR;
	return 1;

error:
	mtdswap_handle_badblock(d, eb);
	return 0;
}
986 | |||
/*
 * Perform one garbage-collection step: pick a victim eraseblock, move
 * its live pages elsewhere, erase it and return it to the CLEAN tree
 * (after a torture test if it had read errors).
 *
 * Returns 1 when there is nothing (more) to collect or no spare space
 * left; 0 when a block was processed and another call may progress.
 */
static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)
{
	struct swap_eb *eb;
	int ret;

	if (d->spare_eblks < MIN_SPARE_EBLOCKS)
		return 1;

	eb = mtdswap_pick_gc_eblk(d, background);
	if (!eb)
		return 1;

	ret = mtdswap_gc_eblock(d, eb);
	if (ret == -ENOSPC)
		return 1;

	if (eb->flags & EBLOCK_FAILED) {
		/* Write errors during the move: retire the block */
		mtdswap_handle_badblock(d, eb);
		return 0;
	}

	eb->flags &= ~EBLOCK_BITFLIP;
	ret = mtdswap_erase_block(d, eb);
	/* Blocks with past read errors must also pass the torture test */
	if ((eb->flags & EBLOCK_READERR) &&
		(ret || !mtdswap_eblk_passes(d, eb)))
		return 0;

	if (ret == 0)
		ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);

	if (ret == 0)
		mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
	else if (ret != -EIO && ret != -EBADMSG)
		/* Marker write failed softly: keep as dirty, not bad */
		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);

	return 0;
}
1024 | |||
/*
 * Background GC thread body: keep collecting one eraseblock at a time
 * until GC reports there is nothing (or no room) left to do, or the
 * block translation layer asks us to stop.
 */
static void mtdswap_background(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	for (;;) {
		if (mtdswap_gc(d, 1))
			return;
		if (mtd_blktrans_cease_background(dev))
			return;
	}
}
1036 | |||
1037 | static void mtdswap_cleanup(struct mtdswap_dev *d) | ||
1038 | { | ||
1039 | vfree(d->eb_data); | ||
1040 | vfree(d->revmap); | ||
1041 | vfree(d->page_data); | ||
1042 | kfree(d->oob_buf); | ||
1043 | kfree(d->page_buf); | ||
1044 | } | ||
1045 | |||
1046 | static int mtdswap_flush(struct mtd_blktrans_dev *dev) | ||
1047 | { | ||
1048 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1049 | |||
1050 | if (d->mtd->sync) | ||
1051 | d->mtd->sync(d->mtd); | ||
1052 | return 0; | ||
1053 | } | ||
1054 | |||
1055 | static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size) | ||
1056 | { | ||
1057 | loff_t offset; | ||
1058 | unsigned int badcnt; | ||
1059 | |||
1060 | badcnt = 0; | ||
1061 | |||
1062 | if (mtd->block_isbad) | ||
1063 | for (offset = 0; offset < size; offset += mtd->erasesize) | ||
1064 | if (mtd->block_isbad(mtd, offset)) | ||
1065 | badcnt++; | ||
1066 | |||
1067 | return badcnt; | ||
1068 | } | ||
1069 | |||
1070 | static int mtdswap_writesect(struct mtd_blktrans_dev *dev, | ||
1071 | unsigned long page, char *buf) | ||
1072 | { | ||
1073 | struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev); | ||
1074 | unsigned int newblock, mapped; | ||
1075 | struct swap_eb *eb; | ||
1076 | int ret; | ||
1077 | |||
1078 | d->sect_write_count++; | ||
1079 | |||
1080 | if (d->spare_eblks < MIN_SPARE_EBLOCKS) | ||
1081 | return -ENOSPC; | ||
1082 | |||
1083 | if (header) { | ||
1084 | /* Ignore writes to the header page */ | ||
1085 | if (unlikely(page == 0)) | ||
1086 | return 0; | ||
1087 | |||
1088 | page--; | ||
1089 | } | ||
1090 | |||
1091 | mapped = d->page_data[page]; | ||
1092 | if (mapped <= BLOCK_MAX) { | ||
1093 | eb = d->eb_data + (mapped / d->pages_per_eblk); | ||
1094 | eb->active_count--; | ||
1095 | mtdswap_store_eb(d, eb); | ||
1096 | d->page_data[page] = BLOCK_UNDEF; | ||
1097 | d->revmap[mapped] = PAGE_UNDEF; | ||
1098 | } | ||
1099 | |||
1100 | ret = mtdswap_write_block(d, buf, page, &newblock, 0); | ||
1101 | d->mtd_write_count++; | ||
1102 | |||
1103 | if (ret < 0) | ||
1104 | return ret; | ||
1105 | |||
1106 | eb = d->eb_data + (newblock / d->pages_per_eblk); | ||
1107 | d->page_data[page] = newblock; | ||
1108 | |||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
/*
 * Provide a dummy swap header for the kernel, so the device can be
 * swapped on without running mkswap first (header module parameter).
 */
static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)
{
	union swap_header *hd = (union swap_header *)(buf);

	/* Zero everything except the trailing 10-byte signature area */
	memset(buf, 0, PAGE_SIZE - 10);

	/* Minimal version-1 header: all pages usable, none marked bad */
	hd->info.version = 1;
	hd->info.last_page = d->mbd_dev->size - 1;
	hd->info.nr_badpages = 0;

	/* The swap signature occupies the last 10 bytes of the page */
	memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);

	return 0;
}
1127 | |||
/*
 * Block-layer read hook: read logical swap page @page into @buf.
 *
 * Unmapped pages read back as zeroes; pages lost to earlier write
 * errors return -EIO. Correctable bitflips (-EUCLEAN) re-queue the
 * block for GC but still count as success. Hard read errors are
 * retried up to MTDSWAP_IO_RETRIES times.
 */
static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
			unsigned long page, char *buf)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	struct mtd_info *mtd = d->mtd;
	unsigned int realblock, retries;
	loff_t readpos;
	struct swap_eb *eb;
	size_t retlen;
	int ret;

	d->sect_read_count++;

	if (header) {
		/* Page 0 is the synthesized swap header */
		if (unlikely(page == 0))
			return mtdswap_auto_header(d, buf);

		page--;
	}

	realblock = d->page_data[page];
	if (realblock > BLOCK_MAX) {
		/* BLOCK_UNDEF reads as zeroes; BLOCK_ERROR is a lost page */
		memset(buf, 0x0, PAGE_SIZE);
		if (realblock == BLOCK_UNDEF)
			return 0;
		else
			return -EIO;
	}

	eb = d->eb_data + (realblock / d->pages_per_eblk);
	/* A mapped page must always have a reverse mapping */
	BUG_ON(d->revmap[realblock] == PAGE_UNDEF);

	readpos = (loff_t)realblock << PAGE_SHIFT;
	retries = 0;

retry:
	ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);

	d->mtd_read_count++;
	if (ret == -EUCLEAN) {
		/* Data corrected by ECC: schedule the block for rewriting */
		eb->flags |= EBLOCK_BITFLIP;
		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
		ret = 0;
	}

	if (ret < 0) {
		dev_err(d->dev, "Read error %d\n", ret);
		eb->flags |= EBLOCK_READERR;
		mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
		retries++;
		if (retries < MTDSWAP_IO_RETRIES)
			goto retry;

		return ret;
	}

	if (retlen != PAGE_SIZE) {
		dev_err(d->dev, "Short read %zd\n", retlen);
		return -EIO;
	}

	return 0;
}
1191 | |||
/*
 * Block-layer discard hook: drop the flash mapping of @nr_pages logical
 * pages starting at @first, returning their blocks to the GC trees.
 * Pages previously marked BLOCK_ERROR are simply reset to unmapped.
 */
static int mtdswap_discard(struct mtd_blktrans_dev *dev, unsigned long first,
			unsigned nr_pages)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
	unsigned long page;
	struct swap_eb *eb;
	unsigned int mapped;

	d->discard_count++;

	for (page = first; page < first + nr_pages; page++) {
		mapped = d->page_data[page];
		if (mapped <= BLOCK_MAX) {
			/* Live mapping: release the block for reuse */
			eb = d->eb_data + (mapped / d->pages_per_eblk);
			eb->active_count--;
			mtdswap_store_eb(d, eb);
			d->page_data[page] = BLOCK_UNDEF;
			d->revmap[mapped] = PAGE_UNDEF;
			d->discard_page_count++;
		} else if (mapped == BLOCK_ERROR) {
			/* Error page: clearing the mark makes it writable again */
			d->page_data[page] = BLOCK_UNDEF;
			d->discard_page_count++;
		}
	}

	return 0;
}
1219 | |||
/*
 * debugfs "stats" seq_file: dump per-tree erase-block counts with
 * min/max erase counters, the bad-block count, the current write
 * position and the accumulated I/O statistics. The blktrans device
 * lock is held only while sampling the volatile state.
 */
static int mtdswap_show(struct seq_file *s, void *data)
{
	struct mtdswap_dev *d = (struct mtdswap_dev *) s->private;
	unsigned long sum;
	unsigned int count[MTDSWAP_TREE_CNT];
	unsigned int min[MTDSWAP_TREE_CNT];
	unsigned int max[MTDSWAP_TREE_CNT];
	unsigned int i, cw = 0, cwp = 0, cwecount = 0, bb_cnt, mapped, pages;
	uint64_t use_size;
	char *name[] = {"clean", "used", "low", "high", "dirty", "bitflip",
			"failing"};

	mutex_lock(&d->mbd_dev->lock);

	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		struct rb_root *root = &d->trees[i].root;

		/* min[]/max[] stay uninitialized when count[i] == 0; they
		 * are only read below for non-empty trees. */
		if (root->rb_node) {
			count[i] = d->trees[i].count;
			min[i] = rb_entry(rb_first(root), struct swap_eb,
					rb)->erase_count;
			max[i] = rb_entry(rb_last(root), struct swap_eb,
					rb)->erase_count;
		} else
			count[i] = 0;
	}

	if (d->curr_write) {
		cw = 1;
		cwp = d->curr_write_pos;
		cwecount = d->curr_write->erase_count;
	}

	sum = 0;
	for (i = 0; i < d->eblks; i++)
		sum += d->eb_data[i].erase_count;

	use_size = (uint64_t)d->eblks * d->mtd->erasesize;
	bb_cnt = mtdswap_badblocks(d->mtd, use_size);

	mapped = 0;
	pages = d->mbd_dev->size;
	for (i = 0; i < pages; i++)
		if (d->page_data[i] != BLOCK_UNDEF)
			mapped++;

	mutex_unlock(&d->mbd_dev->lock);

	/* Everything below prints the snapshot taken under the lock */
	for (i = 0; i < MTDSWAP_TREE_CNT; i++) {
		if (!count[i])
			continue;

		if (min[i] != max[i])
			seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
				"max %d times\n",
				name[i], count[i], min[i], max[i]);
		else
			seq_printf(s, "%s:\t%5d erase blocks, all erased %d "
				"times\n", name[i], count[i], min[i]);
	}

	if (bb_cnt)
		seq_printf(s, "bad:\t%5u erase blocks\n", bb_cnt);

	if (cw)
		seq_printf(s, "current erase block: %u pages used, %u free, "
			"erased %u times\n",
			cwp, d->pages_per_eblk - cwp, cwecount);

	seq_printf(s, "total erasures: %lu\n", sum);

	seq_printf(s, "\n");

	seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
	seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
	seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
	seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
	seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
	seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);

	seq_printf(s, "\n");
	seq_printf(s, "total pages: %u\n", pages);
	seq_printf(s, "pages mapped: %u\n", mapped);

	return 0;
}
1306 | |||
/* debugfs open: bind mtdswap_show to this device's private mtdswap_dev */
static int mtdswap_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtdswap_show, inode->i_private);
}
1311 | |||
/* File operations for the read-only debugfs "stats" entry */
static const struct file_operations mtdswap_fops = {
	.open		= mtdswap_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1318 | |||
/*
 * Create <debugfs>/<disk_name>/stats for this device. Returns 0 on
 * success, -1 on failure.
 */
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
	struct gendisk *gd = d->mbd_dev->disk;
	struct device *dev = disk_to_dev(gd);

	struct dentry *root;
	struct dentry *dent;

	root = debugfs_create_dir(gd->disk_name, NULL);
	/* NOTE(review): ERR_PTR (debugfs disabled) is treated as success
	 * here while NULL is treated as failure — confirm this matches
	 * the debugfs_create_dir() return contract for this kernel. */
	if (IS_ERR(root))
		return 0;

	if (!root) {
		dev_err(dev, "failed to initialize debugfs\n");
		return -1;
	}

	d->debugfs_root = root;

	dent = debugfs_create_file("stats", S_IRUSR, root, d,
				&mtdswap_fops);
	if (!dent) {
		dev_err(d->dev, "debugfs_create_file failed\n");
		debugfs_remove_recursive(root);
		d->debugfs_root = NULL;
		return -1;
	}

	return 0;
}
1349 | |||
/*
 * Allocate the in-memory state for one mtdswap instance: the forward
 * page map (page_data), the reverse map (revmap), per-eraseblock
 * bookkeeping (eb_data) and the page/OOB bounce buffers, then scan the
 * existing on-flash eraseblock markers. Returns 0, or -ENOMEM with all
 * partial allocations released.
 */
static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
			unsigned int spare_cnt)
{
	struct mtd_info *mtd = d->mbd_dev->mtd;
	unsigned int i, eblk_bytes, pages, blocks;
	int ret = -ENOMEM;

	d->mtd = mtd;
	d->eblks = eblocks;
	d->spare_eblks = spare_cnt;
	d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;

	pages = d->mbd_dev->size;
	blocks = eblocks * d->pages_per_eblk;

	for (i = 0; i < MTDSWAP_TREE_CNT; i++)
		d->trees[i].root = RB_ROOT;

	/* Logical page -> physical block mapping */
	d->page_data = vmalloc(sizeof(int)*pages);
	if (!d->page_data)
		goto page_data_fail;

	/* Physical block -> logical page mapping */
	d->revmap = vmalloc(sizeof(int)*blocks);
	if (!d->revmap)
		goto revmap_fail;

	eblk_bytes = sizeof(struct swap_eb)*d->eblks;
	d->eb_data = vmalloc(eblk_bytes);
	if (!d->eb_data)
		goto eb_data_fail;

	/* Start with everything unmapped */
	memset(d->eb_data, 0, eblk_bytes);
	for (i = 0; i < pages; i++)
		d->page_data[i] = BLOCK_UNDEF;

	for (i = 0; i < blocks; i++)
		d->revmap[i] = PAGE_UNDEF;

	d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!d->page_buf)
		goto page_buf_fail;

	/* Two OOB areas worth: used for both read-back and compare data */
	d->oob_buf = kmalloc(2 * mtd->ecclayout->oobavail, GFP_KERNEL);
	if (!d->oob_buf)
		goto oob_buf_fail;

	mtdswap_scan_eblks(d);

	return 0;

oob_buf_fail:
	kfree(d->page_buf);
page_buf_fail:
	vfree(d->eb_data);
eb_data_fail:
	vfree(d->revmap);
revmap_fail:
	vfree(d->page_data);
page_data_fail:
	printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
	return ret;
}
1412 | |||
1413 | static void mtdswap_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | ||
1414 | { | ||
1415 | struct mtdswap_dev *d; | ||
1416 | struct mtd_blktrans_dev *mbd_dev; | ||
1417 | char *parts; | ||
1418 | char *this_opt; | ||
1419 | unsigned long part; | ||
1420 | unsigned int eblocks, eavailable, bad_blocks, spare_cnt; | ||
1421 | uint64_t swap_size, use_size, size_limit; | ||
1422 | struct nand_ecclayout *oinfo; | ||
1423 | int ret; | ||
1424 | |||
1425 | parts = &partitions[0]; | ||
1426 | if (!*parts) | ||
1427 | return; | ||
1428 | |||
1429 | while ((this_opt = strsep(&parts, ",")) != NULL) { | ||
1430 | if (strict_strtoul(this_opt, 0, &part) < 0) | ||
1431 | return; | ||
1432 | |||
1433 | if (mtd->index == part) | ||
1434 | break; | ||
1435 | } | ||
1436 | |||
1437 | if (mtd->index != part) | ||
1438 | return; | ||
1439 | |||
1440 | if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) { | ||
1441 | printk(KERN_ERR "%s: Erase size %u not multiple of PAGE_SIZE " | ||
1442 | "%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE); | ||
1443 | return; | ||
1444 | } | ||
1445 | |||
1446 | if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) { | ||
1447 | printk(KERN_ERR "%s: PAGE_SIZE %lu not multiple of write size" | ||
1448 | " %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize); | ||
1449 | return; | ||
1450 | } | ||
1451 | |||
1452 | oinfo = mtd->ecclayout; | ||
1453 | if (!mtd->oobsize || !oinfo || oinfo->oobavail < MTDSWAP_OOBSIZE) { | ||
1454 | printk(KERN_ERR "%s: Not enough free bytes in OOB, " | ||
1455 | "%d available, %lu needed.\n", | ||
1456 | MTDSWAP_PREFIX, oinfo->oobavail, MTDSWAP_OOBSIZE); | ||
1457 | return; | ||
1458 | } | ||
1459 | |||
1460 | if (spare_eblocks > 100) | ||
1461 | spare_eblocks = 100; | ||
1462 | |||
1463 | use_size = mtd->size; | ||
1464 | size_limit = (uint64_t) BLOCK_MAX * PAGE_SIZE; | ||
1465 | |||
1466 | if (mtd->size > size_limit) { | ||
1467 | printk(KERN_WARNING "%s: Device too large. Limiting size to " | ||
1468 | "%llu bytes\n", MTDSWAP_PREFIX, size_limit); | ||
1469 | use_size = size_limit; | ||
1470 | } | ||
1471 | |||
1472 | eblocks = mtd_div_by_eb(use_size, mtd); | ||
1473 | use_size = eblocks * mtd->erasesize; | ||
1474 | bad_blocks = mtdswap_badblocks(mtd, use_size); | ||
1475 | eavailable = eblocks - bad_blocks; | ||
1476 | |||
1477 | if (eavailable < MIN_ERASE_BLOCKS) { | ||
1478 | printk(KERN_ERR "%s: Not enough erase blocks. %u available, " | ||
1479 | "%d needed\n", MTDSWAP_PREFIX, eavailable, | ||
1480 | MIN_ERASE_BLOCKS); | ||
1481 | return; | ||
1482 | } | ||
1483 | |||
1484 | spare_cnt = div_u64((uint64_t)eavailable * spare_eblocks, 100); | ||
1485 | |||
1486 | if (spare_cnt < MIN_SPARE_EBLOCKS) | ||
1487 | spare_cnt = MIN_SPARE_EBLOCKS; | ||
1488 | |||
1489 | if (spare_cnt > eavailable - 1) | ||
1490 | spare_cnt = eavailable - 1; | ||
1491 | |||
1492 | swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize + | ||
1493 | (header ? PAGE_SIZE : 0); | ||
1494 | |||
1495 | printk(KERN_INFO "%s: Enabling MTD swap on device %lu, size %llu KB, " | ||
1496 | "%u spare, %u bad blocks\n", | ||
1497 | MTDSWAP_PREFIX, part, swap_size / 1024, spare_cnt, bad_blocks); | ||
1498 | |||
1499 | d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL); | ||
1500 | if (!d) | ||
1501 | return; | ||
1502 | |||
1503 | mbd_dev = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL); | ||
1504 | if (!mbd_dev) { | ||
1505 | kfree(d); | ||
1506 | return; | ||
1507 | } | ||
1508 | |||
1509 | d->mbd_dev = mbd_dev; | ||
1510 | mbd_dev->priv = d; | ||
1511 | |||
1512 | mbd_dev->mtd = mtd; | ||
1513 | mbd_dev->devnum = mtd->index; | ||
1514 | mbd_dev->size = swap_size >> PAGE_SHIFT; | ||
1515 | mbd_dev->tr = tr; | ||
1516 | |||
1517 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
1518 | mbd_dev->readonly = 1; | ||
1519 | |||
1520 | if (mtdswap_init(d, eblocks, spare_cnt) < 0) | ||
1521 | goto init_failed; | ||
1522 | |||
1523 | if (add_mtd_blktrans_dev(mbd_dev) < 0) | ||
1524 | goto cleanup; | ||
1525 | |||
1526 | d->dev = disk_to_dev(mbd_dev->disk); | ||
1527 | |||
1528 | ret = mtdswap_add_debugfs(d); | ||
1529 | if (ret < 0) | ||
1530 | goto debugfs_failed; | ||
1531 | |||
1532 | return; | ||
1533 | |||
1534 | debugfs_failed: | ||
1535 | del_mtd_blktrans_dev(mbd_dev); | ||
1536 | |||
1537 | cleanup: | ||
1538 | mtdswap_cleanup(d); | ||
1539 | |||
1540 | init_failed: | ||
1541 | kfree(mbd_dev); | ||
1542 | kfree(d); | ||
1543 | } | ||
1544 | |||
/*
 * Tear down one mtdswap instance: remove its debugfs entries, detach it
 * from the block translation layer, then free all bookkeeping memory.
 * The order matters: debugfs and the block device must go before the
 * state they reference is freed.
 */
static void mtdswap_remove_dev(struct mtd_blktrans_dev *dev)
{
	struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

	debugfs_remove_recursive(d->debugfs_root);
	del_mtd_blktrans_dev(dev);
	mtdswap_cleanup(d);
	kfree(d);
}
1554 | |||
/*
 * Block translation layer hooks: each matched MTD device appears as a
 * block device with PAGE_SIZE sectors. Major number 0 requests dynamic
 * allocation.
 */
static struct mtd_blktrans_ops mtdswap_ops = {
	.name		= "mtdswap",
	.major		= 0,
	.part_bits	= 0,
	.blksize	= PAGE_SIZE,
	.flush		= mtdswap_flush,
	.readsect	= mtdswap_readsect,
	.writesect	= mtdswap_writesect,
	.discard	= mtdswap_discard,
	.background	= mtdswap_background,
	.add_mtd	= mtdswap_add_mtd,
	.remove_dev	= mtdswap_remove_dev,
	.owner		= THIS_MODULE,
};
1569 | |||
/* Module init: register the translation layer; add_mtd runs per device */
static int __init mtdswap_modinit(void)
{
	return register_mtd_blktrans(&mtdswap_ops);
}
1574 | |||
/* Module exit: unregister; remove_dev runs for every active instance */
static void __exit mtdswap_modexit(void)
{
	deregister_mtd_blktrans(&mtdswap_ops);
}
1579 | |||
/* Module entry/exit points and metadata */
module_init(mtdswap_modinit);
module_exit(mtdswap_modexit);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
MODULE_DESCRIPTION("Block device access to an MTD suitable for using as "
		"swap space");
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 4f6c06f16328..a92054e945e1 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -31,6 +31,21 @@ config MTD_NAND_VERIFY_WRITE | |||
31 | device thinks the write was successful, a bit could have been | 31 | device thinks the write was successful, a bit could have been |
32 | flipped accidentally due to device wear or something else. | 32 | flipped accidentally due to device wear or something else. |
33 | 33 | ||
34 | config MTD_NAND_BCH | ||
35 | tristate | ||
36 | select BCH | ||
37 | depends on MTD_NAND_ECC_BCH | ||
38 | default MTD_NAND | ||
39 | |||
40 | config MTD_NAND_ECC_BCH | ||
41 | bool "Support software BCH ECC" | ||
42 | default n | ||
43 | help | ||
44 | This enables support for software BCH error correction. Binary BCH | ||
45 | codes are more powerful and CPU-intensive than traditional Hamming | ||
46 | ECC codes. They are used with NAND devices requiring more than 1 bit | ||
47 | of error correction. | ||
48 | |||
34 | config MTD_SM_COMMON | 49 | config MTD_SM_COMMON |
35 | tristate | 50 | tristate |
36 | default n | 51 | default n |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 8ad6faec72cb..5745d831168e 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_MTD_NAND) += nand.o | 5 | obj-$(CONFIG_MTD_NAND) += nand.o |
6 | obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o | 6 | obj-$(CONFIG_MTD_NAND_ECC) += nand_ecc.o |
7 | obj-$(CONFIG_MTD_NAND_BCH) += nand_bch.o | ||
7 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o | 8 | obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o |
8 | obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o | 9 | obj-$(CONFIG_MTD_SM_COMMON) += sm_common.o |
9 | 10 | ||
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index ccce0f03b5dc..6fae04b3fc6d 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -48,6 +48,9 @@ | |||
48 | #define no_ecc 0 | 48 | #define no_ecc 0 |
49 | #endif | 49 | #endif |
50 | 50 | ||
51 | static int use_dma = 1; | ||
52 | module_param(use_dma, int, 0); | ||
53 | |||
51 | static int on_flash_bbt = 0; | 54 | static int on_flash_bbt = 0; |
52 | module_param(on_flash_bbt, int, 0); | 55 | module_param(on_flash_bbt, int, 0); |
53 | 56 | ||
@@ -89,11 +92,20 @@ struct atmel_nand_host { | |||
89 | struct nand_chip nand_chip; | 92 | struct nand_chip nand_chip; |
90 | struct mtd_info mtd; | 93 | struct mtd_info mtd; |
91 | void __iomem *io_base; | 94 | void __iomem *io_base; |
95 | dma_addr_t io_phys; | ||
92 | struct atmel_nand_data *board; | 96 | struct atmel_nand_data *board; |
93 | struct device *dev; | 97 | struct device *dev; |
94 | void __iomem *ecc; | 98 | void __iomem *ecc; |
99 | |||
100 | struct completion comp; | ||
101 | struct dma_chan *dma_chan; | ||
95 | }; | 102 | }; |
96 | 103 | ||
104 | static int cpu_has_dma(void) | ||
105 | { | ||
106 | return cpu_is_at91sam9rl() || cpu_is_at91sam9g45(); | ||
107 | } | ||
108 | |||
97 | /* | 109 | /* |
98 | * Enable NAND. | 110 | * Enable NAND. |
99 | */ | 111 | */ |
@@ -150,7 +162,7 @@ static int atmel_nand_device_ready(struct mtd_info *mtd) | |||
150 | /* | 162 | /* |
151 | * Minimal-overhead PIO for data access. | 163 | * Minimal-overhead PIO for data access. |
152 | */ | 164 | */ |
153 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | 165 | static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len) |
154 | { | 166 | { |
155 | struct nand_chip *nand_chip = mtd->priv; | 167 | struct nand_chip *nand_chip = mtd->priv; |
156 | 168 | ||
@@ -164,7 +176,7 @@ static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len) | |||
164 | __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); | 176 | __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2); |
165 | } | 177 | } |
166 | 178 | ||
167 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | 179 | static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len) |
168 | { | 180 | { |
169 | struct nand_chip *nand_chip = mtd->priv; | 181 | struct nand_chip *nand_chip = mtd->priv; |
170 | 182 | ||
@@ -178,6 +190,121 @@ static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len) | |||
178 | __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); | 190 | __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2); |
179 | } | 191 | } |
180 | 192 | ||
/* DMA completion callback: wake the waiter blocked in atmel_nand_dma_op(). */
static void dma_complete_func(void *completion)
{
	struct completion *comp = completion;

	complete(comp);
}
197 | |||
198 | static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len, | ||
199 | int is_read) | ||
200 | { | ||
201 | struct dma_device *dma_dev; | ||
202 | enum dma_ctrl_flags flags; | ||
203 | dma_addr_t dma_src_addr, dma_dst_addr, phys_addr; | ||
204 | struct dma_async_tx_descriptor *tx = NULL; | ||
205 | dma_cookie_t cookie; | ||
206 | struct nand_chip *chip = mtd->priv; | ||
207 | struct atmel_nand_host *host = chip->priv; | ||
208 | void *p = buf; | ||
209 | int err = -EIO; | ||
210 | enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | ||
211 | |||
212 | if (buf >= high_memory) { | ||
213 | struct page *pg; | ||
214 | |||
215 | if (((size_t)buf & PAGE_MASK) != | ||
216 | ((size_t)(buf + len - 1) & PAGE_MASK)) { | ||
217 | dev_warn(host->dev, "Buffer not fit in one page\n"); | ||
218 | goto err_buf; | ||
219 | } | ||
220 | |||
221 | pg = vmalloc_to_page(buf); | ||
222 | if (pg == 0) { | ||
223 | dev_err(host->dev, "Failed to vmalloc_to_page\n"); | ||
224 | goto err_buf; | ||
225 | } | ||
226 | p = page_address(pg) + ((size_t)buf & ~PAGE_MASK); | ||
227 | } | ||
228 | |||
229 | dma_dev = host->dma_chan->device; | ||
230 | |||
231 | flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP | | ||
232 | DMA_COMPL_SKIP_DEST_UNMAP; | ||
233 | |||
234 | phys_addr = dma_map_single(dma_dev->dev, p, len, dir); | ||
235 | if (dma_mapping_error(dma_dev->dev, phys_addr)) { | ||
236 | dev_err(host->dev, "Failed to dma_map_single\n"); | ||
237 | goto err_buf; | ||
238 | } | ||
239 | |||
240 | if (is_read) { | ||
241 | dma_src_addr = host->io_phys; | ||
242 | dma_dst_addr = phys_addr; | ||
243 | } else { | ||
244 | dma_src_addr = phys_addr; | ||
245 | dma_dst_addr = host->io_phys; | ||
246 | } | ||
247 | |||
248 | tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr, | ||
249 | dma_src_addr, len, flags); | ||
250 | if (!tx) { | ||
251 | dev_err(host->dev, "Failed to prepare DMA memcpy\n"); | ||
252 | goto err_dma; | ||
253 | } | ||
254 | |||
255 | init_completion(&host->comp); | ||
256 | tx->callback = dma_complete_func; | ||
257 | tx->callback_param = &host->comp; | ||
258 | |||
259 | cookie = tx->tx_submit(tx); | ||
260 | if (dma_submit_error(cookie)) { | ||
261 | dev_err(host->dev, "Failed to do DMA tx_submit\n"); | ||
262 | goto err_dma; | ||
263 | } | ||
264 | |||
265 | dma_async_issue_pending(host->dma_chan); | ||
266 | wait_for_completion(&host->comp); | ||
267 | |||
268 | err = 0; | ||
269 | |||
270 | err_dma: | ||
271 | dma_unmap_single(dma_dev->dev, phys_addr, len, dir); | ||
272 | err_buf: | ||
273 | if (err != 0) | ||
274 | dev_warn(host->dev, "Fall back to CPU I/O\n"); | ||
275 | return err; | ||
276 | } | ||
277 | |||
278 | static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len) | ||
279 | { | ||
280 | struct nand_chip *chip = mtd->priv; | ||
281 | struct atmel_nand_host *host = chip->priv; | ||
282 | |||
283 | if (use_dma && len >= mtd->oobsize) | ||
284 | if (atmel_nand_dma_op(mtd, buf, len, 1) == 0) | ||
285 | return; | ||
286 | |||
287 | if (host->board->bus_width_16) | ||
288 | atmel_read_buf16(mtd, buf, len); | ||
289 | else | ||
290 | atmel_read_buf8(mtd, buf, len); | ||
291 | } | ||
292 | |||
293 | static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | ||
294 | { | ||
295 | struct nand_chip *chip = mtd->priv; | ||
296 | struct atmel_nand_host *host = chip->priv; | ||
297 | |||
298 | if (use_dma && len >= mtd->oobsize) | ||
299 | if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0) | ||
300 | return; | ||
301 | |||
302 | if (host->board->bus_width_16) | ||
303 | atmel_write_buf16(mtd, buf, len); | ||
304 | else | ||
305 | atmel_write_buf8(mtd, buf, len); | ||
306 | } | ||
307 | |||
181 | /* | 308 | /* |
182 | * Calculate HW ECC | 309 | * Calculate HW ECC |
183 | * | 310 | * |
@@ -398,6 +525,8 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
398 | return -ENOMEM; | 525 | return -ENOMEM; |
399 | } | 526 | } |
400 | 527 | ||
528 | host->io_phys = (dma_addr_t)mem->start; | ||
529 | |||
401 | host->io_base = ioremap(mem->start, mem->end - mem->start + 1); | 530 | host->io_base = ioremap(mem->start, mem->end - mem->start + 1); |
402 | if (host->io_base == NULL) { | 531 | if (host->io_base == NULL) { |
403 | printk(KERN_ERR "atmel_nand: ioremap failed\n"); | 532 | printk(KERN_ERR "atmel_nand: ioremap failed\n"); |
@@ -448,14 +577,11 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
448 | 577 | ||
449 | nand_chip->chip_delay = 20; /* 20us command delay time */ | 578 | nand_chip->chip_delay = 20; /* 20us command delay time */ |
450 | 579 | ||
451 | if (host->board->bus_width_16) { /* 16-bit bus width */ | 580 | if (host->board->bus_width_16) /* 16-bit bus width */ |
452 | nand_chip->options |= NAND_BUSWIDTH_16; | 581 | nand_chip->options |= NAND_BUSWIDTH_16; |
453 | nand_chip->read_buf = atmel_read_buf16; | 582 | |
454 | nand_chip->write_buf = atmel_write_buf16; | 583 | nand_chip->read_buf = atmel_read_buf; |
455 | } else { | 584 | nand_chip->write_buf = atmel_write_buf; |
456 | nand_chip->read_buf = atmel_read_buf; | ||
457 | nand_chip->write_buf = atmel_write_buf; | ||
458 | } | ||
459 | 585 | ||
460 | platform_set_drvdata(pdev, host); | 586 | platform_set_drvdata(pdev, host); |
461 | atmel_nand_enable(host); | 587 | atmel_nand_enable(host); |
@@ -473,6 +599,22 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
473 | nand_chip->options |= NAND_USE_FLASH_BBT; | 599 | nand_chip->options |= NAND_USE_FLASH_BBT; |
474 | } | 600 | } |
475 | 601 | ||
602 | if (cpu_has_dma() && use_dma) { | ||
603 | dma_cap_mask_t mask; | ||
604 | |||
605 | dma_cap_zero(mask); | ||
606 | dma_cap_set(DMA_MEMCPY, mask); | ||
607 | host->dma_chan = dma_request_channel(mask, 0, NULL); | ||
608 | if (!host->dma_chan) { | ||
609 | dev_err(host->dev, "Failed to request DMA channel\n"); | ||
610 | use_dma = 0; | ||
611 | } | ||
612 | } | ||
613 | if (use_dma) | ||
614 | dev_info(host->dev, "Using DMA for NAND access.\n"); | ||
615 | else | ||
616 | dev_info(host->dev, "No DMA support for NAND access.\n"); | ||
617 | |||
476 | /* first scan to find the device and get the page size */ | 618 | /* first scan to find the device and get the page size */ |
477 | if (nand_scan_ident(mtd, 1, NULL)) { | 619 | if (nand_scan_ident(mtd, 1, NULL)) { |
478 | res = -ENXIO; | 620 | res = -ENXIO; |
@@ -555,6 +697,8 @@ err_scan_ident: | |||
555 | err_no_card: | 697 | err_no_card: |
556 | atmel_nand_disable(host); | 698 | atmel_nand_disable(host); |
557 | platform_set_drvdata(pdev, NULL); | 699 | platform_set_drvdata(pdev, NULL); |
700 | if (host->dma_chan) | ||
701 | dma_release_channel(host->dma_chan); | ||
558 | if (host->ecc) | 702 | if (host->ecc) |
559 | iounmap(host->ecc); | 703 | iounmap(host->ecc); |
560 | err_ecc_ioremap: | 704 | err_ecc_ioremap: |
@@ -578,6 +722,10 @@ static int __exit atmel_nand_remove(struct platform_device *pdev) | |||
578 | 722 | ||
579 | if (host->ecc) | 723 | if (host->ecc) |
580 | iounmap(host->ecc); | 724 | iounmap(host->ecc); |
725 | |||
726 | if (host->dma_chan) | ||
727 | dma_release_channel(host->dma_chan); | ||
728 | |||
581 | iounmap(host->io_base); | 729 | iounmap(host->io_base); |
582 | kfree(host); | 730 | kfree(host); |
583 | 731 | ||
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index a90fde3ede28..aff3468867ac 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
@@ -37,9 +37,6 @@ | |||
37 | #include <mach/nand.h> | 37 | #include <mach/nand.h> |
38 | #include <mach/aemif.h> | 38 | #include <mach/aemif.h> |
39 | 39 | ||
40 | #include <asm/mach-types.h> | ||
41 | |||
42 | |||
43 | /* | 40 | /* |
44 | * This is a device driver for the NAND flash controller found on the | 41 | * This is a device driver for the NAND flash controller found on the |
45 | * various DaVinci family chips. It handles up to four SoC chipselects, | 42 | * various DaVinci family chips. It handles up to four SoC chipselects, |
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index c2f95437e5e9..0b81b5b499d1 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/clk.h> | 29 | #include <linux/clk.h> |
30 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
31 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
32 | #include <linux/err.h> | ||
32 | #include <linux/init.h> | 33 | #include <linux/init.h> |
33 | #include <linux/interrupt.h> | 34 | #include <linux/interrupt.h> |
34 | #include <linux/io.h> | 35 | #include <linux/io.h> |
@@ -757,9 +758,9 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op) | |||
757 | 758 | ||
758 | /* Enable NFC clock */ | 759 | /* Enable NFC clock */ |
759 | prv->clk = clk_get(dev, "nfc_clk"); | 760 | prv->clk = clk_get(dev, "nfc_clk"); |
760 | if (!prv->clk) { | 761 | if (IS_ERR(prv->clk)) { |
761 | dev_err(dev, "Unable to acquire NFC clock!\n"); | 762 | dev_err(dev, "Unable to acquire NFC clock!\n"); |
762 | retval = -ENODEV; | 763 | retval = PTR_ERR(prv->clk); |
763 | goto error; | 764 | goto error; |
764 | } | 765 | } |
765 | 766 | ||
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 5ae1d9ee2cf1..42a95fb41504 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -211,6 +211,31 @@ static struct nand_ecclayout nandv2_hw_eccoob_largepage = { | |||
211 | } | 211 | } |
212 | }; | 212 | }; |
213 | 213 | ||
/* OOB description for 4096 byte pages with 128 byte OOB */
static struct nand_ecclayout nandv2_hw_eccoob_4k = {
	.eccbytes = 8 * 9,	/* 8 sub-blocks, 9 ECC bytes each */
	.eccpos = {
		/* ECC occupies the last 9 bytes of each 16-byte OOB slice */
		7, 8, 9, 10, 11, 12, 13, 14, 15,
		23, 24, 25, 26, 27, 28, 29, 30, 31,
		39, 40, 41, 42, 43, 44, 45, 46, 47,
		55, 56, 57, 58, 59, 60, 61, 62, 63,
		71, 72, 73, 74, 75, 76, 77, 78, 79,
		87, 88, 89, 90, 91, 92, 93, 94, 95,
		103, 104, 105, 106, 107, 108, 109, 110, 111,
		119, 120, 121, 122, 123, 124, 125, 126, 127,
	},
	.oobfree = {
		/* bytes 0-1 are skipped — presumably the bad block marker;
		 * confirm against the controller's OOB usage */
		{.offset = 2, .length = 4},
		{.offset = 16, .length = 7},
		{.offset = 32, .length = 7},
		{.offset = 48, .length = 7},
		{.offset = 64, .length = 7},
		{.offset = 80, .length = 7},
		{.offset = 96, .length = 7},
		{.offset = 112, .length = 7},
	}
};
238 | |||
214 | #ifdef CONFIG_MTD_PARTITIONS | 239 | #ifdef CONFIG_MTD_PARTITIONS |
215 | static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; | 240 | static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; |
216 | #endif | 241 | #endif |
@@ -641,9 +666,9 @@ static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) | |||
641 | 666 | ||
642 | n = min(n, len); | 667 | n = min(n, len); |
643 | 668 | ||
644 | memcpy(buf, host->data_buf + col, len); | 669 | memcpy(buf, host->data_buf + col, n); |
645 | 670 | ||
646 | host->buf_start += len; | 671 | host->buf_start += n; |
647 | } | 672 | } |
648 | 673 | ||
649 | /* Used by the upper layer to verify the data in NAND Flash | 674 | /* Used by the upper layer to verify the data in NAND Flash |
@@ -1185,6 +1210,8 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
1185 | 1210 | ||
1186 | if (mtd->writesize == 2048) | 1211 | if (mtd->writesize == 2048) |
1187 | this->ecc.layout = oob_largepage; | 1212 | this->ecc.layout = oob_largepage; |
1213 | if (nfc_is_v21() && mtd->writesize == 4096) | ||
1214 | this->ecc.layout = &nandv2_hw_eccoob_4k; | ||
1188 | 1215 | ||
1189 | /* second phase scan */ | 1216 | /* second phase scan */ |
1190 | if (nand_scan_tail(mtd)) { | 1217 | if (nand_scan_tail(mtd)) { |
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index a9c6ce745767..85cfc061d41c 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/mtd/mtd.h> | 42 | #include <linux/mtd/mtd.h> |
43 | #include <linux/mtd/nand.h> | 43 | #include <linux/mtd/nand.h> |
44 | #include <linux/mtd/nand_ecc.h> | 44 | #include <linux/mtd/nand_ecc.h> |
45 | #include <linux/mtd/nand_bch.h> | ||
45 | #include <linux/interrupt.h> | 46 | #include <linux/interrupt.h> |
46 | #include <linux/bitops.h> | 47 | #include <linux/bitops.h> |
47 | #include <linux/leds.h> | 48 | #include <linux/leds.h> |
@@ -2377,7 +2378,7 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, | |||
2377 | return -EINVAL; | 2378 | return -EINVAL; |
2378 | } | 2379 | } |
2379 | 2380 | ||
2380 | /* Do not allow reads past end of device */ | 2381 | /* Do not allow write past end of device */ |
2381 | if (unlikely(to >= mtd->size || | 2382 | if (unlikely(to >= mtd->size || |
2382 | ops->ooboffs + ops->ooblen > | 2383 | ops->ooboffs + ops->ooblen > |
2383 | ((mtd->size >> chip->page_shift) - | 2384 | ((mtd->size >> chip->page_shift) - |
@@ -3248,7 +3249,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3248 | /* | 3249 | /* |
3249 | * If no default placement scheme is given, select an appropriate one | 3250 | * If no default placement scheme is given, select an appropriate one |
3250 | */ | 3251 | */ |
3251 | if (!chip->ecc.layout) { | 3252 | if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) { |
3252 | switch (mtd->oobsize) { | 3253 | switch (mtd->oobsize) { |
3253 | case 8: | 3254 | case 8: |
3254 | chip->ecc.layout = &nand_oob_8; | 3255 | chip->ecc.layout = &nand_oob_8; |
@@ -3351,6 +3352,40 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3351 | chip->ecc.bytes = 3; | 3352 | chip->ecc.bytes = 3; |
3352 | break; | 3353 | break; |
3353 | 3354 | ||
3355 | case NAND_ECC_SOFT_BCH: | ||
3356 | if (!mtd_nand_has_bch()) { | ||
3357 | printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n"); | ||
3358 | BUG(); | ||
3359 | } | ||
3360 | chip->ecc.calculate = nand_bch_calculate_ecc; | ||
3361 | chip->ecc.correct = nand_bch_correct_data; | ||
3362 | chip->ecc.read_page = nand_read_page_swecc; | ||
3363 | chip->ecc.read_subpage = nand_read_subpage; | ||
3364 | chip->ecc.write_page = nand_write_page_swecc; | ||
3365 | chip->ecc.read_page_raw = nand_read_page_raw; | ||
3366 | chip->ecc.write_page_raw = nand_write_page_raw; | ||
3367 | chip->ecc.read_oob = nand_read_oob_std; | ||
3368 | chip->ecc.write_oob = nand_write_oob_std; | ||
3369 | /* | ||
3370 | * Board driver should supply ecc.size and ecc.bytes values to | ||
3371 | * select how many bits are correctable; see nand_bch_init() | ||
3372 | * for details. | ||
3373 | * Otherwise, default to 4 bits for large page devices | ||
3374 | */ | ||
3375 | if (!chip->ecc.size && (mtd->oobsize >= 64)) { | ||
3376 | chip->ecc.size = 512; | ||
3377 | chip->ecc.bytes = 7; | ||
3378 | } | ||
3379 | chip->ecc.priv = nand_bch_init(mtd, | ||
3380 | chip->ecc.size, | ||
3381 | chip->ecc.bytes, | ||
3382 | &chip->ecc.layout); | ||
3383 | if (!chip->ecc.priv) { | ||
3384 | printk(KERN_WARNING "BCH ECC initialization failed!\n"); | ||
3385 | BUG(); | ||
3386 | } | ||
3387 | break; | ||
3388 | |||
3354 | case NAND_ECC_NONE: | 3389 | case NAND_ECC_NONE: |
3355 | printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " | 3390 | printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. " |
3356 | "This is not recommended !!\n"); | 3391 | "This is not recommended !!\n"); |
@@ -3501,6 +3536,9 @@ void nand_release(struct mtd_info *mtd) | |||
3501 | { | 3536 | { |
3502 | struct nand_chip *chip = mtd->priv; | 3537 | struct nand_chip *chip = mtd->priv; |
3503 | 3538 | ||
3539 | if (chip->ecc.mode == NAND_ECC_SOFT_BCH) | ||
3540 | nand_bch_free((struct nand_bch_control *)chip->ecc.priv); | ||
3541 | |||
3504 | #ifdef CONFIG_MTD_PARTITIONS | 3542 | #ifdef CONFIG_MTD_PARTITIONS |
3505 | /* Deregister partitions */ | 3543 | /* Deregister partitions */ |
3506 | del_mtd_partitions(mtd); | 3544 | del_mtd_partitions(mtd); |
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c index 6ebd869993aa..a1e8b30078d9 100644 --- a/drivers/mtd/nand/nand_bbt.c +++ b/drivers/mtd/nand/nand_bbt.c | |||
@@ -1101,12 +1101,16 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td) | |||
1101 | static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) | 1101 | static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd) |
1102 | { | 1102 | { |
1103 | struct nand_chip *this = mtd->priv; | 1103 | struct nand_chip *this = mtd->priv; |
1104 | u32 pattern_len = bd->len; | 1104 | u32 pattern_len; |
1105 | u32 bits = bd->options & NAND_BBT_NRBITS_MSK; | 1105 | u32 bits; |
1106 | u32 table_size; | 1106 | u32 table_size; |
1107 | 1107 | ||
1108 | if (!bd) | 1108 | if (!bd) |
1109 | return; | 1109 | return; |
1110 | |||
1111 | pattern_len = bd->len; | ||
1112 | bits = bd->options & NAND_BBT_NRBITS_MSK; | ||
1113 | |||
1110 | BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && | 1114 | BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) && |
1111 | !(this->options & NAND_USE_FLASH_BBT)); | 1115 | !(this->options & NAND_USE_FLASH_BBT)); |
1112 | BUG_ON(!bits); | 1116 | BUG_ON(!bits); |
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c new file mode 100644 index 000000000000..0f931e757116 --- /dev/null +++ b/drivers/mtd/nand/nand_bch.c | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * This file provides ECC correction for more than 1 bit per block of data, | ||
3 | * using binary BCH codes. It relies on the generic BCH library lib/bch.c. | ||
4 | * | ||
5 | * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com> | ||
6 | * | ||
7 | * This file is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 or (at your option) any | ||
10 | * later version. | ||
11 | * | ||
12 | * This file is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
15 | * for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along | ||
18 | * with this file; if not, write to the Free Software Foundation, Inc., | ||
19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/types.h> | ||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/mtd/mtd.h> | ||
28 | #include <linux/mtd/nand.h> | ||
29 | #include <linux/mtd/nand_bch.h> | ||
30 | #include <linux/bch.h> | ||
31 | |||
/**
 * struct nand_bch_control - private NAND BCH control structure
 * @bch:       BCH control structure
 * @ecclayout: private ecc layout for this BCH configuration
 * @errloc:    error location array
 * @eccmask:   XOR ecc mask, allows erased pages to be decoded as valid
 *
 * Allocated by nand_bch_init() and released by nand_bch_free().
 */
struct nand_bch_control {
	struct bch_control   *bch;
	struct nand_ecclayout ecclayout;
	unsigned int         *errloc;
	unsigned char        *eccmask;
};
45 | |||
46 | /** | ||
47 | * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block | ||
48 | * @mtd: MTD block structure | ||
49 | * @buf: input buffer with raw data | ||
50 | * @code: output buffer with ECC | ||
51 | */ | ||
52 | int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf, | ||
53 | unsigned char *code) | ||
54 | { | ||
55 | const struct nand_chip *chip = mtd->priv; | ||
56 | struct nand_bch_control *nbc = chip->ecc.priv; | ||
57 | unsigned int i; | ||
58 | |||
59 | memset(code, 0, chip->ecc.bytes); | ||
60 | encode_bch(nbc->bch, buf, chip->ecc.size, code); | ||
61 | |||
62 | /* apply mask so that an erased page is a valid codeword */ | ||
63 | for (i = 0; i < chip->ecc.bytes; i++) | ||
64 | code[i] ^= nbc->eccmask[i]; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | EXPORT_SYMBOL(nand_bch_calculate_ecc); | ||
69 | |||
70 | /** | ||
71 | * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s) | ||
72 | * @mtd: MTD block structure | ||
73 | * @buf: raw data read from the chip | ||
74 | * @read_ecc: ECC from the chip | ||
75 | * @calc_ecc: the ECC calculated from raw data | ||
76 | * | ||
77 | * Detect and correct bit errors for a data byte block | ||
78 | */ | ||
79 | int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, | ||
80 | unsigned char *read_ecc, unsigned char *calc_ecc) | ||
81 | { | ||
82 | const struct nand_chip *chip = mtd->priv; | ||
83 | struct nand_bch_control *nbc = chip->ecc.priv; | ||
84 | unsigned int *errloc = nbc->errloc; | ||
85 | int i, count; | ||
86 | |||
87 | count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc, | ||
88 | NULL, errloc); | ||
89 | if (count > 0) { | ||
90 | for (i = 0; i < count; i++) { | ||
91 | if (errloc[i] < (chip->ecc.size*8)) | ||
92 | /* error is located in data, correct it */ | ||
93 | buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7)); | ||
94 | /* else error in ecc, no action needed */ | ||
95 | |||
96 | DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n", | ||
97 | __func__, errloc[i]); | ||
98 | } | ||
99 | } else if (count < 0) { | ||
100 | printk(KERN_ERR "ecc unrecoverable error\n"); | ||
101 | count = -1; | ||
102 | } | ||
103 | return count; | ||
104 | } | ||
105 | EXPORT_SYMBOL(nand_bch_correct_data); | ||
106 | |||
/**
 * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
 * @mtd:	MTD block structure
 * @eccsize:	ecc block size in bytes
 * @eccbytes:	ecc length in bytes
 * @ecclayout:	output default layout
 *
 * Returns:
 *  a pointer to a new NAND BCH control structure, or NULL upon failure
 *
 * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
 * are used to compute BCH parameters m (Galois field order) and t (error
 * correction capability). @eccbytes should be equal to the number of bytes
 * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
 *
 * Example: to configure 4 bit correction per 512 bytes, you should pass
 * @eccsize = 512  (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
 * @eccbytes = 7   (7 bytes are required to store m*t = 13*4 = 52 bits)
 */
struct nand_bch_control *
nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
	      struct nand_ecclayout **ecclayout)
{
	unsigned int m, t, eccsteps, i;
	struct nand_ecclayout *layout;
	struct nand_bch_control *nbc = NULL;
	unsigned char *erased_page;

	if (!eccsize || !eccbytes) {
		printk(KERN_WARNING "ecc parameters not supplied\n");
		goto fail;
	}

	/* smallest m such that 2^m-1 > 8*eccsize (see kernel-doc above) */
	m = fls(1+8*eccsize);
	/* correction capability implied by the available parity bytes */
	t = (eccbytes*8)/m;

	/* kzalloc: members start NULL, so nand_bch_free() on the fail path
	 * is safe even after a partial initialization */
	nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
	if (!nbc)
		goto fail;

	nbc->bch = init_bch(m, t, 0);
	if (!nbc->bch)
		goto fail;

	/* verify that eccbytes has the expected value */
	if (nbc->bch->ecc_bytes != eccbytes) {
		printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
		       eccbytes, nbc->bch->ecc_bytes);
		goto fail;
	}

	eccsteps = mtd->writesize/eccsize;

	/* if no ecc placement scheme was provided, build one */
	if (!*ecclayout) {

		/* handle large page devices only */
		if (mtd->oobsize < 64) {
			printk(KERN_WARNING "must provide an oob scheme for "
			       "oobsize %d\n", mtd->oobsize);
			goto fail;
		}

		layout = &nbc->ecclayout;
		layout->eccbytes = eccsteps*eccbytes;

		/* reserve 2 bytes for bad block marker */
		if (layout->eccbytes+2 > mtd->oobsize) {
			printk(KERN_WARNING "no suitable oob scheme available "
			       "for oobsize %d eccbytes %u\n", mtd->oobsize,
			       eccbytes);
			goto fail;
		}
		/* put ecc bytes at oob tail */
		for (i = 0; i < layout->eccbytes; i++)
			layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;

		layout->oobfree[0].offset = 2;
		layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;

		*ecclayout = layout;
	}

	/* sanity checks */
	if (8*(eccsize+eccbytes) >= (1 << m)) {
		printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
		goto fail;
	}
	if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
		printk(KERN_WARNING "invalid ecc layout\n");
		goto fail;
	}

	nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
	nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
	if (!nbc->eccmask || !nbc->errloc)
		goto fail;
	/*
	 * compute and store the inverted ecc of an erased ecc block
	 */
	erased_page = kmalloc(eccsize, GFP_KERNEL);
	if (!erased_page)
		goto fail;

	memset(erased_page, 0xff, eccsize);
	memset(nbc->eccmask, 0, eccbytes);
	encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
	kfree(erased_page);

	/* invert, so XORing with the mask lets an all-0xff (erased) page
	 * decode as a valid codeword */
	for (i = 0; i < eccbytes; i++)
		nbc->eccmask[i] ^= 0xff;

	return nbc;
fail:
	nand_bch_free(nbc);
	return NULL;
}
EXPORT_SYMBOL(nand_bch_init);
225 | |||
226 | /** | ||
227 | * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources | ||
228 | * @nbc: NAND BCH control structure | ||
229 | */ | ||
230 | void nand_bch_free(struct nand_bch_control *nbc) | ||
231 | { | ||
232 | if (nbc) { | ||
233 | free_bch(nbc->bch); | ||
234 | kfree(nbc->errloc); | ||
235 | kfree(nbc->eccmask); | ||
236 | kfree(nbc); | ||
237 | } | ||
238 | } | ||
239 | EXPORT_SYMBOL(nand_bch_free); | ||
240 | |||
241 | MODULE_LICENSE("GPL"); | ||
242 | MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>"); | ||
243 | MODULE_DESCRIPTION("NAND software BCH ECC support"); | ||
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index a5aa99f014ba..213181be0d9a 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/string.h> | 34 | #include <linux/string.h> |
35 | #include <linux/mtd/mtd.h> | 35 | #include <linux/mtd/mtd.h> |
36 | #include <linux/mtd/nand.h> | 36 | #include <linux/mtd/nand.h> |
37 | #include <linux/mtd/nand_bch.h> | ||
37 | #include <linux/mtd/partitions.h> | 38 | #include <linux/mtd/partitions.h> |
38 | #include <linux/delay.h> | 39 | #include <linux/delay.h> |
39 | #include <linux/list.h> | 40 | #include <linux/list.h> |
@@ -108,6 +109,7 @@ static unsigned int rptwear = 0; | |||
108 | static unsigned int overridesize = 0; | 109 | static unsigned int overridesize = 0; |
109 | static char *cache_file = NULL; | 110 | static char *cache_file = NULL; |
110 | static unsigned int bbt; | 111 | static unsigned int bbt; |
112 | static unsigned int bch; | ||
111 | 113 | ||
112 | module_param(first_id_byte, uint, 0400); | 114 | module_param(first_id_byte, uint, 0400); |
113 | module_param(second_id_byte, uint, 0400); | 115 | module_param(second_id_byte, uint, 0400); |
@@ -132,6 +134,7 @@ module_param(rptwear, uint, 0400); | |||
132 | module_param(overridesize, uint, 0400); | 134 | module_param(overridesize, uint, 0400); |
133 | module_param(cache_file, charp, 0400); | 135 | module_param(cache_file, charp, 0400); |
134 | module_param(bbt, uint, 0400); | 136 | module_param(bbt, uint, 0400); |
137 | module_param(bch, uint, 0400); | ||
135 | 138 | ||
136 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); | 139 | MODULE_PARM_DESC(first_id_byte, "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)"); |
137 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); | 140 | MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)"); |
@@ -165,6 +168,8 @@ MODULE_PARM_DESC(overridesize, "Specifies the NAND Flash size overriding the I | |||
165 | " e.g. 5 means a size of 32 erase blocks"); | 168 | " e.g. 5 means a size of 32 erase blocks"); |
166 | MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); | 169 | MODULE_PARM_DESC(cache_file, "File to use to cache nand pages instead of memory"); |
167 | MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area"); | 170 | MODULE_PARM_DESC(bbt, "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area"); |
171 | MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " | ||
172 | "be correctable in 512-byte blocks"); | ||
168 | 173 | ||
169 | /* The largest possible page size */ | 174 | /* The largest possible page size */ |
170 | #define NS_LARGEST_PAGE_SIZE 4096 | 175 | #define NS_LARGEST_PAGE_SIZE 4096 |
@@ -2309,7 +2314,43 @@ static int __init ns_init_module(void) | |||
2309 | if ((retval = parse_gravepages()) != 0) | 2314 | if ((retval = parse_gravepages()) != 0) |
2310 | goto error; | 2315 | goto error; |
2311 | 2316 | ||
2312 | if ((retval = nand_scan(nsmtd, 1)) != 0) { | 2317 | retval = nand_scan_ident(nsmtd, 1, NULL); |
2318 | if (retval) { | ||
2319 | NS_ERR("cannot scan NAND Simulator device\n"); | ||
2320 | if (retval > 0) | ||
2321 | retval = -ENXIO; | ||
2322 | goto error; | ||
2323 | } | ||
2324 | |||
2325 | if (bch) { | ||
2326 | unsigned int eccsteps, eccbytes; | ||
2327 | if (!mtd_nand_has_bch()) { | ||
2328 | NS_ERR("BCH ECC support is disabled\n"); | ||
2329 | retval = -EINVAL; | ||
2330 | goto error; | ||
2331 | } | ||
2332 | /* use 512-byte ecc blocks */ | ||
2333 | eccsteps = nsmtd->writesize/512; | ||
2334 | eccbytes = (bch*13+7)/8; | ||
2335 | /* do not bother supporting small page devices */ | ||
2336 | if ((nsmtd->oobsize < 64) || !eccsteps) { | ||
2337 | NS_ERR("bch not available on small page devices\n"); | ||
2338 | retval = -EINVAL; | ||
2339 | goto error; | ||
2340 | } | ||
2341 | if ((eccbytes*eccsteps+2) > nsmtd->oobsize) { | ||
2342 | NS_ERR("invalid bch value %u\n", bch); | ||
2343 | retval = -EINVAL; | ||
2344 | goto error; | ||
2345 | } | ||
2346 | chip->ecc.mode = NAND_ECC_SOFT_BCH; | ||
2347 | chip->ecc.size = 512; | ||
2348 | chip->ecc.bytes = eccbytes; | ||
2349 | NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); | ||
2350 | } | ||
2351 | |||
2352 | retval = nand_scan_tail(nsmtd); | ||
2353 | if (retval) { | ||
2313 | NS_ERR("can't register NAND Simulator\n"); | 2354 | NS_ERR("can't register NAND Simulator\n"); |
2314 | if (retval > 0) | 2355 | if (retval > 0) |
2315 | retval = -ENXIO; | 2356 | retval = -ENXIO; |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 7b8f1fffc528..da9a351c9d79 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -668,6 +668,8 @@ static void gen_true_ecc(u8 *ecc_buf) | |||
668 | * | 668 | * |
669 | * This function compares two ECC's and indicates if there is an error. | 669 | * This function compares two ECC's and indicates if there is an error. |
670 | * If the error can be corrected it will be corrected to the buffer. | 670 | * If the error can be corrected it will be corrected to the buffer. |
671 | * If there is no error, %0 is returned. If there is an error but it | ||
672 | * was corrected, %1 is returned. Otherwise, %-1 is returned. | ||
671 | */ | 673 | */ |
672 | static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | 674 | static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ |
673 | u8 *ecc_data2, /* read from register */ | 675 | u8 *ecc_data2, /* read from register */ |
@@ -773,7 +775,7 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
773 | 775 | ||
774 | page_data[find_byte] ^= (1 << find_bit); | 776 | page_data[find_byte] ^= (1 << find_bit); |
775 | 777 | ||
776 | return 0; | 778 | return 1; |
777 | default: | 779 | default: |
778 | if (isEccFF) { | 780 | if (isEccFF) { |
779 | if (ecc_data2[0] == 0 && | 781 | if (ecc_data2[0] == 0 && |
@@ -794,8 +796,11 @@ static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */ | |||
794 | * @calc_ecc: ecc read from HW ECC registers | 796 | * @calc_ecc: ecc read from HW ECC registers |
795 | * | 797 | * |
796 | * Compares the ecc read from nand spare area with ECC registers values | 798 | * Compares the ecc read from nand spare area with ECC registers values |
797 | * and if ECC's mismached, it will call 'omap_compare_ecc' for error detection | 799 | * and if ECC's mismatched, it will call 'omap_compare_ecc' for error |
798 | * and correction. | 800 | * detection and correction. If there are no errors, %0 is returned. If |
801 | * there were errors and all of the errors were corrected, the number of | ||
802 | * corrected errors is returned. If uncorrectable errors exist, %-1 is | ||
803 | * returned. | ||
799 | */ | 804 | */ |
800 | static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | 805 | static int omap_correct_data(struct mtd_info *mtd, u_char *dat, |
801 | u_char *read_ecc, u_char *calc_ecc) | 806 | u_char *read_ecc, u_char *calc_ecc) |
@@ -803,6 +808,7 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | |||
803 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, | 808 | struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, |
804 | mtd); | 809 | mtd); |
805 | int blockCnt = 0, i = 0, ret = 0; | 810 | int blockCnt = 0, i = 0, ret = 0; |
811 | int stat = 0; | ||
806 | 812 | ||
807 | /* Ex NAND_ECC_HW12_2048 */ | 813 | /* Ex NAND_ECC_HW12_2048 */ |
808 | if ((info->nand.ecc.mode == NAND_ECC_HW) && | 814 | if ((info->nand.ecc.mode == NAND_ECC_HW) && |
@@ -816,12 +822,14 @@ static int omap_correct_data(struct mtd_info *mtd, u_char *dat, | |||
816 | ret = omap_compare_ecc(read_ecc, calc_ecc, dat); | 822 | ret = omap_compare_ecc(read_ecc, calc_ecc, dat); |
817 | if (ret < 0) | 823 | if (ret < 0) |
818 | return ret; | 824 | return ret; |
825 | /* keep track of the number of corrected errors */ | ||
826 | stat += ret; | ||
819 | } | 827 | } |
820 | read_ecc += 3; | 828 | read_ecc += 3; |
821 | calc_ecc += 3; | 829 | calc_ecc += 3; |
822 | dat += 512; | 830 | dat += 512; |
823 | } | 831 | } |
824 | return 0; | 832 | return stat; |
825 | } | 833 | } |
826 | 834 | ||
827 | /** | 835 | /** |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index ea2c288df3f6..ab7f4c33ced6 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <plat/pxa3xx_nand.h> | 27 | #include <plat/pxa3xx_nand.h> |
28 | 28 | ||
29 | #define CHIP_DELAY_TIMEOUT (2 * HZ/10) | 29 | #define CHIP_DELAY_TIMEOUT (2 * HZ/10) |
30 | #define NAND_STOP_DELAY (2 * HZ/50) | ||
31 | #define PAGE_CHUNK_SIZE (2048) | ||
30 | 32 | ||
31 | /* registers and bit definitions */ | 33 | /* registers and bit definitions */ |
32 | #define NDCR (0x00) /* Control register */ | 34 | #define NDCR (0x00) /* Control register */ |
@@ -52,16 +54,18 @@ | |||
52 | #define NDCR_ND_MODE (0x3 << 21) | 54 | #define NDCR_ND_MODE (0x3 << 21) |
53 | #define NDCR_NAND_MODE (0x0) | 55 | #define NDCR_NAND_MODE (0x0) |
54 | #define NDCR_CLR_PG_CNT (0x1 << 20) | 56 | #define NDCR_CLR_PG_CNT (0x1 << 20) |
55 | #define NDCR_CLR_ECC (0x1 << 19) | 57 | #define NDCR_STOP_ON_UNCOR (0x1 << 19) |
56 | #define NDCR_RD_ID_CNT_MASK (0x7 << 16) | 58 | #define NDCR_RD_ID_CNT_MASK (0x7 << 16) |
57 | #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) | 59 | #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK) |
58 | 60 | ||
59 | #define NDCR_RA_START (0x1 << 15) | 61 | #define NDCR_RA_START (0x1 << 15) |
60 | #define NDCR_PG_PER_BLK (0x1 << 14) | 62 | #define NDCR_PG_PER_BLK (0x1 << 14) |
61 | #define NDCR_ND_ARB_EN (0x1 << 12) | 63 | #define NDCR_ND_ARB_EN (0x1 << 12) |
64 | #define NDCR_INT_MASK (0xFFF) | ||
62 | 65 | ||
63 | #define NDSR_MASK (0xfff) | 66 | #define NDSR_MASK (0xfff) |
64 | #define NDSR_RDY (0x1 << 11) | 67 | #define NDSR_RDY (0x1 << 12) |
68 | #define NDSR_FLASH_RDY (0x1 << 11) | ||
65 | #define NDSR_CS0_PAGED (0x1 << 10) | 69 | #define NDSR_CS0_PAGED (0x1 << 10) |
66 | #define NDSR_CS1_PAGED (0x1 << 9) | 70 | #define NDSR_CS1_PAGED (0x1 << 9) |
67 | #define NDSR_CS0_CMDD (0x1 << 8) | 71 | #define NDSR_CS0_CMDD (0x1 << 8) |
@@ -74,6 +78,7 @@ | |||
74 | #define NDSR_RDDREQ (0x1 << 1) | 78 | #define NDSR_RDDREQ (0x1 << 1) |
75 | #define NDSR_WRCMDREQ (0x1) | 79 | #define NDSR_WRCMDREQ (0x1) |
76 | 80 | ||
81 | #define NDCB0_ST_ROW_EN (0x1 << 26) | ||
77 | #define NDCB0_AUTO_RS (0x1 << 25) | 82 | #define NDCB0_AUTO_RS (0x1 << 25) |
78 | #define NDCB0_CSEL (0x1 << 24) | 83 | #define NDCB0_CSEL (0x1 << 24) |
79 | #define NDCB0_CMD_TYPE_MASK (0x7 << 21) | 84 | #define NDCB0_CMD_TYPE_MASK (0x7 << 21) |
@@ -104,18 +109,21 @@ enum { | |||
104 | }; | 109 | }; |
105 | 110 | ||
106 | enum { | 111 | enum { |
107 | STATE_READY = 0, | 112 | STATE_IDLE = 0, |
108 | STATE_CMD_HANDLE, | 113 | STATE_CMD_HANDLE, |
109 | STATE_DMA_READING, | 114 | STATE_DMA_READING, |
110 | STATE_DMA_WRITING, | 115 | STATE_DMA_WRITING, |
111 | STATE_DMA_DONE, | 116 | STATE_DMA_DONE, |
112 | STATE_PIO_READING, | 117 | STATE_PIO_READING, |
113 | STATE_PIO_WRITING, | 118 | STATE_PIO_WRITING, |
119 | STATE_CMD_DONE, | ||
120 | STATE_READY, | ||
114 | }; | 121 | }; |
115 | 122 | ||
116 | struct pxa3xx_nand_info { | 123 | struct pxa3xx_nand_info { |
117 | struct nand_chip nand_chip; | 124 | struct nand_chip nand_chip; |
118 | 125 | ||
126 | struct nand_hw_control controller; | ||
119 | struct platform_device *pdev; | 127 | struct platform_device *pdev; |
120 | struct pxa3xx_nand_cmdset *cmdset; | 128 | struct pxa3xx_nand_cmdset *cmdset; |
121 | 129 | ||
@@ -126,6 +134,7 @@ struct pxa3xx_nand_info { | |||
126 | unsigned int buf_start; | 134 | unsigned int buf_start; |
127 | unsigned int buf_count; | 135 | unsigned int buf_count; |
128 | 136 | ||
137 | struct mtd_info *mtd; | ||
129 | /* DMA information */ | 138 | /* DMA information */ |
130 | int drcmr_dat; | 139 | int drcmr_dat; |
131 | int drcmr_cmd; | 140 | int drcmr_cmd; |
@@ -149,6 +158,7 @@ struct pxa3xx_nand_info { | |||
149 | 158 | ||
150 | int use_ecc; /* use HW ECC ? */ | 159 | int use_ecc; /* use HW ECC ? */ |
151 | int use_dma; /* use DMA ? */ | 160 | int use_dma; /* use DMA ? */ |
161 | int is_ready; | ||
152 | 162 | ||
153 | unsigned int page_size; /* page size of attached chip */ | 163 | unsigned int page_size; /* page size of attached chip */ |
154 | unsigned int data_size; /* data size in FIFO */ | 164 | unsigned int data_size; /* data size in FIFO */ |
@@ -201,20 +211,22 @@ static struct pxa3xx_nand_timing timing[] = { | |||
201 | }; | 211 | }; |
202 | 212 | ||
203 | static struct pxa3xx_nand_flash builtin_flash_types[] = { | 213 | static struct pxa3xx_nand_flash builtin_flash_types[] = { |
204 | { 0, 0, 2048, 8, 8, 0, &default_cmdset, &timing[0] }, | 214 | { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] }, |
205 | { 0x46ec, 32, 512, 16, 16, 4096, &default_cmdset, &timing[1] }, | 215 | { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] }, |
206 | { 0xdaec, 64, 2048, 8, 8, 2048, &default_cmdset, &timing[1] }, | 216 | { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] }, |
207 | { 0xd7ec, 128, 4096, 8, 8, 8192, &default_cmdset, &timing[1] }, | 217 | { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] }, |
208 | { 0xa12c, 64, 2048, 8, 8, 1024, &default_cmdset, &timing[2] }, | 218 | { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] }, |
209 | { 0xb12c, 64, 2048, 16, 16, 1024, &default_cmdset, &timing[2] }, | 219 | { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] }, |
210 | { 0xdc2c, 64, 2048, 8, 8, 4096, &default_cmdset, &timing[2] }, | 220 | { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] }, |
211 | { 0xcc2c, 64, 2048, 16, 16, 4096, &default_cmdset, &timing[2] }, | 221 | { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] }, |
212 | { 0xba20, 64, 2048, 16, 16, 2048, &default_cmdset, &timing[3] }, | 222 | { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] }, |
213 | }; | 223 | }; |
214 | 224 | ||
215 | /* Define a default flash type setting serve as flash detecting only */ | 225 | /* Define a default flash type setting serve as flash detecting only */ |
216 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) | 226 | #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0]) |
217 | 227 | ||
228 | const char *mtd_names[] = {"pxa3xx_nand-0", NULL}; | ||
229 | |||
218 | #define NDTR0_tCH(c) (min((c), 7) << 19) | 230 | #define NDTR0_tCH(c) (min((c), 7) << 19) |
219 | #define NDTR0_tCS(c) (min((c), 7) << 16) | 231 | #define NDTR0_tCS(c) (min((c), 7) << 16) |
220 | #define NDTR0_tWH(c) (min((c), 7) << 11) | 232 | #define NDTR0_tWH(c) (min((c), 7) << 11) |
@@ -252,25 +264,6 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info, | |||
252 | nand_writel(info, NDTR1CS0, ndtr1); | 264 | nand_writel(info, NDTR1CS0, ndtr1); |
253 | } | 265 | } |
254 | 266 | ||
255 | #define WAIT_EVENT_TIMEOUT 10 | ||
256 | |||
257 | static int wait_for_event(struct pxa3xx_nand_info *info, uint32_t event) | ||
258 | { | ||
259 | int timeout = WAIT_EVENT_TIMEOUT; | ||
260 | uint32_t ndsr; | ||
261 | |||
262 | while (timeout--) { | ||
263 | ndsr = nand_readl(info, NDSR) & NDSR_MASK; | ||
264 | if (ndsr & event) { | ||
265 | nand_writel(info, NDSR, ndsr); | ||
266 | return 0; | ||
267 | } | ||
268 | udelay(10); | ||
269 | } | ||
270 | |||
271 | return -ETIMEDOUT; | ||
272 | } | ||
273 | |||
274 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | 267 | static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) |
275 | { | 268 | { |
276 | int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; | 269 | int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; |
@@ -291,69 +284,45 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info) | |||
291 | } | 284 | } |
292 | } | 285 | } |
293 | 286 | ||
294 | static int prepare_read_prog_cmd(struct pxa3xx_nand_info *info, | 287 | /** |
295 | uint16_t cmd, int column, int page_addr) | 288 | * NOTE: it is a must to set ND_RUN firstly, then write |
289 | * command buffer, otherwise, it does not work. | ||
290 | * We enable all the interrupt at the same time, and | ||
291 | * let pxa3xx_nand_irq to handle all logic. | ||
292 | */ | ||
293 | static void pxa3xx_nand_start(struct pxa3xx_nand_info *info) | ||
296 | { | 294 | { |
297 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 295 | uint32_t ndcr; |
298 | pxa3xx_set_datasize(info); | ||
299 | |||
300 | /* generate values for NDCBx registers */ | ||
301 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | ||
302 | info->ndcb1 = 0; | ||
303 | info->ndcb2 = 0; | ||
304 | info->ndcb0 |= NDCB0_ADDR_CYC(info->row_addr_cycles + info->col_addr_cycles); | ||
305 | |||
306 | if (info->col_addr_cycles == 2) { | ||
307 | /* large block, 2 cycles for column address | ||
308 | * row address starts from 3rd cycle | ||
309 | */ | ||
310 | info->ndcb1 |= page_addr << 16; | ||
311 | if (info->row_addr_cycles == 3) | ||
312 | info->ndcb2 = (page_addr >> 16) & 0xff; | ||
313 | } else | ||
314 | /* small block, 1 cycles for column address | ||
315 | * row address starts from 2nd cycle | ||
316 | */ | ||
317 | info->ndcb1 = page_addr << 8; | ||
318 | |||
319 | if (cmd == cmdset->program) | ||
320 | info->ndcb0 |= NDCB0_CMD_TYPE(1) | NDCB0_AUTO_RS; | ||
321 | 296 | ||
322 | return 0; | 297 | ndcr = info->reg_ndcr; |
323 | } | 298 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; |
299 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; | ||
300 | ndcr |= NDCR_ND_RUN; | ||
324 | 301 | ||
325 | static int prepare_erase_cmd(struct pxa3xx_nand_info *info, | 302 | /* clear status bits and run */ |
326 | uint16_t cmd, int page_addr) | 303 | nand_writel(info, NDCR, 0); |
327 | { | 304 | nand_writel(info, NDSR, NDSR_MASK); |
328 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | 305 | nand_writel(info, NDCR, ndcr); |
329 | info->ndcb0 |= NDCB0_CMD_TYPE(2) | NDCB0_AUTO_RS | NDCB0_ADDR_CYC(3); | ||
330 | info->ndcb1 = page_addr; | ||
331 | info->ndcb2 = 0; | ||
332 | return 0; | ||
333 | } | 306 | } |
334 | 307 | ||
335 | static int prepare_other_cmd(struct pxa3xx_nand_info *info, uint16_t cmd) | 308 | static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info) |
336 | { | 309 | { |
337 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 310 | uint32_t ndcr; |
338 | 311 | int timeout = NAND_STOP_DELAY; | |
339 | info->ndcb0 = cmd | ((cmd & 0xff00) ? NDCB0_DBC : 0); | ||
340 | info->ndcb1 = 0; | ||
341 | info->ndcb2 = 0; | ||
342 | 312 | ||
343 | info->oob_size = 0; | 313 | /* wait RUN bit in NDCR become 0 */ |
344 | if (cmd == cmdset->read_id) { | 314 | ndcr = nand_readl(info, NDCR); |
345 | info->ndcb0 |= NDCB0_CMD_TYPE(3); | 315 | while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) { |
346 | info->data_size = 8; | 316 | ndcr = nand_readl(info, NDCR); |
347 | } else if (cmd == cmdset->read_status) { | 317 | udelay(1); |
348 | info->ndcb0 |= NDCB0_CMD_TYPE(4); | 318 | } |
349 | info->data_size = 8; | ||
350 | } else if (cmd == cmdset->reset || cmd == cmdset->lock || | ||
351 | cmd == cmdset->unlock) { | ||
352 | info->ndcb0 |= NDCB0_CMD_TYPE(5); | ||
353 | } else | ||
354 | return -EINVAL; | ||
355 | 319 | ||
356 | return 0; | 320 | if (timeout <= 0) { |
321 | ndcr &= ~NDCR_ND_RUN; | ||
322 | nand_writel(info, NDCR, ndcr); | ||
323 | } | ||
324 | /* clear status bits */ | ||
325 | nand_writel(info, NDSR, NDSR_MASK); | ||
357 | } | 326 | } |
358 | 327 | ||
359 | static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | 328 | static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) |
@@ -372,39 +341,8 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) | |||
372 | nand_writel(info, NDCR, ndcr | int_mask); | 341 | nand_writel(info, NDCR, ndcr | int_mask); |
373 | } | 342 | } |
374 | 343 | ||
375 | /* NOTE: it is a must to set ND_RUN firstly, then write command buffer | 344 | static void handle_data_pio(struct pxa3xx_nand_info *info) |
376 | * otherwise, it does not work | ||
377 | */ | ||
378 | static int write_cmd(struct pxa3xx_nand_info *info) | ||
379 | { | 345 | { |
380 | uint32_t ndcr; | ||
381 | |||
382 | /* clear status bits and run */ | ||
383 | nand_writel(info, NDSR, NDSR_MASK); | ||
384 | |||
385 | ndcr = info->reg_ndcr; | ||
386 | |||
387 | ndcr |= info->use_ecc ? NDCR_ECC_EN : 0; | ||
388 | ndcr |= info->use_dma ? NDCR_DMA_EN : 0; | ||
389 | ndcr |= NDCR_ND_RUN; | ||
390 | |||
391 | nand_writel(info, NDCR, ndcr); | ||
392 | |||
393 | if (wait_for_event(info, NDSR_WRCMDREQ)) { | ||
394 | printk(KERN_ERR "timed out writing command\n"); | ||
395 | return -ETIMEDOUT; | ||
396 | } | ||
397 | |||
398 | nand_writel(info, NDCB0, info->ndcb0); | ||
399 | nand_writel(info, NDCB0, info->ndcb1); | ||
400 | nand_writel(info, NDCB0, info->ndcb2); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | static int handle_data_pio(struct pxa3xx_nand_info *info) | ||
405 | { | ||
406 | int ret, timeout = CHIP_DELAY_TIMEOUT; | ||
407 | |||
408 | switch (info->state) { | 346 | switch (info->state) { |
409 | case STATE_PIO_WRITING: | 347 | case STATE_PIO_WRITING: |
410 | __raw_writesl(info->mmio_base + NDDB, info->data_buff, | 348 | __raw_writesl(info->mmio_base + NDDB, info->data_buff, |
@@ -412,14 +350,6 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) | |||
412 | if (info->oob_size > 0) | 350 | if (info->oob_size > 0) |
413 | __raw_writesl(info->mmio_base + NDDB, info->oob_buff, | 351 | __raw_writesl(info->mmio_base + NDDB, info->oob_buff, |
414 | DIV_ROUND_UP(info->oob_size, 4)); | 352 | DIV_ROUND_UP(info->oob_size, 4)); |
415 | |||
416 | enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
417 | |||
418 | ret = wait_for_completion_timeout(&info->cmd_complete, timeout); | ||
419 | if (!ret) { | ||
420 | printk(KERN_ERR "program command time out\n"); | ||
421 | return -1; | ||
422 | } | ||
423 | break; | 353 | break; |
424 | case STATE_PIO_READING: | 354 | case STATE_PIO_READING: |
425 | __raw_readsl(info->mmio_base + NDDB, info->data_buff, | 355 | __raw_readsl(info->mmio_base + NDDB, info->data_buff, |
@@ -431,14 +361,11 @@ static int handle_data_pio(struct pxa3xx_nand_info *info) | |||
431 | default: | 361 | default: |
432 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | 362 | printk(KERN_ERR "%s: invalid state %d\n", __func__, |
433 | info->state); | 363 | info->state); |
434 | return -EINVAL; | 364 | BUG(); |
435 | } | 365 | } |
436 | |||
437 | info->state = STATE_READY; | ||
438 | return 0; | ||
439 | } | 366 | } |
440 | 367 | ||
441 | static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out) | 368 | static void start_data_dma(struct pxa3xx_nand_info *info) |
442 | { | 369 | { |
443 | struct pxa_dma_desc *desc = info->data_desc; | 370 | struct pxa_dma_desc *desc = info->data_desc; |
444 | int dma_len = ALIGN(info->data_size + info->oob_size, 32); | 371 | int dma_len = ALIGN(info->data_size + info->oob_size, 32); |
@@ -446,14 +373,21 @@ static void start_data_dma(struct pxa3xx_nand_info *info, int dir_out) | |||
446 | desc->ddadr = DDADR_STOP; | 373 | desc->ddadr = DDADR_STOP; |
447 | desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; | 374 | desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len; |
448 | 375 | ||
449 | if (dir_out) { | 376 | switch (info->state) { |
377 | case STATE_DMA_WRITING: | ||
450 | desc->dsadr = info->data_buff_phys; | 378 | desc->dsadr = info->data_buff_phys; |
451 | desc->dtadr = info->mmio_phys + NDDB; | 379 | desc->dtadr = info->mmio_phys + NDDB; |
452 | desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; | 380 | desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG; |
453 | } else { | 381 | break; |
382 | case STATE_DMA_READING: | ||
454 | desc->dtadr = info->data_buff_phys; | 383 | desc->dtadr = info->data_buff_phys; |
455 | desc->dsadr = info->mmio_phys + NDDB; | 384 | desc->dsadr = info->mmio_phys + NDDB; |
456 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; | 385 | desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC; |
386 | break; | ||
387 | default: | ||
388 | printk(KERN_ERR "%s: invalid state %d\n", __func__, | ||
389 | info->state); | ||
390 | BUG(); | ||
457 | } | 391 | } |
458 | 392 | ||
459 | DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; | 393 | DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch; |
@@ -471,93 +405,62 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data) | |||
471 | 405 | ||
472 | if (dcsr & DCSR_BUSERR) { | 406 | if (dcsr & DCSR_BUSERR) { |
473 | info->retcode = ERR_DMABUSERR; | 407 | info->retcode = ERR_DMABUSERR; |
474 | complete(&info->cmd_complete); | ||
475 | } | 408 | } |
476 | 409 | ||
477 | if (info->state == STATE_DMA_WRITING) { | 410 | info->state = STATE_DMA_DONE; |
478 | info->state = STATE_DMA_DONE; | 411 | enable_int(info, NDCR_INT_MASK); |
479 | enable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | 412 | nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ); |
480 | } else { | ||
481 | info->state = STATE_READY; | ||
482 | complete(&info->cmd_complete); | ||
483 | } | ||
484 | } | 413 | } |
485 | 414 | ||
486 | static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) | 415 | static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) |
487 | { | 416 | { |
488 | struct pxa3xx_nand_info *info = devid; | 417 | struct pxa3xx_nand_info *info = devid; |
489 | unsigned int status; | 418 | unsigned int status, is_completed = 0; |
490 | 419 | ||
491 | status = nand_readl(info, NDSR); | 420 | status = nand_readl(info, NDSR); |
492 | 421 | ||
493 | if (status & (NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR)) { | 422 | if (status & NDSR_DBERR) |
494 | if (status & NDSR_DBERR) | 423 | info->retcode = ERR_DBERR; |
495 | info->retcode = ERR_DBERR; | 424 | if (status & NDSR_SBERR) |
496 | else if (status & NDSR_SBERR) | 425 | info->retcode = ERR_SBERR; |
497 | info->retcode = ERR_SBERR; | 426 | if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) { |
498 | 427 | /* whether use dma to transfer data */ | |
499 | disable_int(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | ||
500 | |||
501 | if (info->use_dma) { | ||
502 | info->state = STATE_DMA_READING; | ||
503 | start_data_dma(info, 0); | ||
504 | } else { | ||
505 | info->state = STATE_PIO_READING; | ||
506 | complete(&info->cmd_complete); | ||
507 | } | ||
508 | } else if (status & NDSR_WRDREQ) { | ||
509 | disable_int(info, NDSR_WRDREQ); | ||
510 | if (info->use_dma) { | 428 | if (info->use_dma) { |
511 | info->state = STATE_DMA_WRITING; | 429 | disable_int(info, NDCR_INT_MASK); |
512 | start_data_dma(info, 1); | 430 | info->state = (status & NDSR_RDDREQ) ? |
431 | STATE_DMA_READING : STATE_DMA_WRITING; | ||
432 | start_data_dma(info); | ||
433 | goto NORMAL_IRQ_EXIT; | ||
513 | } else { | 434 | } else { |
514 | info->state = STATE_PIO_WRITING; | 435 | info->state = (status & NDSR_RDDREQ) ? |
515 | complete(&info->cmd_complete); | 436 | STATE_PIO_READING : STATE_PIO_WRITING; |
437 | handle_data_pio(info); | ||
516 | } | 438 | } |
517 | } else if (status & (NDSR_CS0_BBD | NDSR_CS0_CMDD)) { | ||
518 | if (status & NDSR_CS0_BBD) | ||
519 | info->retcode = ERR_BBERR; | ||
520 | |||
521 | disable_int(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
522 | info->state = STATE_READY; | ||
523 | complete(&info->cmd_complete); | ||
524 | } | 439 | } |
525 | nand_writel(info, NDSR, status); | 440 | if (status & NDSR_CS0_CMDD) { |
526 | return IRQ_HANDLED; | 441 | info->state = STATE_CMD_DONE; |
527 | } | 442 | is_completed = 1; |
528 | |||
529 | static int pxa3xx_nand_do_cmd(struct pxa3xx_nand_info *info, uint32_t event) | ||
530 | { | ||
531 | uint32_t ndcr; | ||
532 | int ret, timeout = CHIP_DELAY_TIMEOUT; | ||
533 | |||
534 | if (write_cmd(info)) { | ||
535 | info->retcode = ERR_SENDCMD; | ||
536 | goto fail_stop; | ||
537 | } | 443 | } |
538 | 444 | if (status & NDSR_FLASH_RDY) { | |
539 | info->state = STATE_CMD_HANDLE; | 445 | info->is_ready = 1; |
540 | 446 | info->state = STATE_READY; | |
541 | enable_int(info, event); | ||
542 | |||
543 | ret = wait_for_completion_timeout(&info->cmd_complete, timeout); | ||
544 | if (!ret) { | ||
545 | printk(KERN_ERR "command execution timed out\n"); | ||
546 | info->retcode = ERR_SENDCMD; | ||
547 | goto fail_stop; | ||
548 | } | 447 | } |
549 | 448 | ||
550 | if (info->use_dma == 0 && info->data_size > 0) | 449 | if (status & NDSR_WRCMDREQ) { |
551 | if (handle_data_pio(info)) | 450 | nand_writel(info, NDSR, NDSR_WRCMDREQ); |
552 | goto fail_stop; | 451 | status &= ~NDSR_WRCMDREQ; |
553 | 452 | info->state = STATE_CMD_HANDLE; | |
554 | return 0; | 453 | nand_writel(info, NDCB0, info->ndcb0); |
454 | nand_writel(info, NDCB0, info->ndcb1); | ||
455 | nand_writel(info, NDCB0, info->ndcb2); | ||
456 | } | ||
555 | 457 | ||
556 | fail_stop: | 458 | /* clear NDSR to let the controller exit the IRQ */ |
557 | ndcr = nand_readl(info, NDCR); | 459 | nand_writel(info, NDSR, status); |
558 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | 460 | if (is_completed) |
559 | udelay(10); | 461 | complete(&info->cmd_complete); |
560 | return -ETIMEDOUT; | 462 | NORMAL_IRQ_EXIT: |
463 | return IRQ_HANDLED; | ||
561 | } | 464 | } |
562 | 465 | ||
563 | static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) | 466 | static int pxa3xx_nand_dev_ready(struct mtd_info *mtd) |
@@ -574,125 +477,218 @@ static inline int is_buf_blank(uint8_t *buf, size_t len) | |||
574 | return 1; | 477 | return 1; |
575 | } | 478 | } |
576 | 479 | ||
577 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, | 480 | static int prepare_command_pool(struct pxa3xx_nand_info *info, int command, |
578 | int column, int page_addr) | 481 | uint16_t column, int page_addr) |
579 | { | 482 | { |
580 | struct pxa3xx_nand_info *info = mtd->priv; | 483 | uint16_t cmd; |
581 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | 484 | int addr_cycle, exec_cmd, ndcb0; |
582 | int ret; | 485 | struct mtd_info *mtd = info->mtd; |
486 | |||
487 | ndcb0 = 0; | ||
488 | addr_cycle = 0; | ||
489 | exec_cmd = 1; | ||
490 | |||
491 | /* reset data and oob column point to handle data */ | ||
492 | info->buf_start = 0; | ||
493 | info->buf_count = 0; | ||
494 | info->oob_size = 0; | ||
495 | info->use_ecc = 0; | ||
496 | info->is_ready = 0; | ||
497 | info->retcode = ERR_NONE; | ||
583 | 498 | ||
584 | info->use_dma = (use_dma) ? 1 : 0; | 499 | switch (command) { |
585 | info->use_ecc = 0; | 500 | case NAND_CMD_READ0: |
586 | info->data_size = 0; | 501 | case NAND_CMD_PAGEPROG: |
587 | info->state = STATE_READY; | 502 | info->use_ecc = 1; |
503 | case NAND_CMD_READOOB: | ||
504 | pxa3xx_set_datasize(info); | ||
505 | break; | ||
506 | case NAND_CMD_SEQIN: | ||
507 | exec_cmd = 0; | ||
508 | break; | ||
509 | default: | ||
510 | info->ndcb1 = 0; | ||
511 | info->ndcb2 = 0; | ||
512 | break; | ||
513 | } | ||
588 | 514 | ||
589 | init_completion(&info->cmd_complete); | 515 | info->ndcb0 = ndcb0; |
516 | addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles | ||
517 | + info->col_addr_cycles); | ||
590 | 518 | ||
591 | switch (command) { | 519 | switch (command) { |
592 | case NAND_CMD_READOOB: | 520 | case NAND_CMD_READOOB: |
593 | /* disable HW ECC to get all the OOB data */ | 521 | case NAND_CMD_READ0: |
594 | info->buf_count = mtd->writesize + mtd->oobsize; | 522 | cmd = info->cmdset->read1; |
595 | info->buf_start = mtd->writesize + column; | 523 | if (command == NAND_CMD_READOOB) |
596 | memset(info->data_buff, 0xFF, info->buf_count); | 524 | info->buf_start = mtd->writesize + column; |
525 | else | ||
526 | info->buf_start = column; | ||
597 | 527 | ||
598 | if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) | 528 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) |
599 | break; | 529 | info->ndcb0 |= NDCB0_CMD_TYPE(0) |
530 | | addr_cycle | ||
531 | | (cmd & NDCB0_CMD1_MASK); | ||
532 | else | ||
533 | info->ndcb0 |= NDCB0_CMD_TYPE(0) | ||
534 | | NDCB0_DBC | ||
535 | | addr_cycle | ||
536 | | cmd; | ||
600 | 537 | ||
601 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | 538 | case NAND_CMD_SEQIN: |
539 | /* small page addr setting */ | ||
540 | if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) { | ||
541 | info->ndcb1 = ((page_addr & 0xFFFFFF) << 8) | ||
542 | | (column & 0xFF); | ||
602 | 543 | ||
603 | /* We only are OOB, so if the data has error, does not matter */ | 544 | info->ndcb2 = 0; |
604 | if (info->retcode == ERR_DBERR) | 545 | } else { |
605 | info->retcode = ERR_NONE; | 546 | info->ndcb1 = ((page_addr & 0xFFFF) << 16) |
606 | break; | 547 | | (column & 0xFFFF); |
548 | |||
549 | if (page_addr & 0xFF0000) | ||
550 | info->ndcb2 = (page_addr & 0xFF0000) >> 16; | ||
551 | else | ||
552 | info->ndcb2 = 0; | ||
553 | } | ||
607 | 554 | ||
608 | case NAND_CMD_READ0: | ||
609 | info->use_ecc = 1; | ||
610 | info->retcode = ERR_NONE; | ||
611 | info->buf_start = column; | ||
612 | info->buf_count = mtd->writesize + mtd->oobsize; | 555 | info->buf_count = mtd->writesize + mtd->oobsize; |
613 | memset(info->data_buff, 0xFF, info->buf_count); | 556 | memset(info->data_buff, 0xFF, info->buf_count); |
614 | 557 | ||
615 | if (prepare_read_prog_cmd(info, cmdset->read1, column, page_addr)) | 558 | break; |
559 | |||
560 | case NAND_CMD_PAGEPROG: | ||
561 | if (is_buf_blank(info->data_buff, | ||
562 | (mtd->writesize + mtd->oobsize))) { | ||
563 | exec_cmd = 0; | ||
616 | break; | 564 | break; |
565 | } | ||
617 | 566 | ||
618 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ | NDSR_DBERR | NDSR_SBERR); | 567 | cmd = info->cmdset->program; |
568 | info->ndcb0 |= NDCB0_CMD_TYPE(0x1) | ||
569 | | NDCB0_AUTO_RS | ||
570 | | NDCB0_ST_ROW_EN | ||
571 | | NDCB0_DBC | ||
572 | | cmd | ||
573 | | addr_cycle; | ||
574 | break; | ||
619 | 575 | ||
620 | if (info->retcode == ERR_DBERR) { | 576 | case NAND_CMD_READID: |
621 | /* for blank page (all 0xff), HW will calculate its ECC as | 577 | cmd = info->cmdset->read_id; |
622 | * 0, which is different from the ECC information within | 578 | info->buf_count = info->read_id_bytes; |
623 | * OOB, ignore such double bit errors | 579 | info->ndcb0 |= NDCB0_CMD_TYPE(3) |
624 | */ | 580 | | NDCB0_ADDR_CYC(1) |
625 | if (is_buf_blank(info->data_buff, mtd->writesize)) | 581 | | cmd; |
626 | info->retcode = ERR_NONE; | 582 | |
627 | } | 583 | info->data_size = 8; |
628 | break; | 584 | break; |
629 | case NAND_CMD_SEQIN: | 585 | case NAND_CMD_STATUS: |
630 | info->buf_start = column; | 586 | cmd = info->cmdset->read_status; |
631 | info->buf_count = mtd->writesize + mtd->oobsize; | 587 | info->buf_count = 1; |
632 | memset(info->data_buff, 0xff, info->buf_count); | 588 | info->ndcb0 |= NDCB0_CMD_TYPE(4) |
589 | | NDCB0_ADDR_CYC(1) | ||
590 | | cmd; | ||
633 | 591 | ||
634 | /* save column/page_addr for next CMD_PAGEPROG */ | 592 | info->data_size = 8; |
635 | info->seqin_column = column; | ||
636 | info->seqin_page_addr = page_addr; | ||
637 | break; | 593 | break; |
638 | case NAND_CMD_PAGEPROG: | ||
639 | info->use_ecc = (info->seqin_column >= mtd->writesize) ? 0 : 1; | ||
640 | 594 | ||
641 | if (prepare_read_prog_cmd(info, cmdset->program, | 595 | case NAND_CMD_ERASE1: |
642 | info->seqin_column, info->seqin_page_addr)) | 596 | cmd = info->cmdset->erase; |
643 | break; | 597 | info->ndcb0 |= NDCB0_CMD_TYPE(2) |
598 | | NDCB0_AUTO_RS | ||
599 | | NDCB0_ADDR_CYC(3) | ||
600 | | NDCB0_DBC | ||
601 | | cmd; | ||
602 | info->ndcb1 = page_addr; | ||
603 | info->ndcb2 = 0; | ||
644 | 604 | ||
645 | pxa3xx_nand_do_cmd(info, NDSR_WRDREQ); | ||
646 | break; | 605 | break; |
647 | case NAND_CMD_ERASE1: | 606 | case NAND_CMD_RESET: |
648 | if (prepare_erase_cmd(info, cmdset->erase, page_addr)) | 607 | cmd = info->cmdset->reset; |
649 | break; | 608 | info->ndcb0 |= NDCB0_CMD_TYPE(5) |
609 | | cmd; | ||
650 | 610 | ||
651 | pxa3xx_nand_do_cmd(info, NDSR_CS0_BBD | NDSR_CS0_CMDD); | ||
652 | break; | 611 | break; |
612 | |||
653 | case NAND_CMD_ERASE2: | 613 | case NAND_CMD_ERASE2: |
614 | exec_cmd = 0; | ||
654 | break; | 615 | break; |
655 | case NAND_CMD_READID: | ||
656 | case NAND_CMD_STATUS: | ||
657 | info->use_dma = 0; /* force PIO read */ | ||
658 | info->buf_start = 0; | ||
659 | info->buf_count = (command == NAND_CMD_READID) ? | ||
660 | info->read_id_bytes : 1; | ||
661 | |||
662 | if (prepare_other_cmd(info, (command == NAND_CMD_READID) ? | ||
663 | cmdset->read_id : cmdset->read_status)) | ||
664 | break; | ||
665 | 616 | ||
666 | pxa3xx_nand_do_cmd(info, NDSR_RDDREQ); | 617 | default: |
618 | exec_cmd = 0; | ||
619 | printk(KERN_ERR "pxa3xx-nand: non-supported" | ||
620 | " command %x\n", command); | ||
667 | break; | 621 | break; |
668 | case NAND_CMD_RESET: | 622 | } |
669 | if (prepare_other_cmd(info, cmdset->reset)) | ||
670 | break; | ||
671 | 623 | ||
672 | ret = pxa3xx_nand_do_cmd(info, NDSR_CS0_CMDD); | 624 | return exec_cmd; |
673 | if (ret == 0) { | 625 | } |
674 | int timeout = 2; | ||
675 | uint32_t ndcr; | ||
676 | 626 | ||
677 | while (timeout--) { | 627 | static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command, |
678 | if (nand_readl(info, NDSR) & NDSR_RDY) | 628 | int column, int page_addr) |
679 | break; | 629 | { |
680 | msleep(10); | 630 | struct pxa3xx_nand_info *info = mtd->priv; |
681 | } | 631 | int ret, exec_cmd; |
682 | 632 | ||
683 | ndcr = nand_readl(info, NDCR); | 633 | /* |
684 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | 634 | * if this is a x16 device ,then convert the input |
635 | * "byte" address into a "word" address appropriate | ||
636 | * for indexing a word-oriented device | ||
637 | */ | ||
638 | if (info->reg_ndcr & NDCR_DWIDTH_M) | ||
639 | column /= 2; | ||
640 | |||
641 | exec_cmd = prepare_command_pool(info, command, column, page_addr); | ||
642 | if (exec_cmd) { | ||
643 | init_completion(&info->cmd_complete); | ||
644 | pxa3xx_nand_start(info); | ||
645 | |||
646 | ret = wait_for_completion_timeout(&info->cmd_complete, | ||
647 | CHIP_DELAY_TIMEOUT); | ||
648 | if (!ret) { | ||
649 | printk(KERN_ERR "Wait time out!!!\n"); | ||
650 | /* Stop State Machine for next command cycle */ | ||
651 | pxa3xx_nand_stop(info); | ||
685 | } | 652 | } |
686 | break; | 653 | info->state = STATE_IDLE; |
687 | default: | ||
688 | printk(KERN_ERR "non-supported command.\n"); | ||
689 | break; | ||
690 | } | 654 | } |
655 | } | ||
656 | |||
657 | static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, | ||
658 | struct nand_chip *chip, const uint8_t *buf) | ||
659 | { | ||
660 | chip->write_buf(mtd, buf, mtd->writesize); | ||
661 | chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
662 | } | ||
691 | 663 | ||
692 | if (info->retcode == ERR_DBERR) { | 664 | static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, |
693 | printk(KERN_ERR "double bit error @ page %08x\n", page_addr); | 665 | struct nand_chip *chip, uint8_t *buf, int page) |
694 | info->retcode = ERR_NONE; | 666 | { |
667 | struct pxa3xx_nand_info *info = mtd->priv; | ||
668 | |||
669 | chip->read_buf(mtd, buf, mtd->writesize); | ||
670 | chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); | ||
671 | |||
672 | if (info->retcode == ERR_SBERR) { | ||
673 | switch (info->use_ecc) { | ||
674 | case 1: | ||
675 | mtd->ecc_stats.corrected++; | ||
676 | break; | ||
677 | case 0: | ||
678 | default: | ||
679 | break; | ||
680 | } | ||
681 | } else if (info->retcode == ERR_DBERR) { | ||
682 | /* | ||
683 | * for blank page (all 0xff), HW will calculate its ECC as | ||
684 | * 0, which is different from the ECC information within | ||
685 | * OOB, ignore such double bit errors | ||
686 | */ | ||
687 | if (is_buf_blank(buf, mtd->writesize)) | ||
688 | mtd->ecc_stats.failed++; | ||
695 | } | 689 | } |
690 | |||
691 | return 0; | ||
696 | } | 692 | } |
697 | 693 | ||
698 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) | 694 | static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd) |
@@ -769,73 +765,12 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this) | |||
769 | return 0; | 765 | return 0; |
770 | } | 766 | } |
771 | 767 | ||
772 | static void pxa3xx_nand_ecc_hwctl(struct mtd_info *mtd, int mode) | ||
773 | { | ||
774 | return; | ||
775 | } | ||
776 | |||
777 | static int pxa3xx_nand_ecc_calculate(struct mtd_info *mtd, | ||
778 | const uint8_t *dat, uint8_t *ecc_code) | ||
779 | { | ||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static int pxa3xx_nand_ecc_correct(struct mtd_info *mtd, | ||
784 | uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) | ||
785 | { | ||
786 | struct pxa3xx_nand_info *info = mtd->priv; | ||
787 | /* | ||
788 | * Any error include ERR_SEND_CMD, ERR_DBERR, ERR_BUSERR, we | ||
789 | * consider it as a ecc error which will tell the caller the | ||
790 | * read fail We have distinguish all the errors, but the | ||
791 | * nand_read_ecc only check this function return value | ||
792 | * | ||
793 | * Corrected (single-bit) errors must also be noted. | ||
794 | */ | ||
795 | if (info->retcode == ERR_SBERR) | ||
796 | return 1; | ||
797 | else if (info->retcode != ERR_NONE) | ||
798 | return -1; | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static int __readid(struct pxa3xx_nand_info *info, uint32_t *id) | ||
804 | { | ||
805 | const struct pxa3xx_nand_cmdset *cmdset = info->cmdset; | ||
806 | uint32_t ndcr; | ||
807 | uint8_t id_buff[8]; | ||
808 | |||
809 | if (prepare_other_cmd(info, cmdset->read_id)) { | ||
810 | printk(KERN_ERR "failed to prepare command\n"); | ||
811 | return -EINVAL; | ||
812 | } | ||
813 | |||
814 | /* Send command */ | ||
815 | if (write_cmd(info)) | ||
816 | goto fail_timeout; | ||
817 | |||
818 | /* Wait for CMDDM(command done successfully) */ | ||
819 | if (wait_for_event(info, NDSR_RDDREQ)) | ||
820 | goto fail_timeout; | ||
821 | |||
822 | __raw_readsl(info->mmio_base + NDDB, id_buff, 2); | ||
823 | *id = id_buff[0] | (id_buff[1] << 8); | ||
824 | return 0; | ||
825 | |||
826 | fail_timeout: | ||
827 | ndcr = nand_readl(info, NDCR); | ||
828 | nand_writel(info, NDCR, ndcr & ~NDCR_ND_RUN); | ||
829 | udelay(10); | ||
830 | return -ETIMEDOUT; | ||
831 | } | ||
832 | |||
833 | static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | 768 | static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, |
834 | const struct pxa3xx_nand_flash *f) | 769 | const struct pxa3xx_nand_flash *f) |
835 | { | 770 | { |
836 | struct platform_device *pdev = info->pdev; | 771 | struct platform_device *pdev = info->pdev; |
837 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; | 772 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
838 | uint32_t ndcr = 0x00000FFF; /* disable all interrupts */ | 773 | uint32_t ndcr = 0x0; /* enable all interrupts */ |
839 | 774 | ||
840 | if (f->page_size != 2048 && f->page_size != 512) | 775 | if (f->page_size != 2048 && f->page_size != 512) |
841 | return -EINVAL; | 776 | return -EINVAL; |
@@ -844,9 +779,8 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | |||
844 | return -EINVAL; | 779 | return -EINVAL; |
845 | 780 | ||
846 | /* calculate flash information */ | 781 | /* calculate flash information */ |
847 | info->cmdset = f->cmdset; | 782 | info->cmdset = &default_cmdset; |
848 | info->page_size = f->page_size; | 783 | info->page_size = f->page_size; |
849 | info->oob_buff = info->data_buff + f->page_size; | ||
850 | info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; | 784 | info->read_id_bytes = (f->page_size == 2048) ? 4 : 2; |
851 | 785 | ||
852 | /* calculate addressing information */ | 786 | /* calculate addressing information */ |
@@ -876,87 +810,18 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info, | |||
876 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) | 810 | static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) |
877 | { | 811 | { |
878 | uint32_t ndcr = nand_readl(info, NDCR); | 812 | uint32_t ndcr = nand_readl(info, NDCR); |
879 | struct nand_flash_dev *type = NULL; | ||
880 | uint32_t id = -1, page_per_block, num_blocks; | ||
881 | int i; | ||
882 | |||
883 | page_per_block = ndcr & NDCR_PG_PER_BLK ? 64 : 32; | ||
884 | info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; | 813 | info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; |
885 | /* set info fields needed to __readid */ | 814 | /* set info fields needed to read id */ |
886 | info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; | 815 | info->read_id_bytes = (info->page_size == 2048) ? 4 : 2; |
887 | info->reg_ndcr = ndcr; | 816 | info->reg_ndcr = ndcr; |
888 | info->cmdset = &default_cmdset; | 817 | info->cmdset = &default_cmdset; |
889 | 818 | ||
890 | if (__readid(info, &id)) | ||
891 | return -ENODEV; | ||
892 | |||
893 | /* Lookup the flash id */ | ||
894 | id = (id >> 8) & 0xff; /* device id is byte 2 */ | ||
895 | for (i = 0; nand_flash_ids[i].name != NULL; i++) { | ||
896 | if (id == nand_flash_ids[i].id) { | ||
897 | type = &nand_flash_ids[i]; | ||
898 | break; | ||
899 | } | ||
900 | } | ||
901 | |||
902 | if (!type) | ||
903 | return -ENODEV; | ||
904 | |||
905 | /* fill the missing flash information */ | ||
906 | i = __ffs(page_per_block * info->page_size); | ||
907 | num_blocks = type->chipsize << (20 - i); | ||
908 | |||
909 | /* calculate addressing information */ | ||
910 | info->col_addr_cycles = (info->page_size == 2048) ? 2 : 1; | ||
911 | |||
912 | if (num_blocks * page_per_block > 65536) | ||
913 | info->row_addr_cycles = 3; | ||
914 | else | ||
915 | info->row_addr_cycles = 2; | ||
916 | |||
917 | info->ndtr0cs0 = nand_readl(info, NDTR0CS0); | 819 | info->ndtr0cs0 = nand_readl(info, NDTR0CS0); |
918 | info->ndtr1cs0 = nand_readl(info, NDTR1CS0); | 820 | info->ndtr1cs0 = nand_readl(info, NDTR1CS0); |
919 | 821 | ||
920 | return 0; | 822 | return 0; |
921 | } | 823 | } |
922 | 824 | ||
923 | static int pxa3xx_nand_detect_flash(struct pxa3xx_nand_info *info, | ||
924 | const struct pxa3xx_nand_platform_data *pdata) | ||
925 | { | ||
926 | const struct pxa3xx_nand_flash *f; | ||
927 | uint32_t id = -1; | ||
928 | int i; | ||
929 | |||
930 | if (pdata->keep_config) | ||
931 | if (pxa3xx_nand_detect_config(info) == 0) | ||
932 | return 0; | ||
933 | |||
934 | /* we use default timing to detect id */ | ||
935 | f = DEFAULT_FLASH_TYPE; | ||
936 | pxa3xx_nand_config_flash(info, f); | ||
937 | if (__readid(info, &id)) | ||
938 | goto fail_detect; | ||
939 | |||
940 | for (i=0; i<ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; i++) { | ||
941 | /* we first choose the flash definition from platfrom */ | ||
942 | if (i < pdata->num_flash) | ||
943 | f = pdata->flash + i; | ||
944 | else | ||
945 | f = &builtin_flash_types[i - pdata->num_flash + 1]; | ||
946 | if (f->chip_id == id) { | ||
947 | dev_info(&info->pdev->dev, "detect chip id: 0x%x\n", id); | ||
948 | pxa3xx_nand_config_flash(info, f); | ||
949 | return 0; | ||
950 | } | ||
951 | } | ||
952 | |||
953 | dev_warn(&info->pdev->dev, | ||
954 | "failed to detect configured nand flash; found %04x instead of\n", | ||
955 | id); | ||
956 | fail_detect: | ||
957 | return -ENODEV; | ||
958 | } | ||
959 | |||
960 | /* the maximum possible buffer size for large page with OOB data | 825 | /* the maximum possible buffer size for large page with OOB data |
961 | * is: 2048 + 64 = 2112 bytes, allocate a page here for both the | 826 | * is: 2048 + 64 = 2112 bytes, allocate a page here for both the |
962 | * data buffer and the DMA descriptor | 827 | * data buffer and the DMA descriptor |
@@ -998,82 +863,144 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info) | |||
998 | return 0; | 863 | return 0; |
999 | } | 864 | } |
1000 | 865 | ||
1001 | static struct nand_ecclayout hw_smallpage_ecclayout = { | 866 | static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info) |
1002 | .eccbytes = 6, | 867 | { |
1003 | .eccpos = {8, 9, 10, 11, 12, 13 }, | 868 | struct mtd_info *mtd = info->mtd; |
1004 | .oobfree = { {2, 6} } | 869 | struct nand_chip *chip = mtd->priv; |
1005 | }; | ||
1006 | 870 | ||
1007 | static struct nand_ecclayout hw_largepage_ecclayout = { | 871 | /* use the common timing to make a try */ |
1008 | .eccbytes = 24, | 872 | pxa3xx_nand_config_flash(info, &builtin_flash_types[0]); |
1009 | .eccpos = { | 873 | chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0); |
1010 | 40, 41, 42, 43, 44, 45, 46, 47, | 874 | if (info->is_ready) |
1011 | 48, 49, 50, 51, 52, 53, 54, 55, | 875 | return 1; |
1012 | 56, 57, 58, 59, 60, 61, 62, 63}, | 876 | else |
1013 | .oobfree = { {2, 38} } | 877 | return 0; |
1014 | }; | 878 | } |
1015 | 879 | ||
1016 | static void pxa3xx_nand_init_mtd(struct mtd_info *mtd, | 880 | static int pxa3xx_nand_scan(struct mtd_info *mtd) |
1017 | struct pxa3xx_nand_info *info) | ||
1018 | { | 881 | { |
1019 | struct nand_chip *this = &info->nand_chip; | 882 | struct pxa3xx_nand_info *info = mtd->priv; |
1020 | 883 | struct platform_device *pdev = info->pdev; | |
1021 | this->options = (info->reg_ndcr & NDCR_DWIDTH_C) ? NAND_BUSWIDTH_16: 0; | 884 | struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data; |
1022 | 885 | struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} }; | |
1023 | this->waitfunc = pxa3xx_nand_waitfunc; | 886 | const struct pxa3xx_nand_flash *f = NULL; |
1024 | this->select_chip = pxa3xx_nand_select_chip; | 887 | struct nand_chip *chip = mtd->priv; |
1025 | this->dev_ready = pxa3xx_nand_dev_ready; | 888 | uint32_t id = -1; |
1026 | this->cmdfunc = pxa3xx_nand_cmdfunc; | 889 | uint64_t chipsize; |
1027 | this->read_word = pxa3xx_nand_read_word; | 890 | int i, ret, num; |
1028 | this->read_byte = pxa3xx_nand_read_byte; | 891 | |
1029 | this->read_buf = pxa3xx_nand_read_buf; | 892 | if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) |
1030 | this->write_buf = pxa3xx_nand_write_buf; | 893 | goto KEEP_CONFIG; |
1031 | this->verify_buf = pxa3xx_nand_verify_buf; | 894 | |
1032 | 895 | ret = pxa3xx_nand_sensing(info); | |
1033 | this->ecc.mode = NAND_ECC_HW; | 896 | if (!ret) { |
1034 | this->ecc.hwctl = pxa3xx_nand_ecc_hwctl; | 897 | kfree(mtd); |
1035 | this->ecc.calculate = pxa3xx_nand_ecc_calculate; | 898 | info->mtd = NULL; |
1036 | this->ecc.correct = pxa3xx_nand_ecc_correct; | 899 | printk(KERN_INFO "There is no nand chip on cs 0!\n"); |
1037 | this->ecc.size = info->page_size; | 900 | |
1038 | 901 | return -EINVAL; | |
1039 | if (info->page_size == 2048) | 902 | } |
1040 | this->ecc.layout = &hw_largepage_ecclayout; | 903 | |
904 | chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0); | ||
905 | id = *((uint16_t *)(info->data_buff)); | ||
906 | if (id != 0) | ||
907 | printk(KERN_INFO "Detect a flash id %x\n", id); | ||
908 | else { | ||
909 | kfree(mtd); | ||
910 | info->mtd = NULL; | ||
911 | printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n"); | ||
912 | |||
913 | return -EINVAL; | ||
914 | } | ||
915 | |||
916 | num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1; | ||
917 | for (i = 0; i < num; i++) { | ||
918 | if (i < pdata->num_flash) | ||
919 | f = pdata->flash + i; | ||
920 | else | ||
921 | f = &builtin_flash_types[i - pdata->num_flash + 1]; | ||
922 | |||
923 | /* find the chip in default list */ | ||
924 | if (f->chip_id == id) | ||
925 | break; | ||
926 | } | ||
927 | |||
928 | if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) { | ||
929 | kfree(mtd); | ||
930 | info->mtd = NULL; | ||
931 | printk(KERN_ERR "ERROR!! flash not defined!!!\n"); | ||
932 | |||
933 | return -EINVAL; | ||
934 | } | ||
935 | |||
936 | pxa3xx_nand_config_flash(info, f); | ||
937 | pxa3xx_flash_ids[0].name = f->name; | ||
938 | pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; | ||
939 | pxa3xx_flash_ids[0].pagesize = f->page_size; | ||
940 | chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; | ||
941 | pxa3xx_flash_ids[0].chipsize = chipsize >> 20; | ||
942 | pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block; | ||
943 | if (f->flash_width == 16) | ||
944 | pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16; | ||
945 | KEEP_CONFIG: | ||
946 | if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids)) | ||
947 | return -ENODEV; | ||
948 | /* calculate addressing information */ | ||
949 | info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1; | ||
950 | info->oob_buff = info->data_buff + mtd->writesize; | ||
951 | if ((mtd->size >> chip->page_shift) > 65536) | ||
952 | info->row_addr_cycles = 3; | ||
1041 | else | 953 | else |
1042 | this->ecc.layout = &hw_smallpage_ecclayout; | 954 | info->row_addr_cycles = 2; |
955 | mtd->name = mtd_names[0]; | ||
956 | chip->ecc.mode = NAND_ECC_HW; | ||
957 | chip->ecc.size = f->page_size; | ||
958 | |||
959 | chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0; | ||
960 | chip->options |= NAND_NO_AUTOINCR; | ||
961 | chip->options |= NAND_NO_READRDY; | ||
1043 | 962 | ||
1044 | this->chip_delay = 25; | 963 | return nand_scan_tail(mtd); |
1045 | } | 964 | } |
1046 | 965 | ||
1047 | static int pxa3xx_nand_probe(struct platform_device *pdev) | 966 | static |
967 | struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev) | ||
1048 | { | 968 | { |
1049 | struct pxa3xx_nand_platform_data *pdata; | ||
1050 | struct pxa3xx_nand_info *info; | 969 | struct pxa3xx_nand_info *info; |
1051 | struct nand_chip *this; | 970 | struct nand_chip *chip; |
1052 | struct mtd_info *mtd; | 971 | struct mtd_info *mtd; |
1053 | struct resource *r; | 972 | struct resource *r; |
1054 | int ret = 0, irq; | 973 | int ret, irq; |
1055 | |||
1056 | pdata = pdev->dev.platform_data; | ||
1057 | |||
1058 | if (!pdata) { | ||
1059 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
1060 | return -ENODEV; | ||
1061 | } | ||
1062 | 974 | ||
1063 | mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), | 975 | mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info), |
1064 | GFP_KERNEL); | 976 | GFP_KERNEL); |
1065 | if (!mtd) { | 977 | if (!mtd) { |
1066 | dev_err(&pdev->dev, "failed to allocate memory\n"); | 978 | dev_err(&pdev->dev, "failed to allocate memory\n"); |
1067 | return -ENOMEM; | 979 | return NULL; |
1068 | } | 980 | } |
1069 | 981 | ||
1070 | info = (struct pxa3xx_nand_info *)(&mtd[1]); | 982 | info = (struct pxa3xx_nand_info *)(&mtd[1]); |
983 | chip = (struct nand_chip *)(&mtd[1]); | ||
1071 | info->pdev = pdev; | 984 | info->pdev = pdev; |
1072 | 985 | info->mtd = mtd; | |
1073 | this = &info->nand_chip; | ||
1074 | mtd->priv = info; | 986 | mtd->priv = info; |
1075 | mtd->owner = THIS_MODULE; | 987 | mtd->owner = THIS_MODULE; |
1076 | 988 | ||
989 | chip->ecc.read_page = pxa3xx_nand_read_page_hwecc; | ||
990 | chip->ecc.write_page = pxa3xx_nand_write_page_hwecc; | ||
991 | chip->controller = &info->controller; | ||
992 | chip->waitfunc = pxa3xx_nand_waitfunc; | ||
993 | chip->select_chip = pxa3xx_nand_select_chip; | ||
994 | chip->dev_ready = pxa3xx_nand_dev_ready; | ||
995 | chip->cmdfunc = pxa3xx_nand_cmdfunc; | ||
996 | chip->read_word = pxa3xx_nand_read_word; | ||
997 | chip->read_byte = pxa3xx_nand_read_byte; | ||
998 | chip->read_buf = pxa3xx_nand_read_buf; | ||
999 | chip->write_buf = pxa3xx_nand_write_buf; | ||
1000 | chip->verify_buf = pxa3xx_nand_verify_buf; | ||
1001 | |||
1002 | spin_lock_init(&chip->controller->lock); | ||
1003 | init_waitqueue_head(&chip->controller->wq); | ||
1077 | info->clk = clk_get(&pdev->dev, NULL); | 1004 | info->clk = clk_get(&pdev->dev, NULL); |
1078 | if (IS_ERR(info->clk)) { | 1005 | if (IS_ERR(info->clk)) { |
1079 | dev_err(&pdev->dev, "failed to get nand clock\n"); | 1006 | dev_err(&pdev->dev, "failed to get nand clock\n"); |
@@ -1141,43 +1068,12 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1141 | goto fail_free_buf; | 1068 | goto fail_free_buf; |
1142 | } | 1069 | } |
1143 | 1070 | ||
1144 | ret = pxa3xx_nand_detect_flash(info, pdata); | 1071 | platform_set_drvdata(pdev, info); |
1145 | if (ret) { | ||
1146 | dev_err(&pdev->dev, "failed to detect flash\n"); | ||
1147 | ret = -ENODEV; | ||
1148 | goto fail_free_irq; | ||
1149 | } | ||
1150 | |||
1151 | pxa3xx_nand_init_mtd(mtd, info); | ||
1152 | |||
1153 | platform_set_drvdata(pdev, mtd); | ||
1154 | |||
1155 | if (nand_scan(mtd, 1)) { | ||
1156 | dev_err(&pdev->dev, "failed to scan nand\n"); | ||
1157 | ret = -ENXIO; | ||
1158 | goto fail_free_irq; | ||
1159 | } | ||
1160 | |||
1161 | #ifdef CONFIG_MTD_PARTITIONS | ||
1162 | if (mtd_has_cmdlinepart()) { | ||
1163 | static const char *probes[] = { "cmdlinepart", NULL }; | ||
1164 | struct mtd_partition *parts; | ||
1165 | int nr_parts; | ||
1166 | |||
1167 | nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0); | ||
1168 | |||
1169 | if (nr_parts) | ||
1170 | return add_mtd_partitions(mtd, parts, nr_parts); | ||
1171 | } | ||
1172 | 1072 | ||
1173 | return add_mtd_partitions(mtd, pdata->parts, pdata->nr_parts); | 1073 | return info; |
1174 | #else | ||
1175 | return 0; | ||
1176 | #endif | ||
1177 | 1074 | ||
1178 | fail_free_irq: | ||
1179 | free_irq(irq, info); | ||
1180 | fail_free_buf: | 1075 | fail_free_buf: |
1076 | free_irq(irq, info); | ||
1181 | if (use_dma) { | 1077 | if (use_dma) { |
1182 | pxa_free_dma(info->data_dma_ch); | 1078 | pxa_free_dma(info->data_dma_ch); |
1183 | dma_free_coherent(&pdev->dev, info->data_buff_size, | 1079 | dma_free_coherent(&pdev->dev, info->data_buff_size, |
@@ -1193,22 +1089,18 @@ fail_put_clk: | |||
1193 | clk_put(info->clk); | 1089 | clk_put(info->clk); |
1194 | fail_free_mtd: | 1090 | fail_free_mtd: |
1195 | kfree(mtd); | 1091 | kfree(mtd); |
1196 | return ret; | 1092 | return NULL; |
1197 | } | 1093 | } |
1198 | 1094 | ||
1199 | static int pxa3xx_nand_remove(struct platform_device *pdev) | 1095 | static int pxa3xx_nand_remove(struct platform_device *pdev) |
1200 | { | 1096 | { |
1201 | struct mtd_info *mtd = platform_get_drvdata(pdev); | 1097 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1202 | struct pxa3xx_nand_info *info = mtd->priv; | 1098 | struct mtd_info *mtd = info->mtd; |
1203 | struct resource *r; | 1099 | struct resource *r; |
1204 | int irq; | 1100 | int irq; |
1205 | 1101 | ||
1206 | platform_set_drvdata(pdev, NULL); | 1102 | platform_set_drvdata(pdev, NULL); |
1207 | 1103 | ||
1208 | del_mtd_device(mtd); | ||
1209 | #ifdef CONFIG_MTD_PARTITIONS | ||
1210 | del_mtd_partitions(mtd); | ||
1211 | #endif | ||
1212 | irq = platform_get_irq(pdev, 0); | 1104 | irq = platform_get_irq(pdev, 0); |
1213 | if (irq >= 0) | 1105 | if (irq >= 0) |
1214 | free_irq(irq, info); | 1106 | free_irq(irq, info); |
@@ -1226,17 +1118,62 @@ static int pxa3xx_nand_remove(struct platform_device *pdev) | |||
1226 | clk_disable(info->clk); | 1118 | clk_disable(info->clk); |
1227 | clk_put(info->clk); | 1119 | clk_put(info->clk); |
1228 | 1120 | ||
1229 | kfree(mtd); | 1121 | if (mtd) { |
1122 | del_mtd_device(mtd); | ||
1123 | #ifdef CONFIG_MTD_PARTITIONS | ||
1124 | del_mtd_partitions(mtd); | ||
1125 | #endif | ||
1126 | kfree(mtd); | ||
1127 | } | ||
1230 | return 0; | 1128 | return 0; |
1231 | } | 1129 | } |
1232 | 1130 | ||
1131 | static int pxa3xx_nand_probe(struct platform_device *pdev) | ||
1132 | { | ||
1133 | struct pxa3xx_nand_platform_data *pdata; | ||
1134 | struct pxa3xx_nand_info *info; | ||
1135 | |||
1136 | pdata = pdev->dev.platform_data; | ||
1137 | if (!pdata) { | ||
1138 | dev_err(&pdev->dev, "no platform data defined\n"); | ||
1139 | return -ENODEV; | ||
1140 | } | ||
1141 | |||
1142 | info = alloc_nand_resource(pdev); | ||
1143 | if (info == NULL) | ||
1144 | return -ENOMEM; | ||
1145 | |||
1146 | if (pxa3xx_nand_scan(info->mtd)) { | ||
1147 | dev_err(&pdev->dev, "failed to scan nand\n"); | ||
1148 | pxa3xx_nand_remove(pdev); | ||
1149 | return -ENODEV; | ||
1150 | } | ||
1151 | |||
1152 | #ifdef CONFIG_MTD_PARTITIONS | ||
1153 | if (mtd_has_cmdlinepart()) { | ||
1154 | const char *probes[] = { "cmdlinepart", NULL }; | ||
1155 | struct mtd_partition *parts; | ||
1156 | int nr_parts; | ||
1157 | |||
1158 | nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0); | ||
1159 | |||
1160 | if (nr_parts) | ||
1161 | return add_mtd_partitions(info->mtd, parts, nr_parts); | ||
1162 | } | ||
1163 | |||
1164 | return add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts); | ||
1165 | #else | ||
1166 | return 0; | ||
1167 | #endif | ||
1168 | } | ||
1169 | |||
1233 | #ifdef CONFIG_PM | 1170 | #ifdef CONFIG_PM |
1234 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) | 1171 | static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) |
1235 | { | 1172 | { |
1236 | struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev); | 1173 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1237 | struct pxa3xx_nand_info *info = mtd->priv; | 1174 | struct mtd_info *mtd = info->mtd; |
1238 | 1175 | ||
1239 | if (info->state != STATE_READY) { | 1176 | if (info->state) { |
1240 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); | 1177 | dev_err(&pdev->dev, "driver busy, state = %d\n", info->state); |
1241 | return -EAGAIN; | 1178 | return -EAGAIN; |
1242 | } | 1179 | } |
@@ -1246,8 +1183,8 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state) | |||
1246 | 1183 | ||
1247 | static int pxa3xx_nand_resume(struct platform_device *pdev) | 1184 | static int pxa3xx_nand_resume(struct platform_device *pdev) |
1248 | { | 1185 | { |
1249 | struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev); | 1186 | struct pxa3xx_nand_info *info = platform_get_drvdata(pdev); |
1250 | struct pxa3xx_nand_info *info = mtd->priv; | 1187 | struct mtd_info *mtd = info->mtd; |
1251 | 1188 | ||
1252 | nand_writel(info, NDTR0CS0, info->ndtr0cs0); | 1189 | nand_writel(info, NDTR0CS0, info->ndtr0cs0); |
1253 | nand_writel(info, NDTR1CS0, info->ndtr1cs0); | 1190 | nand_writel(info, NDTR1CS0, info->ndtr1cs0); |
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 14a49abe057e..f591f615d3f6 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -629,6 +629,7 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
629 | { | 629 | { |
630 | struct omap_onenand_platform_data *pdata; | 630 | struct omap_onenand_platform_data *pdata; |
631 | struct omap2_onenand *c; | 631 | struct omap2_onenand *c; |
632 | struct onenand_chip *this; | ||
632 | int r; | 633 | int r; |
633 | 634 | ||
634 | pdata = pdev->dev.platform_data; | 635 | pdata = pdev->dev.platform_data; |
@@ -726,9 +727,8 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
726 | 727 | ||
727 | c->mtd.dev.parent = &pdev->dev; | 728 | c->mtd.dev.parent = &pdev->dev; |
728 | 729 | ||
730 | this = &c->onenand; | ||
729 | if (c->dma_channel >= 0) { | 731 | if (c->dma_channel >= 0) { |
730 | struct onenand_chip *this = &c->onenand; | ||
731 | |||
732 | this->wait = omap2_onenand_wait; | 732 | this->wait = omap2_onenand_wait; |
733 | if (cpu_is_omap34xx()) { | 733 | if (cpu_is_omap34xx()) { |
734 | this->read_bufferram = omap3_onenand_read_bufferram; | 734 | this->read_bufferram = omap3_onenand_read_bufferram; |
@@ -749,6 +749,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
749 | c->onenand.disable = omap2_onenand_disable; | 749 | c->onenand.disable = omap2_onenand_disable; |
750 | } | 750 | } |
751 | 751 | ||
752 | if (pdata->skip_initial_unlocking) | ||
753 | this->options |= ONENAND_SKIP_INITIAL_UNLOCKING; | ||
754 | |||
752 | if ((r = onenand_scan(&c->mtd, 1)) < 0) | 755 | if ((r = onenand_scan(&c->mtd, 1)) < 0) |
753 | goto err_release_regulator; | 756 | goto err_release_regulator; |
754 | 757 | ||
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index bac41caa8df7..56a8b2005bda 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -1132,6 +1132,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from, | |||
1132 | onenand_update_bufferram(mtd, from, !ret); | 1132 | onenand_update_bufferram(mtd, from, !ret); |
1133 | if (ret == -EBADMSG) | 1133 | if (ret == -EBADMSG) |
1134 | ret = 0; | 1134 | ret = 0; |
1135 | if (ret) | ||
1136 | break; | ||
1135 | } | 1137 | } |
1136 | 1138 | ||
1137 | this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); | 1139 | this->read_bufferram(mtd, ONENAND_DATARAM, buf, column, thislen); |
@@ -1646,11 +1648,10 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, | |||
1646 | int ret = 0; | 1648 | int ret = 0; |
1647 | int thislen, column; | 1649 | int thislen, column; |
1648 | 1650 | ||
1651 | column = addr & (this->writesize - 1); | ||
1652 | |||
1649 | while (len != 0) { | 1653 | while (len != 0) { |
1650 | thislen = min_t(int, this->writesize, len); | 1654 | thislen = min_t(int, this->writesize - column, len); |
1651 | column = addr & (this->writesize - 1); | ||
1652 | if (column + thislen > this->writesize) | ||
1653 | thislen = this->writesize - column; | ||
1654 | 1655 | ||
1655 | this->command(mtd, ONENAND_CMD_READ, addr, this->writesize); | 1656 | this->command(mtd, ONENAND_CMD_READ, addr, this->writesize); |
1656 | 1657 | ||
@@ -1664,12 +1665,13 @@ static int onenand_verify(struct mtd_info *mtd, const u_char *buf, loff_t addr, | |||
1664 | 1665 | ||
1665 | this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); | 1666 | this->read_bufferram(mtd, ONENAND_DATARAM, this->verify_buf, 0, mtd->writesize); |
1666 | 1667 | ||
1667 | if (memcmp(buf, this->verify_buf, thislen)) | 1668 | if (memcmp(buf, this->verify_buf + column, thislen)) |
1668 | return -EBADMSG; | 1669 | return -EBADMSG; |
1669 | 1670 | ||
1670 | len -= thislen; | 1671 | len -= thislen; |
1671 | buf += thislen; | 1672 | buf += thislen; |
1672 | addr += thislen; | 1673 | addr += thislen; |
1674 | column = 0; | ||
1673 | } | 1675 | } |
1674 | 1676 | ||
1675 | return 0; | 1677 | return 0; |
@@ -4083,7 +4085,8 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) | |||
4083 | mtd->writebufsize = mtd->writesize; | 4085 | mtd->writebufsize = mtd->writesize; |
4084 | 4086 | ||
4085 | /* Unlock whole block */ | 4087 | /* Unlock whole block */ |
4086 | this->unlock_all(mtd); | 4088 | if (!(this->options & ONENAND_SKIP_INITIAL_UNLOCKING)) |
4089 | this->unlock_all(mtd); | ||
4087 | 4090 | ||
4088 | ret = this->scan_bbt(mtd); | 4091 | ret = this->scan_bbt(mtd); |
4089 | if ((!FLEXONENAND(this)) || ret) | 4092 | if ((!FLEXONENAND(this)) || ret) |
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index ac0d6a8613b5..2b0daae4018d 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -64,12 +64,16 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | |||
64 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); | 64 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); |
65 | 65 | ||
66 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); | 66 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); |
67 | if (!vendor) | ||
68 | goto error1; | ||
67 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); | 69 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); |
68 | vendor[vendor_len] = 0; | 70 | vendor[vendor_len] = 0; |
69 | 71 | ||
70 | /* Initialize sysfs attributes */ | 72 | /* Initialize sysfs attributes */ |
71 | vendor_attribute = | 73 | vendor_attribute = |
72 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); | 74 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); |
75 | if (!vendor_attribute) | ||
76 | goto error2; | ||
73 | 77 | ||
74 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); | 78 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); |
75 | 79 | ||
@@ -83,12 +87,24 @@ struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | |||
83 | /* Create array of pointers to the attributes */ | 87 | /* Create array of pointers to the attributes */ |
84 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), | 88 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), |
85 | GFP_KERNEL); | 89 | GFP_KERNEL); |
90 | if (!attributes) | ||
91 | goto error3; | ||
86 | attributes[0] = &vendor_attribute->dev_attr.attr; | 92 | attributes[0] = &vendor_attribute->dev_attr.attr; |
87 | 93 | ||
88 | /* Finally create the attribute group */ | 94 | /* Finally create the attribute group */ |
89 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); | 95 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); |
96 | if (!attr_group) | ||
97 | goto error4; | ||
90 | attr_group->attrs = attributes; | 98 | attr_group->attrs = attributes; |
91 | return attr_group; | 99 | return attr_group; |
100 | error4: | ||
101 | kfree(attributes); | ||
102 | error3: | ||
103 | kfree(vendor_attribute); | ||
104 | error2: | ||
105 | kfree(vendor); | ||
106 | error1: | ||
107 | return NULL; | ||
92 | } | 108 | } |
93 | 109 | ||
94 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) | 110 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) |
@@ -1178,6 +1194,8 @@ static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
1178 | } | 1194 | } |
1179 | 1195 | ||
1180 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); | 1196 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); |
1197 | if (!ftl->disk_attributes) | ||
1198 | goto error6; | ||
1181 | trans->disk_attributes = ftl->disk_attributes; | 1199 | trans->disk_attributes = ftl->disk_attributes; |
1182 | 1200 | ||
1183 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", | 1201 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", |
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c index 161feeb7b8b9..627d4e2466a3 100644 --- a/drivers/mtd/tests/mtd_speedtest.c +++ b/drivers/mtd/tests/mtd_speedtest.c | |||
@@ -16,7 +16,7 @@ | |||
16 | * | 16 | * |
17 | * Test read and write speed of a MTD device. | 17 | * Test read and write speed of a MTD device. |
18 | * | 18 | * |
19 | * Author: Adrian Hunter <ext-adrian.hunter@nokia.com> | 19 | * Author: Adrian Hunter <adrian.hunter@nokia.com> |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
@@ -33,6 +33,11 @@ static int dev; | |||
33 | module_param(dev, int, S_IRUGO); | 33 | module_param(dev, int, S_IRUGO); |
34 | MODULE_PARM_DESC(dev, "MTD device number to use"); | 34 | MODULE_PARM_DESC(dev, "MTD device number to use"); |
35 | 35 | ||
36 | static int count; | ||
37 | module_param(count, int, S_IRUGO); | ||
38 | MODULE_PARM_DESC(count, "Maximum number of eraseblocks to use " | ||
39 | "(0 means use all)"); | ||
40 | |||
36 | static struct mtd_info *mtd; | 41 | static struct mtd_info *mtd; |
37 | static unsigned char *iobuf; | 42 | static unsigned char *iobuf; |
38 | static unsigned char *bbt; | 43 | static unsigned char *bbt; |
@@ -89,6 +94,33 @@ static int erase_eraseblock(int ebnum) | |||
89 | return 0; | 94 | return 0; |
90 | } | 95 | } |
91 | 96 | ||
97 | static int multiblock_erase(int ebnum, int blocks) | ||
98 | { | ||
99 | int err; | ||
100 | struct erase_info ei; | ||
101 | loff_t addr = ebnum * mtd->erasesize; | ||
102 | |||
103 | memset(&ei, 0, sizeof(struct erase_info)); | ||
104 | ei.mtd = mtd; | ||
105 | ei.addr = addr; | ||
106 | ei.len = mtd->erasesize * blocks; | ||
107 | |||
108 | err = mtd->erase(mtd, &ei); | ||
109 | if (err) { | ||
110 | printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n", | ||
111 | err, ebnum, blocks); | ||
112 | return err; | ||
113 | } | ||
114 | |||
115 | if (ei.state == MTD_ERASE_FAILED) { | ||
116 | printk(PRINT_PREF "some erase error occurred at EB %d," | ||
117 | "blocks %d\n", ebnum, blocks); | ||
118 | return -EIO; | ||
119 | } | ||
120 | |||
121 | return 0; | ||
122 | } | ||
123 | |||
92 | static int erase_whole_device(void) | 124 | static int erase_whole_device(void) |
93 | { | 125 | { |
94 | int err; | 126 | int err; |
@@ -282,13 +314,16 @@ static inline void stop_timing(void) | |||
282 | 314 | ||
283 | static long calc_speed(void) | 315 | static long calc_speed(void) |
284 | { | 316 | { |
285 | long ms, k, speed; | 317 | uint64_t k; |
318 | long ms; | ||
286 | 319 | ||
287 | ms = (finish.tv_sec - start.tv_sec) * 1000 + | 320 | ms = (finish.tv_sec - start.tv_sec) * 1000 + |
288 | (finish.tv_usec - start.tv_usec) / 1000; | 321 | (finish.tv_usec - start.tv_usec) / 1000; |
289 | k = goodebcnt * mtd->erasesize / 1024; | 322 | if (ms == 0) |
290 | speed = (k * 1000) / ms; | 323 | return 0; |
291 | return speed; | 324 | k = goodebcnt * (mtd->erasesize / 1024) * 1000; |
325 | do_div(k, ms); | ||
326 | return k; | ||
292 | } | 327 | } |
293 | 328 | ||
294 | static int scan_for_bad_eraseblocks(void) | 329 | static int scan_for_bad_eraseblocks(void) |
@@ -320,13 +355,16 @@ out: | |||
320 | 355 | ||
321 | static int __init mtd_speedtest_init(void) | 356 | static int __init mtd_speedtest_init(void) |
322 | { | 357 | { |
323 | int err, i; | 358 | int err, i, blocks, j, k; |
324 | long speed; | 359 | long speed; |
325 | uint64_t tmp; | 360 | uint64_t tmp; |
326 | 361 | ||
327 | printk(KERN_INFO "\n"); | 362 | printk(KERN_INFO "\n"); |
328 | printk(KERN_INFO "=================================================\n"); | 363 | printk(KERN_INFO "=================================================\n"); |
329 | printk(PRINT_PREF "MTD device: %d\n", dev); | 364 | if (count) |
365 | printk(PRINT_PREF "MTD device: %d count: %d\n", dev, count); | ||
366 | else | ||
367 | printk(PRINT_PREF "MTD device: %d\n", dev); | ||
330 | 368 | ||
331 | mtd = get_mtd_device(NULL, dev); | 369 | mtd = get_mtd_device(NULL, dev); |
332 | if (IS_ERR(mtd)) { | 370 | if (IS_ERR(mtd)) { |
@@ -353,6 +391,9 @@ static int __init mtd_speedtest_init(void) | |||
353 | (unsigned long long)mtd->size, mtd->erasesize, | 391 | (unsigned long long)mtd->size, mtd->erasesize, |
354 | pgsize, ebcnt, pgcnt, mtd->oobsize); | 392 | pgsize, ebcnt, pgcnt, mtd->oobsize); |
355 | 393 | ||
394 | if (count > 0 && count < ebcnt) | ||
395 | ebcnt = count; | ||
396 | |||
356 | err = -ENOMEM; | 397 | err = -ENOMEM; |
357 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); | 398 | iobuf = kmalloc(mtd->erasesize, GFP_KERNEL); |
358 | if (!iobuf) { | 399 | if (!iobuf) { |
@@ -484,6 +525,31 @@ static int __init mtd_speedtest_init(void) | |||
484 | speed = calc_speed(); | 525 | speed = calc_speed(); |
485 | printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); | 526 | printk(PRINT_PREF "erase speed is %ld KiB/s\n", speed); |
486 | 527 | ||
528 | /* Multi-block erase all eraseblocks */ | ||
529 | for (k = 1; k < 7; k++) { | ||
530 | blocks = 1 << k; | ||
531 | printk(PRINT_PREF "Testing %dx multi-block erase speed\n", | ||
532 | blocks); | ||
533 | start_timing(); | ||
534 | for (i = 0; i < ebcnt; ) { | ||
535 | for (j = 0; j < blocks && (i + j) < ebcnt; j++) | ||
536 | if (bbt[i + j]) | ||
537 | break; | ||
538 | if (j < 1) { | ||
539 | i++; | ||
540 | continue; | ||
541 | } | ||
542 | err = multiblock_erase(i, j); | ||
543 | if (err) | ||
544 | goto out; | ||
545 | cond_resched(); | ||
546 | i += j; | ||
547 | } | ||
548 | stop_timing(); | ||
549 | speed = calc_speed(); | ||
550 | printk(PRINT_PREF "%dx multi-block erase speed is %ld KiB/s\n", | ||
551 | blocks, speed); | ||
552 | } | ||
487 | printk(PRINT_PREF "finished\n"); | 553 | printk(PRINT_PREF "finished\n"); |
488 | out: | 554 | out: |
489 | kfree(iobuf); | 555 | kfree(iobuf); |
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c index 11204e8aab5f..334eae53a3db 100644 --- a/drivers/mtd/tests/mtd_subpagetest.c +++ b/drivers/mtd/tests/mtd_subpagetest.c | |||
@@ -394,6 +394,11 @@ static int __init mtd_subpagetest_init(void) | |||
394 | } | 394 | } |
395 | 395 | ||
396 | subpgsize = mtd->writesize >> mtd->subpage_sft; | 396 | subpgsize = mtd->writesize >> mtd->subpage_sft; |
397 | tmp = mtd->size; | ||
398 | do_div(tmp, mtd->erasesize); | ||
399 | ebcnt = tmp; | ||
400 | pgcnt = mtd->erasesize / mtd->writesize; | ||
401 | |||
397 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " | 402 | printk(PRINT_PREF "MTD device size %llu, eraseblock size %u, " |
398 | "page size %u, subpage size %u, count of eraseblocks %u, " | 403 | "page size %u, subpage size %u, count of eraseblocks %u, " |
399 | "pages per eraseblock %u, OOB size %u\n", | 404 | "pages per eraseblock %u, OOB size %u\n", |
@@ -413,11 +418,6 @@ static int __init mtd_subpagetest_init(void) | |||
413 | goto out; | 418 | goto out; |
414 | } | 419 | } |
415 | 420 | ||
416 | tmp = mtd->size; | ||
417 | do_div(tmp, mtd->erasesize); | ||
418 | ebcnt = tmp; | ||
419 | pgcnt = mtd->erasesize / mtd->writesize; | ||
420 | |||
421 | err = scan_for_bad_eraseblocks(); | 421 | err = scan_for_bad_eraseblocks(); |
422 | if (err) | 422 | if (err) |
423 | goto out; | 423 | goto out; |