-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  drivers/mtd/Kconfig | 4
-rw-r--r--  drivers/mtd/chips/Kconfig | 1
-rw-r--r--  drivers/mtd/chips/Makefile | 7
-rw-r--r--  drivers/mtd/chips/amd_flash.c | 8
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 474
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 22
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c | 22
-rw-r--r--  drivers/mtd/chips/cfi_probe.c | 8
-rw-r--r--  drivers/mtd/chips/gen_probe.c | 45
-rw-r--r--  drivers/mtd/chips/map_ram.c | 2
-rw-r--r--  drivers/mtd/chips/map_rom.c | 4
-rw-r--r--  drivers/mtd/chips/sharp.c | 1
-rw-r--r--  drivers/mtd/devices/Kconfig | 6
-rw-r--r--  drivers/mtd/devices/Makefile | 7
-rw-r--r--  drivers/mtd/devices/block2mtd.c | 27
-rw-r--r--  drivers/mtd/devices/doc2000.c | 129
-rw-r--r--  drivers/mtd/devices/doc2001.c | 60
-rw-r--r--  drivers/mtd/devices/doc2001plus.c | 60
-rw-r--r--  drivers/mtd/devices/docprobe.c | 26
-rw-r--r--  drivers/mtd/devices/lart.c | 1
-rw-r--r--  drivers/mtd/devices/m25p80.c | 1
-rw-r--r--  drivers/mtd/devices/ms02-nv.c | 2
-rw-r--r--  drivers/mtd/devices/mtdram.c | 1
-rw-r--r--  drivers/mtd/devices/phram.c | 16
-rw-r--r--  drivers/mtd/devices/slram.c | 3
-rw-r--r--  drivers/mtd/inftlcore.c | 192
-rw-r--r--  drivers/mtd/inftlmount.c | 60
-rw-r--r--  drivers/mtd/maps/Kconfig | 11
-rw-r--r--  drivers/mtd/maps/cfi_flagadm.c | 4
-rw-r--r--  drivers/mtd/maps/dbox2-flash.c | 2
-rw-r--r--  drivers/mtd/maps/mtx-1_flash.c | 2
-rw-r--r--  drivers/mtd/maps/nettel.c | 4
-rw-r--r--  drivers/mtd/maps/pcmciamtd.c | 1
-rw-r--r--  drivers/mtd/maps/physmap.c | 255
-rw-r--r--  drivers/mtd/mtdblock.c | 16
-rw-r--r--  drivers/mtd/mtdblock_ro.c | 4
-rw-r--r--  drivers/mtd/mtdchar.c | 327
-rw-r--r--  drivers/mtd/mtdconcat.c | 335
-rw-r--r--  drivers/mtd/mtdcore.c | 33
-rw-r--r--  drivers/mtd/mtdpart.c | 171
-rw-r--r--  drivers/mtd/nand/Kconfig | 57
-rw-r--r--  drivers/mtd/nand/Makefile | 4
-rw-r--r--  drivers/mtd/nand/ams-delta.c | 237
-rw-r--r--  drivers/mtd/nand/au1550nd.c | 321
-rw-r--r--  drivers/mtd/nand/autcpu12.c | 125
-rw-r--r--  drivers/mtd/nand/cs553x_nand.c | 353
-rw-r--r--  drivers/mtd/nand/diskonchip.c | 530
-rw-r--r--  drivers/mtd/nand/edb7312.c | 97
-rw-r--r--  drivers/mtd/nand/h1910.c | 98
-rw-r--r--  drivers/mtd/nand/nand_base.c | 3249
-rw-r--r--  drivers/mtd/nand/nand_bbt.c | 502
-rw-r--r--  drivers/mtd/nand/nand_ecc.c | 227
-rw-r--r--  drivers/mtd/nand/nand_ids.c | 177
-rw-r--r--  drivers/mtd/nand/nandsim.c | 95
-rw-r--r--  drivers/mtd/nand/ndfc.c | 311
-rw-r--r--  drivers/mtd/nand/ppchameleonevb.c | 256
-rw-r--r--  drivers/mtd/nand/rtc_from4.c | 351
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 252
-rw-r--r--  drivers/mtd/nand/sharpsl.c | 146
-rw-r--r--  drivers/mtd/nand/spia.c | 101
-rw-r--r--  drivers/mtd/nand/toto.c | 121
-rw-r--r--  drivers/mtd/nand/ts7250.c | 206
-rw-r--r--  drivers/mtd/nftlcore.c | 221
-rw-r--r--  drivers/mtd/nftlmount.c | 91
-rw-r--r--  drivers/mtd/onenand/Kconfig | 14
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 718
-rw-r--r--  drivers/mtd/onenand/onenand_bbt.c | 9
-rw-r--r--  drivers/mtd/redboot.c | 18
-rw-r--r--  drivers/mtd/rfd_ftl.c | 48
-rw-r--r--  fs/Kconfig | 38
-rw-r--r--  fs/jffs/intrep.c | 15
-rw-r--r--  fs/jffs2/Makefile | 3
-rw-r--r--  fs/jffs2/README.Locking | 21
-rw-r--r--  fs/jffs2/acl.c | 485
-rw-r--r--  fs/jffs2/acl.h | 45
-rw-r--r--  fs/jffs2/build.c | 2
-rw-r--r--  fs/jffs2/compr.c | 2
-rw-r--r--  fs/jffs2/compr.h | 4
-rw-r--r--  fs/jffs2/debug.c | 14
-rw-r--r--  fs/jffs2/debug.h | 6
-rw-r--r--  fs/jffs2/dir.c | 121
-rw-r--r--  fs/jffs2/erase.c | 56
-rw-r--r--  fs/jffs2/file.c | 35
-rw-r--r--  fs/jffs2/fs.c | 63
-rw-r--r--  fs/jffs2/gc.c | 131
-rw-r--r--  fs/jffs2/histo.h | 3
-rw-r--r--  fs/jffs2/jffs2_fs_i.h (renamed from include/linux/jffs2_fs_i.h) | 5
-rw-r--r--  fs/jffs2/jffs2_fs_sb.h (renamed from include/linux/jffs2_fs_sb.h) | 13
-rw-r--r--  fs/jffs2/malloc.c | 127
-rw-r--r--  fs/jffs2/nodelist.c | 183
-rw-r--r--  fs/jffs2/nodelist.h | 189
-rw-r--r--  fs/jffs2/nodemgmt.c | 196
-rw-r--r--  fs/jffs2/os-linux.h | 23
-rw-r--r--  fs/jffs2/readinode.c | 119
-rw-r--r--  fs/jffs2/scan.c | 442
-rw-r--r--  fs/jffs2/security.c | 82
-rw-r--r--  fs/jffs2/summary.c | 484
-rw-r--r--  fs/jffs2/summary.h | 64
-rw-r--r--  fs/jffs2/super.c | 20
-rw-r--r--  fs/jffs2/symlink.c | 7
-rw-r--r--  fs/jffs2/wbuf.c | 968
-rw-r--r--  fs/jffs2/write.c | 147
-rw-r--r--  fs/jffs2/xattr.c | 1238
-rw-r--r--  fs/jffs2/xattr.h | 116
-rw-r--r--  fs/jffs2/xattr_trusted.c | 52
-rw-r--r--  fs/jffs2/xattr_user.c | 52
-rw-r--r--  include/linux/jffs2.h | 56
-rw-r--r--  include/linux/module.h | 9
-rw-r--r--  include/linux/mtd/inftl.h | 2
-rw-r--r--  include/linux/mtd/mtd.h | 97
-rw-r--r--  include/linux/mtd/nand.h | 354
-rw-r--r--  include/linux/mtd/ndfc.h | 67
-rw-r--r--  include/linux/mtd/nftl.h | 2
-rw-r--r--  include/linux/mtd/onenand.h | 11
-rw-r--r--  include/linux/mtd/onenand_regs.h | 8
-rw-r--r--  include/linux/mtd/partitions.h | 2
-rw-r--r--  include/linux/mtd/physmap.h | 29
-rw-r--r--  include/mtd/mtd-abi.h | 90
-rw-r--r--  include/mtd/mtd-user.h | 1
-rw-r--r--  init/Kconfig | 3
-rw-r--r--  init/do_mounts.c | 4
-rw-r--r--  kernel/Makefile | 1
-rw-r--r--  kernel/intermodule.c | 184
124 files changed, 11074 insertions, 6710 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 854a152e5e95..ce37c4b1ef94 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1843,12 +1843,12 @@ S: linux-scsi@vger.kernel.org
1843W: http://megaraid.lsilogic.com 1843W: http://megaraid.lsilogic.com
1844S: Maintained 1844S: Maintained
1845 1845
1846MEMORY TECHNOLOGY DEVICES 1846MEMORY TECHNOLOGY DEVICES (MTD)
1847P: David Woodhouse 1847P: David Woodhouse
1848M: dwmw2@infradead.org 1848M: dwmw2@infradead.org
1849W: http://www.linux-mtd.infradead.org/ 1849W: http://www.linux-mtd.infradead.org/
1850L: linux-mtd@lists.infradead.org 1850L: linux-mtd@lists.infradead.org
1851T: git kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6.git 1851T: git git://git.infradead.org/mtd-2.6.git
1852S: Maintained 1852S: Maintained
1853 1853
1854MICROTEK X6 SCANNER 1854MICROTEK X6 SCANNER
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index f6b775e63ac8..5ac265dde423 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -78,7 +78,7 @@ config MTD_REDBOOT_DIRECTORY_BLOCK
78 option. 78 option.
79 79
80 The option specifies which Flash sectors holds the RedBoot 80 The option specifies which Flash sectors holds the RedBoot
81 partition table. A zero or positive value gives an absolete 81 partition table. A zero or positive value gives an absolute
82 erase block number. A negative value specifies a number of 82 erase block number. A negative value specifies a number of
83 sectors before the end of the device. 83 sectors before the end of the device.
84 84
@@ -103,7 +103,7 @@ config MTD_CMDLINE_PARTS
103 bool "Command line partition table parsing" 103 bool "Command line partition table parsing"
104 depends on MTD_PARTITIONS = "y" 104 depends on MTD_PARTITIONS = "y"
105 ---help--- 105 ---help---
106 Allow generic configuration of the MTD paritition tables via the kernel 106 Allow generic configuration of the MTD partition tables via the kernel
107 command line. Multiple flash resources are supported for hardware where 107 command line. Multiple flash resources are supported for hardware where
108 different kinds of flash memory are available. 108 different kinds of flash memory are available.
109 109
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index a7ec5954caf5..6d8f30deb868 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -30,7 +30,6 @@ config MTD_JEDECPROBE
30 30
31config MTD_GEN_PROBE 31config MTD_GEN_PROBE
32 tristate 32 tristate
33 select OBSOLETE_INTERMODULE
34 33
35config MTD_CFI_ADV_OPTIONS 34config MTD_CFI_ADV_OPTIONS
36 bool "Flash chip driver advanced configuration options" 35 bool "Flash chip driver advanced configuration options"
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 8afe3092c4e3..75bc1c2a0f43 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -3,13 +3,6 @@
3# 3#
4# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $ 4# $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $
5 5
6# *** BIG UGLY NOTE ***
7#
8# The removal of get_module_symbol() and replacement with
9# inter_module_register() et al has introduced a link order dependency
10# here where previously there was none. We now have to ensure that
11# the CFI command set drivers are linked before gen_probe.o
12
13obj-$(CONFIG_MTD) += chipreg.o 6obj-$(CONFIG_MTD) += chipreg.o
14obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o 7obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o
15obj-$(CONFIG_MTD_CFI) += cfi_probe.o 8obj-$(CONFIG_MTD_CFI) += cfi_probe.o
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index 57115618c496..16eaca69fb5a 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -97,7 +97,6 @@ struct amd_flash_private {
97 int interleave; 97 int interleave;
98 int numchips; 98 int numchips;
99 unsigned long chipshift; 99 unsigned long chipshift;
100// const char *im_name;
101 struct flchip chips[0]; 100 struct flchip chips[0];
102}; 101};
103 102
@@ -131,12 +130,6 @@ static struct mtd_chip_driver amd_flash_chipdrv = {
131 .module = THIS_MODULE 130 .module = THIS_MODULE
132}; 131};
133 132
134
135
136static const char im_name[] = "amd_flash";
137
138
139
140static inline __u32 wide_read(struct map_info *map, __u32 addr) 133static inline __u32 wide_read(struct map_info *map, __u32 addr)
141{ 134{
142 if (map->buswidth == 1) { 135 if (map->buswidth == 1) {
@@ -737,6 +730,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
737 offset += dev_size; 730 offset += dev_size;
738 } 731 }
739 mtd->type = MTD_NORFLASH; 732 mtd->type = MTD_NORFLASH;
733 mtd->writesize = 1;
740 mtd->flags = MTD_CAP_NORFLASH; 734 mtd->flags = MTD_CAP_NORFLASH;
741 mtd->name = map->name; 735 mtd->name = map->name;
742 mtd->erase = amd_flash_erase; 736 mtd->erase = amd_flash_erase;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1c074d63ff3a..0d435814aaa1 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -331,13 +331,6 @@ read_pri_intelext(struct map_info *map, __u16 adr)
331 return extp; 331 return extp;
332} 332}
333 333
334/* This routine is made available to other mtd code via
335 * inter_module_register. It must only be accessed through
336 * inter_module_get which will bump the use count of this module. The
337 * addresses passed back in cfi are valid as long as the use count of
338 * this module is non-zero, i.e. between inter_module_get and
339 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
340 */
341struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) 334struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
342{ 335{
343 struct cfi_private *cfi = map->fldrv_priv; 336 struct cfi_private *cfi = map->fldrv_priv;
@@ -406,7 +399,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
406 for (i=0; i< cfi->numchips; i++) { 399 for (i=0; i< cfi->numchips; i++) {
407 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; 400 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
408 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; 401 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
409 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; 402 cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
410 cfi->chips[i].ref_point_counter = 0; 403 cfi->chips[i].ref_point_counter = 0;
411 init_waitqueue_head(&(cfi->chips[i].wq)); 404 init_waitqueue_head(&(cfi->chips[i].wq));
412 } 405 }
@@ -415,6 +408,11 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
415 408
416 return cfi_intelext_setup(mtd); 409 return cfi_intelext_setup(mtd);
417} 410}
411struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
412struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
413EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
414EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
415EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
418 416
419static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) 417static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
420{ 418{
@@ -547,12 +545,12 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
547 if (extp->MinorVersion >= '4') { 545 if (extp->MinorVersion >= '4') {
548 struct cfi_intelext_programming_regioninfo *prinfo; 546 struct cfi_intelext_programming_regioninfo *prinfo;
549 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs]; 547 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
550 MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift; 548 mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
551 MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid; 549 MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
552 MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid; 550 MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
553 mtd->flags |= MTD_PROGRAM_REGIONS; 551 mtd->flags &= ~MTD_BIT_WRITEABLE;
554 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n", 552 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
555 map->name, MTD_PROGREGION_SIZE(mtd), 553 map->name, mtd->writesize,
556 MTD_PROGREGION_CTRLMODE_VALID(mtd), 554 MTD_PROGREGION_CTRLMODE_VALID(mtd),
557 MTD_PROGREGION_CTRLMODE_INVALID(mtd)); 555 MTD_PROGREGION_CTRLMODE_INVALID(mtd));
558 } 556 }
@@ -896,26 +894,33 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
896 894
897/* 895/*
898 * When a delay is required for the flash operation to complete, the 896 * When a delay is required for the flash operation to complete, the
899 * xip_udelay() function is polling for both the given timeout and pending 897 * xip_wait_for_operation() function is polling for both the given timeout
900 * (but still masked) hardware interrupts. Whenever there is an interrupt 898 * and pending (but still masked) hardware interrupts. Whenever there is an
901 * pending then the flash erase or write operation is suspended, array mode 899 * interrupt pending then the flash erase or write operation is suspended,
902 * restored and interrupts unmasked. Task scheduling might also happen at that 900 * array mode restored and interrupts unmasked. Task scheduling might also
903 * point. The CPU eventually returns from the interrupt or the call to 901 * happen at that point. The CPU eventually returns from the interrupt or
904 * schedule() and the suspended flash operation is resumed for the remaining 902 * the call to schedule() and the suspended flash operation is resumed for
905 * of the delay period. 903 * the remaining of the delay period.
906 * 904 *
907 * Warning: this function _will_ fool interrupt latency tracing tools. 905 * Warning: this function _will_ fool interrupt latency tracing tools.
908 */ 906 */
909 907
910static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, 908static int __xipram xip_wait_for_operation(
911 unsigned long adr, int usec) 909 struct map_info *map, struct flchip *chip,
910 unsigned long adr, int *chip_op_time )
912{ 911{
913 struct cfi_private *cfi = map->fldrv_priv; 912 struct cfi_private *cfi = map->fldrv_priv;
914 struct cfi_pri_intelext *cfip = cfi->cmdset_priv; 913 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
915 map_word status, OK = CMD(0x80); 914 map_word status, OK = CMD(0x80);
916 unsigned long suspended, start = xip_currtime(); 915 unsigned long usec, suspended, start, done;
917 flstate_t oldstate, newstate; 916 flstate_t oldstate, newstate;
918 917
918 start = xip_currtime();
919 usec = *chip_op_time * 8;
920 if (usec == 0)
921 usec = 500000;
922 done = 0;
923
919 do { 924 do {
920 cpu_relax(); 925 cpu_relax();
921 if (xip_irqpending() && cfip && 926 if (xip_irqpending() && cfip &&
@@ -932,9 +937,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
932 * we resume the whole thing at once). Yes, it 937 * we resume the whole thing at once). Yes, it
933 * can happen! 938 * can happen!
934 */ 939 */
940 usec -= done;
935 map_write(map, CMD(0xb0), adr); 941 map_write(map, CMD(0xb0), adr);
936 map_write(map, CMD(0x70), adr); 942 map_write(map, CMD(0x70), adr);
937 usec -= xip_elapsed_since(start);
938 suspended = xip_currtime(); 943 suspended = xip_currtime();
939 do { 944 do {
940 if (xip_elapsed_since(suspended) > 100000) { 945 if (xip_elapsed_since(suspended) > 100000) {
@@ -944,7 +949,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
944 * This is a critical error but there 949 * This is a critical error but there
945 * is not much we can do here. 950 * is not much we can do here.
946 */ 951 */
947 return; 952 return -EIO;
948 } 953 }
949 status = map_read(map, adr); 954 status = map_read(map, adr);
950 } while (!map_word_andequal(map, status, OK, OK)); 955 } while (!map_word_andequal(map, status, OK, OK));
@@ -1004,65 +1009,107 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
1004 xip_cpu_idle(); 1009 xip_cpu_idle();
1005 } 1010 }
1006 status = map_read(map, adr); 1011 status = map_read(map, adr);
1012 done = xip_elapsed_since(start);
1007 } while (!map_word_andequal(map, status, OK, OK) 1013 } while (!map_word_andequal(map, status, OK, OK)
1008 && xip_elapsed_since(start) < usec); 1014 && done < usec);
1009}
1010 1015
1011#define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) 1016 return (done >= usec) ? -ETIME : 0;
1017}
1012 1018
1013/* 1019/*
1014 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while 1020 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1015 * the flash is actively programming or erasing since we have to poll for 1021 * the flash is actively programming or erasing since we have to poll for
1016 * the operation to complete anyway. We can't do that in a generic way with 1022 * the operation to complete anyway. We can't do that in a generic way with
1017 * a XIP setup so do it before the actual flash operation in this case 1023 * a XIP setup so do it before the actual flash operation in this case
1018 * and stub it out from INVALIDATE_CACHE_UDELAY. 1024 * and stub it out from INVAL_CACHE_AND_WAIT.
1019 */ 1025 */
1020#define XIP_INVAL_CACHED_RANGE(map, from, size) \ 1026#define XIP_INVAL_CACHED_RANGE(map, from, size) \
1021 INVALIDATE_CACHED_RANGE(map, from, size) 1027 INVALIDATE_CACHED_RANGE(map, from, size)
1022 1028
1023#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \ 1029#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
1024 UDELAY(map, chip, cmd_adr, usec) 1030 xip_wait_for_operation(map, chip, cmd_adr, p_usec)
1025
1026/*
1027 * Extra notes:
1028 *
1029 * Activating this XIP support changes the way the code works a bit. For
1030 * example the code to suspend the current process when concurrent access
1031 * happens is never executed because xip_udelay() will always return with the
1032 * same chip state as it was entered with. This is why there is no care for
1033 * the presence of add_wait_queue() or schedule() calls from within a couple
1034 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
1035 * The queueing and scheduling are always happening within xip_udelay().
1036 *
1037 * Similarly, get_chip() and put_chip() just happen to always be executed
1038 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1039 * is in array mode, therefore never executing many cases therein and not
1040 * causing any problem with XIP.
1041 */
1042 1031
1043#else 1032#else
1044 1033
1045#define xip_disable(map, chip, adr) 1034#define xip_disable(map, chip, adr)
1046#define xip_enable(map, chip, adr) 1035#define xip_enable(map, chip, adr)
1047#define XIP_INVAL_CACHED_RANGE(x...) 1036#define XIP_INVAL_CACHED_RANGE(x...)
1037#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1038
1039static int inval_cache_and_wait_for_operation(
1040 struct map_info *map, struct flchip *chip,
1041 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1042 int *chip_op_time )
1043{
1044 struct cfi_private *cfi = map->fldrv_priv;
1045 map_word status, status_OK = CMD(0x80);
1046 int z, chip_state = chip->state;
1047 unsigned long timeo;
1048
1049 spin_unlock(chip->mutex);
1050 if (inval_len)
1051 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1052 if (*chip_op_time)
1053 cfi_udelay(*chip_op_time);
1054 spin_lock(chip->mutex);
1055
1056 timeo = *chip_op_time * 8 * HZ / 1000000;
1057 if (timeo < HZ/2)
1058 timeo = HZ/2;
1059 timeo += jiffies;
1060
1061 z = 0;
1062 for (;;) {
1063 if (chip->state != chip_state) {
1064 /* Someone's suspended the operation: sleep */
1065 DECLARE_WAITQUEUE(wait, current);
1066
1067 set_current_state(TASK_UNINTERRUPTIBLE);
1068 add_wait_queue(&chip->wq, &wait);
1069 spin_unlock(chip->mutex);
1070 schedule();
1071 remove_wait_queue(&chip->wq, &wait);
1072 timeo = jiffies + (HZ / 2); /* FIXME */
1073 spin_lock(chip->mutex);
1074 continue;
1075 }
1048 1076
1049#define UDELAY(map, chip, adr, usec) \ 1077 status = map_read(map, cmd_adr);
1050do { \ 1078 if (map_word_andequal(map, status, status_OK, status_OK))
1051 spin_unlock(chip->mutex); \ 1079 break;
1052 cfi_udelay(usec); \ 1080
1053 spin_lock(chip->mutex); \ 1081 /* OK Still waiting */
1054} while (0) 1082 if (time_after(jiffies, timeo)) {
1055 1083 map_write(map, CMD(0x70), cmd_adr);
1056#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \ 1084 chip->state = FL_STATUS;
1057do { \ 1085 return -ETIME;
1058 spin_unlock(chip->mutex); \ 1086 }
1059 INVALIDATE_CACHED_RANGE(map, adr, len); \ 1087
1060 cfi_udelay(usec); \ 1088 /* Latency issues. Drop the lock, wait a while and retry */
1061 spin_lock(chip->mutex); \ 1089 z++;
1062} while (0) 1090 spin_unlock(chip->mutex);
1091 cfi_udelay(1);
1092 spin_lock(chip->mutex);
1093 }
1094
1095 if (!z) {
1096 if (!--(*chip_op_time))
1097 *chip_op_time = 1;
1098 } else if (z > 1)
1099 ++(*chip_op_time);
1100
1101 /* Done and happy. */
1102 chip->state = FL_STATUS;
1103 return 0;
1104}
1063 1105
1064#endif 1106#endif
1065 1107
1108#define WAIT_TIMEOUT(map, chip, adr, udelay) \
1109 ({ int __udelay = (udelay); \
1110 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
1111
1112
1066static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) 1113static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1067{ 1114{
1068 unsigned long cmd_addr; 1115 unsigned long cmd_addr;
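[Editor's note, not part of the patch: the new WAIT_TIMEOUT() macro above uses a GCC statement expression so that a plain timeout value can be handed to the wait routine by address; inval_cache_and_wait_for_operation() and xip_wait_for_operation() use that pointer to adaptively tune the chip's typical operation time. A minimal userspace sketch of the idiom, with hypothetical names:]

    /* Sketch only (GCC extension): the pass-by-pointer timeout pattern behind
     * WAIT_TIMEOUT(). wait_for_ready() stands in for the real wait routine and
     * may shorten the caller's typical delay when the operation finishes early. */
    #include <stdio.h>

    static int wait_for_ready(int *usec)
    {
            /* pretend the device finished quickly: adapt the typical delay */
            if (*usec > 1)
                    *usec /= 2;
            return 0;
    }

    /* Statement expression: gives the callee a local lvalue to update while
     * the whole macro still evaluates to the callee's return value. */
    #define WAIT(usec) ({ int __usec = (usec); wait_for_ready(&__usec); })

    int main(void)
    {
            printf("%d\n", WAIT(100));      /* prints 0 */
            return 0;
    }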
@@ -1252,14 +1299,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1252 unsigned long adr, map_word datum, int mode) 1299 unsigned long adr, map_word datum, int mode)
1253{ 1300{
1254 struct cfi_private *cfi = map->fldrv_priv; 1301 struct cfi_private *cfi = map->fldrv_priv;
1255 map_word status, status_OK, write_cmd; 1302 map_word status, write_cmd;
1256 unsigned long timeo; 1303 int ret=0;
1257 int z, ret=0;
1258 1304
1259 adr += chip->start; 1305 adr += chip->start;
1260 1306
1261 /* Let's determine those according to the interleave only once */
1262 status_OK = CMD(0x80);
1263 switch (mode) { 1307 switch (mode) {
1264 case FL_WRITING: 1308 case FL_WRITING:
1265 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41); 1309 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
@@ -1285,57 +1329,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1285 map_write(map, datum, adr); 1329 map_write(map, datum, adr);
1286 chip->state = mode; 1330 chip->state = mode;
1287 1331
1288 INVALIDATE_CACHE_UDELAY(map, chip, adr, 1332 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1289 adr, map_bankwidth(map), 1333 adr, map_bankwidth(map),
1290 chip->word_write_time); 1334 &chip->word_write_time);
1291 1335 if (ret) {
1292 timeo = jiffies + (HZ/2); 1336 xip_enable(map, chip, adr);
1293 z = 0; 1337 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1294 for (;;) { 1338 goto out;
1295 if (chip->state != mode) {
1296 /* Someone's suspended the write. Sleep */
1297 DECLARE_WAITQUEUE(wait, current);
1298
1299 set_current_state(TASK_UNINTERRUPTIBLE);
1300 add_wait_queue(&chip->wq, &wait);
1301 spin_unlock(chip->mutex);
1302 schedule();
1303 remove_wait_queue(&chip->wq, &wait);
1304 timeo = jiffies + (HZ / 2); /* FIXME */
1305 spin_lock(chip->mutex);
1306 continue;
1307 }
1308
1309 status = map_read(map, adr);
1310 if (map_word_andequal(map, status, status_OK, status_OK))
1311 break;
1312
1313 /* OK Still waiting */
1314 if (time_after(jiffies, timeo)) {
1315 map_write(map, CMD(0x70), adr);
1316 chip->state = FL_STATUS;
1317 xip_enable(map, chip, adr);
1318 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1319 ret = -EIO;
1320 goto out;
1321 }
1322
1323 /* Latency issues. Drop the lock, wait a while and retry */
1324 z++;
1325 UDELAY(map, chip, adr, 1);
1326 }
1327 if (!z) {
1328 chip->word_write_time--;
1329 if (!chip->word_write_time)
1330 chip->word_write_time = 1;
1331 } 1339 }
1332 if (z > 1)
1333 chip->word_write_time++;
1334
1335 /* Done and happy. */
1336 chip->state = FL_STATUS;
1337 1340
1338 /* check for errors */ 1341 /* check for errors */
1342 status = map_read(map, adr);
1339 if (map_word_bitsset(map, status, CMD(0x1a))) { 1343 if (map_word_bitsset(map, status, CMD(0x1a))) {
1340 unsigned long chipstatus = MERGESTATUS(status); 1344 unsigned long chipstatus = MERGESTATUS(status);
1341 1345
@@ -1452,9 +1456,9 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1452 unsigned long *pvec_seek, int len) 1456 unsigned long *pvec_seek, int len)
1453{ 1457{
1454 struct cfi_private *cfi = map->fldrv_priv; 1458 struct cfi_private *cfi = map->fldrv_priv;
1455 map_word status, status_OK, write_cmd, datum; 1459 map_word status, write_cmd, datum;
1456 unsigned long cmd_adr, timeo; 1460 unsigned long cmd_adr;
1457 int wbufsize, z, ret=0, word_gap, words; 1461 int ret, wbufsize, word_gap, words;
1458 const struct kvec *vec; 1462 const struct kvec *vec;
1459 unsigned long vec_seek; 1463 unsigned long vec_seek;
1460 1464
@@ -1463,7 +1467,6 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1463 cmd_adr = adr & ~(wbufsize-1); 1467 cmd_adr = adr & ~(wbufsize-1);
1464 1468
1465 /* Let's determine this according to the interleave only once */ 1469 /* Let's determine this according to the interleave only once */
1466 status_OK = CMD(0x80);
1467 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9); 1470 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1468 1471
1469 spin_lock(chip->mutex); 1472 spin_lock(chip->mutex);
@@ -1477,12 +1480,14 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1477 ENABLE_VPP(map); 1480 ENABLE_VPP(map);
1478 xip_disable(map, chip, cmd_adr); 1481 xip_disable(map, chip, cmd_adr);
1479 1482
1480 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set 1483 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1481 [...], the device will not accept any more Write to Buffer commands". 1484 [...], the device will not accept any more Write to Buffer commands".
1482 So we must check here and reset those bits if they're set. Otherwise 1485 So we must check here and reset those bits if they're set. Otherwise
1483 we're just pissing in the wind */ 1486 we're just pissing in the wind */
1484 if (chip->state != FL_STATUS) 1487 if (chip->state != FL_STATUS) {
1485 map_write(map, CMD(0x70), cmd_adr); 1488 map_write(map, CMD(0x70), cmd_adr);
1489 chip->state = FL_STATUS;
1490 }
1486 status = map_read(map, cmd_adr); 1491 status = map_read(map, cmd_adr);
1487 if (map_word_bitsset(map, status, CMD(0x30))) { 1492 if (map_word_bitsset(map, status, CMD(0x30))) {
1488 xip_enable(map, chip, cmd_adr); 1493 xip_enable(map, chip, cmd_adr);
@@ -1493,32 +1498,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1493 } 1498 }
1494 1499
1495 chip->state = FL_WRITING_TO_BUFFER; 1500 chip->state = FL_WRITING_TO_BUFFER;
1496 1501 map_write(map, write_cmd, cmd_adr);
1497 z = 0; 1502 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1498 for (;;) { 1503 if (ret) {
1499 map_write(map, write_cmd, cmd_adr); 1504 /* Argh. Not ready for write to buffer */
1500 1505 map_word Xstatus = map_read(map, cmd_adr);
1506 map_write(map, CMD(0x70), cmd_adr);
1507 chip->state = FL_STATUS;
1501 status = map_read(map, cmd_adr); 1508 status = map_read(map, cmd_adr);
1502 if (map_word_andequal(map, status, status_OK, status_OK)) 1509 map_write(map, CMD(0x50), cmd_adr);
1503 break; 1510 map_write(map, CMD(0x70), cmd_adr);
1504 1511 xip_enable(map, chip, cmd_adr);
1505 UDELAY(map, chip, cmd_adr, 1); 1512 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1506 1513 map->name, Xstatus.x[0], status.x[0]);
1507 if (++z > 20) { 1514 goto out;
1508 /* Argh. Not ready for write to buffer */
1509 map_word Xstatus;
1510 map_write(map, CMD(0x70), cmd_adr);
1511 chip->state = FL_STATUS;
1512 Xstatus = map_read(map, cmd_adr);
1513 /* Odd. Clear status bits */
1514 map_write(map, CMD(0x50), cmd_adr);
1515 map_write(map, CMD(0x70), cmd_adr);
1516 xip_enable(map, chip, cmd_adr);
1517 printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1518 map->name, status.x[0], Xstatus.x[0]);
1519 ret = -EIO;
1520 goto out;
1521 }
1522 } 1515 }
1523 1516
1524 /* Figure out the number of words to write */ 1517 /* Figure out the number of words to write */
@@ -1573,56 +1566,19 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1573 map_write(map, CMD(0xd0), cmd_adr); 1566 map_write(map, CMD(0xd0), cmd_adr);
1574 chip->state = FL_WRITING; 1567 chip->state = FL_WRITING;
1575 1568
1576 INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, 1569 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1577 adr, len, 1570 adr, len,
1578 chip->buffer_write_time); 1571 &chip->buffer_write_time);
1579 1572 if (ret) {
1580 timeo = jiffies + (HZ/2); 1573 map_write(map, CMD(0x70), cmd_adr);
1581 z = 0; 1574 chip->state = FL_STATUS;
1582 for (;;) { 1575 xip_enable(map, chip, cmd_adr);
1583 if (chip->state != FL_WRITING) { 1576 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1584 /* Someone's suspended the write. Sleep */ 1577 goto out;
1585 DECLARE_WAITQUEUE(wait, current);
1586 set_current_state(TASK_UNINTERRUPTIBLE);
1587 add_wait_queue(&chip->wq, &wait);
1588 spin_unlock(chip->mutex);
1589 schedule();
1590 remove_wait_queue(&chip->wq, &wait);
1591 timeo = jiffies + (HZ / 2); /* FIXME */
1592 spin_lock(chip->mutex);
1593 continue;
1594 }
1595
1596 status = map_read(map, cmd_adr);
1597 if (map_word_andequal(map, status, status_OK, status_OK))
1598 break;
1599
1600 /* OK Still waiting */
1601 if (time_after(jiffies, timeo)) {
1602 map_write(map, CMD(0x70), cmd_adr);
1603 chip->state = FL_STATUS;
1604 xip_enable(map, chip, cmd_adr);
1605 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1606 ret = -EIO;
1607 goto out;
1608 }
1609
1610 /* Latency issues. Drop the lock, wait a while and retry */
1611 z++;
1612 UDELAY(map, chip, cmd_adr, 1);
1613 }
1614 if (!z) {
1615 chip->buffer_write_time--;
1616 if (!chip->buffer_write_time)
1617 chip->buffer_write_time = 1;
1618 } 1578 }
1619 if (z > 1)
1620 chip->buffer_write_time++;
1621
1622 /* Done and happy. */
1623 chip->state = FL_STATUS;
1624 1579
1625 /* check for errors */ 1580 /* check for errors */
1581 status = map_read(map, cmd_adr);
1626 if (map_word_bitsset(map, status, CMD(0x1a))) { 1582 if (map_word_bitsset(map, status, CMD(0x1a))) {
1627 unsigned long chipstatus = MERGESTATUS(status); 1583 unsigned long chipstatus = MERGESTATUS(status);
1628 1584
@@ -1693,6 +1649,11 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1693 if (chipnum == cfi->numchips) 1649 if (chipnum == cfi->numchips)
1694 return 0; 1650 return 0;
1695 } 1651 }
1652
1653 /* Be nice and reschedule with the chip in a usable state for other
1654 processes. */
1655 cond_resched();
1656
1696 } while (len); 1657 } while (len);
1697 1658
1698 return 0; 1659 return 0;
@@ -1713,17 +1674,12 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1713 unsigned long adr, int len, void *thunk) 1674 unsigned long adr, int len, void *thunk)
1714{ 1675{
1715 struct cfi_private *cfi = map->fldrv_priv; 1676 struct cfi_private *cfi = map->fldrv_priv;
1716 map_word status, status_OK; 1677 map_word status;
1717 unsigned long timeo;
1718 int retries = 3; 1678 int retries = 3;
1719 DECLARE_WAITQUEUE(wait, current); 1679 int ret;
1720 int ret = 0;
1721 1680
1722 adr += chip->start; 1681 adr += chip->start;
1723 1682
1724 /* Let's determine this according to the interleave only once */
1725 status_OK = CMD(0x80);
1726
1727 retry: 1683 retry:
1728 spin_lock(chip->mutex); 1684 spin_lock(chip->mutex);
1729 ret = get_chip(map, chip, adr, FL_ERASING); 1685 ret = get_chip(map, chip, adr, FL_ERASING);
@@ -1745,48 +1701,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1745 chip->state = FL_ERASING; 1701 chip->state = FL_ERASING;
1746 chip->erase_suspended = 0; 1702 chip->erase_suspended = 0;
1747 1703
1748 INVALIDATE_CACHE_UDELAY(map, chip, adr, 1704 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1749 adr, len, 1705 adr, len,
1750 chip->erase_time*1000/2); 1706 &chip->erase_time);
1751 1707 if (ret) {
1752 /* FIXME. Use a timer to check this, and return immediately. */ 1708 map_write(map, CMD(0x70), adr);
1753 /* Once the state machine's known to be working I'll do that */ 1709 chip->state = FL_STATUS;
1754 1710 xip_enable(map, chip, adr);
1755 timeo = jiffies + (HZ*20); 1711 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1756 for (;;) { 1712 goto out;
1757 if (chip->state != FL_ERASING) {
1758 /* Someone's suspended the erase. Sleep */
1759 set_current_state(TASK_UNINTERRUPTIBLE);
1760 add_wait_queue(&chip->wq, &wait);
1761 spin_unlock(chip->mutex);
1762 schedule();
1763 remove_wait_queue(&chip->wq, &wait);
1764 spin_lock(chip->mutex);
1765 continue;
1766 }
1767 if (chip->erase_suspended) {
1768 /* This erase was suspended and resumed.
1769 Adjust the timeout */
1770 timeo = jiffies + (HZ*20); /* FIXME */
1771 chip->erase_suspended = 0;
1772 }
1773
1774 status = map_read(map, adr);
1775 if (map_word_andequal(map, status, status_OK, status_OK))
1776 break;
1777
1778 /* OK Still waiting */
1779 if (time_after(jiffies, timeo)) {
1780 map_write(map, CMD(0x70), adr);
1781 chip->state = FL_STATUS;
1782 xip_enable(map, chip, adr);
1783 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1784 ret = -EIO;
1785 goto out;
1786 }
1787
1788 /* Latency issues. Drop the lock, wait a while and retry */
1789 UDELAY(map, chip, adr, 1000000/HZ);
1790 } 1713 }
1791 1714
1792 /* We've broken this before. It doesn't hurt to be safe */ 1715 /* We've broken this before. It doesn't hurt to be safe */
@@ -1815,7 +1738,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1815 ret = -EIO; 1738 ret = -EIO;
1816 } else if (chipstatus & 0x20 && retries--) { 1739 } else if (chipstatus & 0x20 && retries--) {
1817 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); 1740 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1818 timeo = jiffies + HZ;
1819 put_chip(map, chip, adr); 1741 put_chip(map, chip, adr);
1820 spin_unlock(chip->mutex); 1742 spin_unlock(chip->mutex);
1821 goto retry; 1743 goto retry;
@@ -1921,15 +1843,11 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1921{ 1843{
1922 struct cfi_private *cfi = map->fldrv_priv; 1844 struct cfi_private *cfi = map->fldrv_priv;
1923 struct cfi_pri_intelext *extp = cfi->cmdset_priv; 1845 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1924 map_word status, status_OK; 1846 int udelay;
1925 unsigned long timeo = jiffies + HZ;
1926 int ret; 1847 int ret;
1927 1848
1928 adr += chip->start; 1849 adr += chip->start;
1929 1850
1930 /* Let's determine this according to the interleave only once */
1931 status_OK = CMD(0x80);
1932
1933 spin_lock(chip->mutex); 1851 spin_lock(chip->mutex);
1934 ret = get_chip(map, chip, adr, FL_LOCKING); 1852 ret = get_chip(map, chip, adr, FL_LOCKING);
1935 if (ret) { 1853 if (ret) {
@@ -1954,41 +1872,21 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
1954 * If Instant Individual Block Locking supported then no need 1872 * If Instant Individual Block Locking supported then no need
1955 * to delay. 1873 * to delay.
1956 */ 1874 */
1875 udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1957 1876
1958 if (!extp || !(extp->FeatureSupport & (1 << 5))) 1877 ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1959 UDELAY(map, chip, adr, 1000000/HZ); 1878 if (ret) {
1960 1879 map_write(map, CMD(0x70), adr);
1961 /* FIXME. Use a timer to check this, and return immediately. */ 1880 chip->state = FL_STATUS;
1962 /* Once the state machine's known to be working I'll do that */ 1881 xip_enable(map, chip, adr);
1963 1882 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1964 timeo = jiffies + (HZ*20); 1883 goto out;
1965 for (;;) {
1966
1967 status = map_read(map, adr);
1968 if (map_word_andequal(map, status, status_OK, status_OK))
1969 break;
1970
1971 /* OK Still waiting */
1972 if (time_after(jiffies, timeo)) {
1973 map_write(map, CMD(0x70), adr);
1974 chip->state = FL_STATUS;
1975 xip_enable(map, chip, adr);
1976 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
1977 put_chip(map, chip, adr);
1978 spin_unlock(chip->mutex);
1979 return -EIO;
1980 }
1981
1982 /* Latency issues. Drop the lock, wait a while and retry */
1983 UDELAY(map, chip, adr, 1);
1984 } 1884 }
1985 1885
1986 /* Done and happy. */
1987 chip->state = FL_STATUS;
1988 xip_enable(map, chip, adr); 1886 xip_enable(map, chip, adr);
1989 put_chip(map, chip, adr); 1887out: put_chip(map, chip, adr);
1990 spin_unlock(chip->mutex); 1888 spin_unlock(chip->mutex);
1991 return 0; 1889 return ret;
1992} 1890}
1993 1891
1994static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) 1892static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
@@ -2445,28 +2343,8 @@ static void cfi_intelext_destroy(struct mtd_info *mtd)
2445 kfree(mtd->eraseregions); 2343 kfree(mtd->eraseregions);
2446} 2344}
2447 2345
2448static char im_name_0001[] = "cfi_cmdset_0001";
2449static char im_name_0003[] = "cfi_cmdset_0003";
2450static char im_name_0200[] = "cfi_cmdset_0200";
2451
2452static int __init cfi_intelext_init(void)
2453{
2454 inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001);
2455 inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001);
2456 inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001);
2457 return 0;
2458}
2459
2460static void __exit cfi_intelext_exit(void)
2461{
2462 inter_module_unregister(im_name_0001);
2463 inter_module_unregister(im_name_0003);
2464 inter_module_unregister(im_name_0200);
2465}
2466
2467module_init(cfi_intelext_init);
2468module_exit(cfi_intelext_exit);
2469
2470MODULE_LICENSE("GPL"); 2346MODULE_LICENSE("GPL");
2471MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); 2347MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2472MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips"); 2348MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2349MODULE_ALIAS("cfi_cmdset_0003");
2350MODULE_ALIAS("cfi_cmdset_0200");
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index aed10bd5c3c3..1e01ad38b26e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -236,6 +236,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
236 mtd->resume = cfi_amdstd_resume; 236 mtd->resume = cfi_amdstd_resume;
237 mtd->flags = MTD_CAP_NORFLASH; 237 mtd->flags = MTD_CAP_NORFLASH;
238 mtd->name = map->name; 238 mtd->name = map->name;
239 mtd->writesize = 1;
239 240
240 if (cfi->cfi_mode==CFI_MODE_CFI){ 241 if (cfi->cfi_mode==CFI_MODE_CFI){
241 unsigned char bootloc; 242 unsigned char bootloc;
@@ -326,7 +327,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
326 327
327 return cfi_amdstd_setup(mtd); 328 return cfi_amdstd_setup(mtd);
328} 329}
329 330EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
330 331
331static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) 332static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
332{ 333{
@@ -1758,25 +1759,6 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd)
1758 kfree(mtd->eraseregions); 1759 kfree(mtd->eraseregions);
1759} 1760}
1760 1761
1761static char im_name[]="cfi_cmdset_0002";
1762
1763
1764static int __init cfi_amdstd_init(void)
1765{
1766 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002);
1767 return 0;
1768}
1769
1770
1771static void __exit cfi_amdstd_exit(void)
1772{
1773 inter_module_unregister(im_name);
1774}
1775
1776
1777module_init(cfi_amdstd_init);
1778module_exit(cfi_amdstd_exit);
1779
1780MODULE_LICENSE("GPL"); 1762MODULE_LICENSE("GPL");
1781MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); 1763MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
1782MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); 1764MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0807c1c91e55..fae70a5db540 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -162,6 +162,7 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
162 162
163 return cfi_staa_setup(map); 163 return cfi_staa_setup(map);
164} 164}
165EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
165 166
166static struct mtd_info *cfi_staa_setup(struct map_info *map) 167static struct mtd_info *cfi_staa_setup(struct map_info *map)
167{ 168{
@@ -237,9 +238,8 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map)
237 mtd->unlock = cfi_staa_unlock; 238 mtd->unlock = cfi_staa_unlock;
238 mtd->suspend = cfi_staa_suspend; 239 mtd->suspend = cfi_staa_suspend;
239 mtd->resume = cfi_staa_resume; 240 mtd->resume = cfi_staa_resume;
240 mtd->flags = MTD_CAP_NORFLASH; 241 mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
241 mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */ 242 mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
242 mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
243 map->fldrv = &cfi_staa_chipdrv; 243 map->fldrv = &cfi_staa_chipdrv;
244 __module_get(THIS_MODULE); 244 __module_get(THIS_MODULE);
245 mtd->name = map->name; 245 mtd->name = map->name;
@@ -1410,20 +1410,4 @@ static void cfi_staa_destroy(struct mtd_info *mtd)
1410 kfree(cfi); 1410 kfree(cfi);
1411} 1411}
1412 1412
1413static char im_name[]="cfi_cmdset_0020";
1414
1415static int __init cfi_staa_init(void)
1416{
1417 inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020);
1418 return 0;
1419}
1420
1421static void __exit cfi_staa_exit(void)
1422{
1423 inter_module_unregister(im_name);
1424}
1425
1426module_init(cfi_staa_init);
1427module_exit(cfi_staa_exit);
1428
1429MODULE_LICENSE("GPL"); 1413MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index e636aa86bc24..4bf9f8cac0dd 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -349,12 +349,12 @@ static void print_cfi_ident(struct cfi_ident *cfip)
349 else 349 else
350 printk("No Vpp line\n"); 350 printk("No Vpp line\n");
351 351
352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); 352 printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp);
353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); 353 printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp));
354 354
355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { 355 if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) {
356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); 356 printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp);
357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); 357 printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp));
358 } 358 }
359 else 359 else
360 printk("Full buffer write not supported\n"); 360 printk("Full buffer write not supported\n");
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 41bd59d20d85..cdb0f590b40c 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -37,8 +37,15 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp)
37 if (!mtd) 37 if (!mtd)
38 mtd = check_cmd_set(map, 0); /* Then the secondary */ 38 mtd = check_cmd_set(map, 0); /* Then the secondary */
39 39
40 if (mtd) 40 if (mtd) {
41 if (mtd->size > map->size) {
42 printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n",
43 (unsigned long)mtd->size >> 10,
44 (unsigned long)map->size >> 10);
45 mtd->size = map->size;
46 }
41 return mtd; 47 return mtd;
48 }
42 49
43 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n"); 50 printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n");
44 51
@@ -100,7 +107,12 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi
100 * Align bitmap storage size to full byte. 107 * Align bitmap storage size to full byte.
101 */ 108 */
102 max_chips = map->size >> cfi.chipshift; 109 max_chips = map->size >> cfi.chipshift;
103 mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0); 110 if (!max_chips) {
111 printk(KERN_WARNING "NOR chip too large to fit in mapping. Attempting to cope...\n");
112 max_chips = 1;
113 }
114
115 mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG;
104 chip_map = kmalloc(mapsize, GFP_KERNEL); 116 chip_map = kmalloc(mapsize, GFP_KERNEL);
105 if (!chip_map) { 117 if (!chip_map) {
106 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name); 118 printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name);
@@ -194,25 +206,28 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map,
194{ 206{
195 struct cfi_private *cfi = map->fldrv_priv; 207 struct cfi_private *cfi = map->fldrv_priv;
196 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID; 208 __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID;
197#if defined(CONFIG_MODULES) && defined(HAVE_INTER_MODULE) 209#ifdef CONFIG_MODULES
198 char probename[32]; 210 char probename[16+sizeof(MODULE_SYMBOL_PREFIX)];
199 cfi_cmdset_fn_t *probe_function; 211 cfi_cmdset_fn_t *probe_function;
200 212
201 sprintf(probename, "cfi_cmdset_%4.4X", type); 213 sprintf(probename, MODULE_SYMBOL_PREFIX "cfi_cmdset_%4.4X", type);
202 214
203 probe_function = inter_module_get_request(probename, probename); 215 probe_function = __symbol_get(probename);
216 if (!probe_function) {
217 request_module(probename + sizeof(MODULE_SYMBOL_PREFIX) - 1);
218 probe_function = __symbol_get(probename);
219 }
204 220
205 if (probe_function) { 221 if (probe_function) {
206 struct mtd_info *mtd; 222 struct mtd_info *mtd;
207 223
208 mtd = (*probe_function)(map, primary); 224 mtd = (*probe_function)(map, primary);
209 /* If it was happy, it'll have increased its own use count */ 225 /* If it was happy, it'll have increased its own use count */
210 inter_module_put(probename); 226 symbol_put_addr(probe_function);
211 return mtd; 227 return mtd;
212 } 228 }
213#endif 229#endif
214 printk(KERN_NOTICE "Support for command set %04X not present\n", 230 printk(KERN_NOTICE "Support for command set %04X not present\n", type);
215 type);
216 231
217 return NULL; 232 return NULL;
218} 233}
@@ -226,12 +241,8 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
226 return NULL; 241 return NULL;
227 242
228 switch(type){ 243 switch(type){
229 /* Urgh. Ifdefs. The version with weak symbols was 244 /* We need these for the !CONFIG_MODULES case,
230 * _much_ nicer. Shame it didn't seem to work on 245 because symbol_get() doesn't work there */
231 * anything but x86, really.
232 * But we can't rely in inter_module_get() because
233 * that'd mean we depend on link order.
234 */
235#ifdef CONFIG_MTD_CFI_INTELEXT 246#ifdef CONFIG_MTD_CFI_INTELEXT
236 case 0x0001: 247 case 0x0001:
237 case 0x0003: 248 case 0x0003:
@@ -246,9 +257,9 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary)
246 case 0x0020: 257 case 0x0020:
247 return cfi_cmdset_0020(map, primary); 258 return cfi_cmdset_0020(map, primary);
248#endif 259#endif
260 default:
261 return cfi_cmdset_unknown(map, primary);
249 } 262 }
250
251 return cfi_cmdset_unknown(map, primary);
252} 263}
253 264
254MODULE_LICENSE("GPL"); 265MODULE_LICENSE("GPL");
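[Editor's note, not part of the patch: on the consumer side, gen_probe.c now resolves the command-set driver at run time with __symbol_get(), falls back to request_module() — which the MODULE_ALIAS entries added earlier satisfy — and drops the reference with symbol_put_addr() when done. A condensed sketch of that lookup flow, with hypothetical foo_* names and error handling trimmed:]

    /* Sketch of the runtime symbol lookup gen_probe.c adopts (hypothetical
     * foo_* names; not the actual probe code). */
    #include <linux/module.h>
    #include <linux/kmod.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>

    typedef int foo_cmdset_fn_t(void);

    static int call_foo_cmdset(unsigned int type)
    {
            char name[16 + sizeof(MODULE_SYMBOL_PREFIX)];
            foo_cmdset_fn_t *fn;
            int ret;

            sprintf(name, MODULE_SYMBOL_PREFIX "foo_cmdset_%4.4X", type);

            fn = (foo_cmdset_fn_t *)__symbol_get(name);
            if (!fn) {
                    /* module names carry no symbol prefix, so strip it */
                    request_module(name + sizeof(MODULE_SYMBOL_PREFIX) - 1);
                    fn = (foo_cmdset_fn_t *)__symbol_get(name);
            }
            if (!fn)
                    return -ENODEV;

            ret = fn();
            symbol_put_addr(fn);    /* drop the module reference taken above */
            return ret;
    }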
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index bd2e876a814b..763925747db6 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -70,7 +70,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
70 mtd->read = mapram_read; 70 mtd->read = mapram_read;
71 mtd->write = mapram_write; 71 mtd->write = mapram_write;
72 mtd->sync = mapram_nop; 72 mtd->sync = mapram_nop;
73 mtd->flags = MTD_CAP_RAM | MTD_VOLATILE; 73 mtd->flags = MTD_CAP_RAM;
74 74
75 mtd->erasesize = PAGE_SIZE; 75 mtd->erasesize = PAGE_SIZE;
76 while(mtd->size & (mtd->erasesize - 1)) 76 while(mtd->size & (mtd->erasesize - 1))
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 624c12c232c8..bc6ee9ef8a31 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -46,9 +46,7 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
46 mtd->write = maprom_write; 46 mtd->write = maprom_write;
47 mtd->sync = maprom_nop; 47 mtd->sync = maprom_nop;
48 mtd->flags = MTD_CAP_ROM; 48 mtd->flags = MTD_CAP_ROM;
49 mtd->erasesize = 131072; 49 mtd->erasesize = map->size;
50 while(mtd->size & (mtd->erasesize - 1))
51 mtd->erasesize >>= 1;
52 50
53 __module_get(THIS_MODULE); 51 __module_get(THIS_MODULE);
54 return mtd; 52 return mtd;
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
index 3cc0b23c5865..967abbecdff9 100644
--- a/drivers/mtd/chips/sharp.c
+++ b/drivers/mtd/chips/sharp.c
@@ -140,6 +140,7 @@ static struct mtd_info *sharp_probe(struct map_info *map)
140 mtd->suspend = sharp_suspend; 140 mtd->suspend = sharp_suspend;
141 mtd->resume = sharp_resume; 141 mtd->resume = sharp_resume;
142 mtd->flags = MTD_CAP_NORFLASH; 142 mtd->flags = MTD_CAP_NORFLASH;
143 mtd->writesize = 1;
143 mtd->name = map->name; 144 mtd->name = map->name;
144 145
145 memset(sharp, 0, sizeof(*sharp)); 146 memset(sharp, 0, sizeof(*sharp));
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 7fac438b5c32..16c02b5ccf7e 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -47,6 +47,11 @@ config MTD_MS02NV
47 accelerator. Say Y here if you have a DECstation 5000/2x0 or a 47 accelerator. Say Y here if you have a DECstation 5000/2x0 or a
48 DECsystem 5900 equipped with such a module. 48 DECsystem 5900 equipped with such a module.
49 49
50 If you want to compile this driver as a module ( = code which can be
51 inserted in and removed from the running kernel whenever you want),
52 say M here and read <file:Documentation/modules.txt>. The module will
53 be called ms02-nv.o.
54
50config MTD_DATAFLASH 55config MTD_DATAFLASH
51 tristate "Support for AT45xxx DataFlash" 56 tristate "Support for AT45xxx DataFlash"
52 depends on MTD && SPI_MASTER && EXPERIMENTAL 57 depends on MTD && SPI_MASTER && EXPERIMENTAL
@@ -209,7 +214,6 @@ config MTD_DOC2001PLUS
209config MTD_DOCPROBE 214config MTD_DOCPROBE
210 tristate 215 tristate
211 select MTD_DOCECC 216 select MTD_DOCECC
212 select OBSOLETE_INTERMODULE
213 217
214config MTD_DOCECC 218config MTD_DOCECC
215 tristate 219 tristate
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index b6573670316f..0f788d5c4bf8 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -3,13 +3,6 @@
3# 3#
4# $Id: Makefile.common,v 1.7 2004/12/22 17:51:15 joern Exp $ 4# $Id: Makefile.common,v 1.7 2004/12/22 17:51:15 joern Exp $
5 5
6# *** BIG UGLY NOTE ***
7#
8# The removal of get_module_symbol() and replacement with
9# inter_module_register() et al has introduced a link order dependency
10# here where previously there was none. We now have to ensure that
11# doc200[01].o are linked before docprobe.o
12
13obj-$(CONFIG_MTD_DOC2000) += doc2000.o 6obj-$(CONFIG_MTD_DOC2000) += doc2000.o
14obj-$(CONFIG_MTD_DOC2001) += doc2001.o 7obj-$(CONFIG_MTD_DOC2001) += doc2001.o
15obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o 8obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 4160b8334c53..0d98c223c5fc 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -4,7 +4,7 @@
4 * block2mtd.c - create an mtd from a block device 4 * block2mtd.c - create an mtd from a block device
5 * 5 *
6 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk> 6 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
7 * Copyright (C) 2004,2005 Jörn Engel <joern@wh.fh-wedel.de> 7 * Copyright (C) 2004-2006 Jörn Engel <joern@wh.fh-wedel.de>
8 * 8 *
9 * Licence: GPL 9 * Licence: GPL
10 */ 10 */
@@ -331,7 +331,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
331 dev->mtd.writev = default_mtd_writev; 331 dev->mtd.writev = default_mtd_writev;
332 dev->mtd.sync = block2mtd_sync; 332 dev->mtd.sync = block2mtd_sync;
333 dev->mtd.read = block2mtd_read; 333 dev->mtd.read = block2mtd_read;
334 dev->mtd.readv = default_mtd_readv;
335 dev->mtd.priv = dev; 334 dev->mtd.priv = dev;
336 dev->mtd.owner = THIS_MODULE; 335 dev->mtd.owner = THIS_MODULE;
337 336
@@ -351,6 +350,12 @@ devinit_err:
351} 350}
352 351
353 352
353/* This function works similar to reguler strtoul. In addition, it
354 * allows some suffixes for a more human-readable number format:
355 * ki, Ki, kiB, KiB - multiply result with 1024
356 * Mi, MiB - multiply result with 1024^2
357 * Gi, GiB - multiply result with 1024^3
358 */
354static int ustrtoul(const char *cp, char **endp, unsigned int base) 359static int ustrtoul(const char *cp, char **endp, unsigned int base)
355{ 360{
356 unsigned long result = simple_strtoul(cp, endp, base); 361 unsigned long result = simple_strtoul(cp, endp, base);
@@ -359,11 +364,16 @@ static int ustrtoul(const char *cp, char **endp, unsigned int base)
359 result *= 1024; 364 result *= 1024;
360 case 'M': 365 case 'M':
361 result *= 1024; 366 result *= 1024;
367 case 'K':
362 case 'k': 368 case 'k':
363 result *= 1024; 369 result *= 1024;
364 /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */ 370 /* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
365 if ((*endp)[1] == 'i') 371 if ((*endp)[1] == 'i') {
366 (*endp) += 2; 372 if ((*endp)[2] == 'B')
373 (*endp) += 3;
374 else
375 (*endp) += 2;
376 }
367 } 377 }
368 return result; 378 return result;
369} 379}
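[Editor's note, not part of the patch: the fall-through in this switch is deliberate — each case multiplies once more, so "Gi" passes through the 'M' and 'k'/'K' cases as well, and this hunk additionally accepts an uppercase 'K' and a trailing 'B'. A userspace sketch of the same logic, not the kernel function itself:]

    /* Sketch of the suffix handling block2mtd's ustrtoul() accepts;
     * the deliberate fall-through multiplies once per step. */
    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long parse_size(const char *s)
    {
            char *end;
            unsigned long result = strtoul(s, &end, 0);

            switch (*end) {
            case 'G':
                    result *= 1024;         /* fall through */
            case 'M':
                    result *= 1024;         /* fall through */
            case 'K':
            case 'k':
                    result *= 1024;
            }
            return result;
    }

    int main(void)
    {
            printf("%lu %lu\n", parse_size("128KiB"), parse_size("4Mi"));
            /* prints: 131072 4194304 */
            return 0;
    }

[With block2mtd's parameter syntax — a device followed by an optional erase size, as parsed by block2mtd_setup() below — this means something like block2mtd=/dev/sdb1,128KiB is now understood as a 128 KiB erase size.]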
@@ -418,7 +428,8 @@ static inline void kill_final_newline(char *str)
418 428
419static int block2mtd_setup(const char *val, struct kernel_param *kp) 429static int block2mtd_setup(const char *val, struct kernel_param *kp)
420{ 430{
421 char buf[80+12], *str=buf; /* 80 for device, 12 for erase size */ 431 char buf[80+12]; /* 80 for device, 12 for erase size */
432 char *str = buf;
422 char *token[2]; 433 char *token[2];
423 char *name; 434 char *name;
424 size_t erase_size = PAGE_SIZE; 435 size_t erase_size = PAGE_SIZE;
@@ -430,7 +441,7 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
430 strcpy(str, val); 441 strcpy(str, val);
431 kill_final_newline(str); 442 kill_final_newline(str);
432 443
433 for (i=0; i<2; i++) 444 for (i = 0; i < 2; i++)
434 token[i] = strsep(&str, ","); 445 token[i] = strsep(&str, ",");
435 446
436 if (str) 447 if (str)
@@ -449,8 +460,10 @@ static int block2mtd_setup(const char *val, struct kernel_param *kp)
449 460
450 if (token[1]) { 461 if (token[1]) {
451 ret = parse_num(&erase_size, token[1]); 462 ret = parse_num(&erase_size, token[1]);
452 if (ret) 463 if (ret) {
464 kfree(name);
453 parse_err("illegal erase size"); 465 parse_err("illegal erase size");
466 }
454 } 467 }
455 468
456 add_device(name, erase_size); 469 add_device(name, erase_size);
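
The setup parsing above accepts a string of the form block2mtd=<device>[,<erase size>], e.g. block2mtd=/dev/loop0,64KiB, where the erase size may carry the binary suffixes handled by ustrtoul(). Below is a stand-alone, user-space sketch of that suffix logic, assuming the same fall-through multiplier scheme; it uses strtoul() in place of the kernel's simple_strtoul(), and the main() is illustrative only, not part of the patch.

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the suffix handling implemented by ustrtoul() in the hunk above:
 * "k"/"K"/"Ki"/"KiB" scale by 1024, "M"/"Mi"/"MiB" by 1024^2,
 * "G"/"Gi"/"GiB" by 1024^3. */
static unsigned long ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = strtoul(cp, endp, base);

	switch (**endp) {
	case 'G':
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
	case 'k':
		result *= 1024;
		/* the "ki"/"Mi"/"Gi" and "KiB" spellings are accepted too */
		if ((*endp)[1] == 'i')
			*endp += ((*endp)[2] == 'B') ? 3 : 2;
	}
	return result;
}

int main(void)
{
	char *end;

	printf("%lu\n", ustrtoul("64KiB", &end, 0));	/* prints 65536 */
	printf("%lu\n", ustrtoul("1Mi", &end, 0));	/* prints 1048576 */
	return 0;
}
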
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index 23e7a5c7d2c1..c54e40464d82 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -59,13 +59,10 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
59 size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel); 59 size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
60static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len, 60static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
61 size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel); 61 size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel);
62static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs, 62static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
63 unsigned long count, loff_t to, size_t *retlen, 63 struct mtd_oob_ops *ops);
64 u_char *eccbuf, struct nand_oobinfo *oobsel); 64static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
65static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 65 struct mtd_oob_ops *ops);
66 size_t *retlen, u_char *buf);
67static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
68 size_t *retlen, const u_char *buf);
69static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len, 66static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
70 size_t *retlen, const u_char *buf); 67 size_t *retlen, const u_char *buf);
71static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); 68static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
@@ -517,16 +514,9 @@ static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
517 return retval; 514 return retval;
518} 515}
519 516
520static const char im_name[] = "DoC2k_init"; 517/* This routine is found from the docprobe code by symbol_get(),
521 518 * which will bump the use count of this module. */
522/* This routine is made available to other mtd code via 519void DoC2k_init(struct mtd_info *mtd)
523 * inter_module_register. It must only be accessed through
524 * inter_module_get which will bump the use count of this module. The
525 * addresses passed back in mtd are valid as long as the use count of
526 * this module is non-zero, i.e. between inter_module_get and
527 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
528 */
529static void DoC2k_init(struct mtd_info *mtd)
530{ 520{
531 struct DiskOnChip *this = mtd->priv; 521 struct DiskOnChip *this = mtd->priv;
532 struct DiskOnChip *old = NULL; 522 struct DiskOnChip *old = NULL;
@@ -586,7 +576,7 @@ static void DoC2k_init(struct mtd_info *mtd)
586 mtd->ecctype = MTD_ECC_RS_DiskOnChip; 576 mtd->ecctype = MTD_ECC_RS_DiskOnChip;
587 mtd->size = 0; 577 mtd->size = 0;
588 mtd->erasesize = 0; 578 mtd->erasesize = 0;
589 mtd->oobblock = 512; 579 mtd->writesize = 512;
590 mtd->oobsize = 16; 580 mtd->oobsize = 16;
591 mtd->owner = THIS_MODULE; 581 mtd->owner = THIS_MODULE;
592 mtd->erase = doc_erase; 582 mtd->erase = doc_erase;
@@ -594,9 +584,6 @@ static void DoC2k_init(struct mtd_info *mtd)
594 mtd->unpoint = NULL; 584 mtd->unpoint = NULL;
595 mtd->read = doc_read; 585 mtd->read = doc_read;
596 mtd->write = doc_write; 586 mtd->write = doc_write;
597 mtd->read_ecc = doc_read_ecc;
598 mtd->write_ecc = doc_write_ecc;
599 mtd->writev_ecc = doc_writev_ecc;
600 mtd->read_oob = doc_read_oob; 587 mtd->read_oob = doc_read_oob;
601 mtd->write_oob = doc_write_oob; 588 mtd->write_oob = doc_write_oob;
602 mtd->sync = NULL; 589 mtd->sync = NULL;
@@ -623,6 +610,7 @@ static void DoC2k_init(struct mtd_info *mtd)
623 return; 610 return;
624 } 611 }
625} 612}
613EXPORT_SYMBOL_GPL(DoC2k_init);
626 614
627static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, 615static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
628 size_t * retlen, u_char * buf) 616 size_t * retlen, u_char * buf)
@@ -971,72 +959,18 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
971 return 0; 959 return 0;
972} 960}
973 961
974static int doc_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs, 962static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
975 unsigned long count, loff_t to, size_t *retlen, 963 struct mtd_oob_ops *ops)
976 u_char *eccbuf, struct nand_oobinfo *oobsel)
977{
978 static char static_buf[512];
979 static DEFINE_MUTEX(writev_buf_mutex);
980
981 size_t totretlen = 0;
982 size_t thisvecofs = 0;
983 int ret= 0;
984
985 mutex_lock(&writev_buf_mutex);
986
987 while(count) {
988 size_t thislen, thisretlen;
989 unsigned char *buf;
990
991 buf = vecs->iov_base + thisvecofs;
992 thislen = vecs->iov_len - thisvecofs;
993
994
995 if (thislen >= 512) {
996 thislen = thislen & ~(512-1);
997 thisvecofs += thislen;
998 } else {
999 /* Not enough to fill a page. Copy into buf */
1000 memcpy(static_buf, buf, thislen);
1001 buf = &static_buf[thislen];
1002
1003 while(count && thislen < 512) {
1004 vecs++;
1005 count--;
1006 thisvecofs = min((512-thislen), vecs->iov_len);
1007 memcpy(buf, vecs->iov_base, thisvecofs);
1008 thislen += thisvecofs;
1009 buf += thisvecofs;
1010 }
1011 buf = static_buf;
1012 }
1013 if (count && thisvecofs == vecs->iov_len) {
1014 thisvecofs = 0;
1015 vecs++;
1016 count--;
1017 }
1018 ret = doc_write_ecc(mtd, to, thislen, &thisretlen, buf, eccbuf, oobsel);
1019
1020 totretlen += thisretlen;
1021
1022 if (ret || thisretlen != thislen)
1023 break;
1024
1025 to += thislen;
1026 }
1027
1028 mutex_unlock(&writev_buf_mutex);
1029 *retlen = totretlen;
1030 return ret;
1031}
1032
1033
1034static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1035 size_t * retlen, u_char * buf)
1036{ 964{
1037 struct DiskOnChip *this = mtd->priv; 965 struct DiskOnChip *this = mtd->priv;
1038 int len256 = 0, ret; 966 int len256 = 0, ret;
1039 struct Nand *mychip; 967 struct Nand *mychip;
968 uint8_t *buf = ops->oobbuf;
969 size_t len = ops->len;
970
971 BUG_ON(ops->mode != MTD_OOB_PLACE);
972
973 ofs += ops->ooboffs;
1040 974
1041 mutex_lock(&this->lock); 975 mutex_lock(&this->lock);
1042 976
@@ -1077,7 +1011,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1077 1011
1078 DoC_ReadBuf(this, &buf[len256], len - len256); 1012 DoC_ReadBuf(this, &buf[len256], len - len256);
1079 1013
1080 *retlen = len; 1014 ops->retlen = len;
1081 /* Reading the full OOB data drops us off of the end of the page, 1015 /* Reading the full OOB data drops us off of the end of the page,
1082 * causing the flash device to go into busy mode, so we need 1016 * causing the flash device to go into busy mode, so we need
1083 * to wait until ready 11.4.1 and Toshiba TC58256FT docs */ 1017 * to wait until ready 11.4.1 and Toshiba TC58256FT docs */
@@ -1192,17 +1126,20 @@ static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len,
1192 1126
1193} 1127}
1194 1128
1195static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 1129static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
1196 size_t * retlen, const u_char * buf) 1130 struct mtd_oob_ops *ops)
1197{ 1131{
1198 struct DiskOnChip *this = mtd->priv; 1132 struct DiskOnChip *this = mtd->priv;
1199 int ret; 1133 int ret;
1200 1134
1201 mutex_lock(&this->lock); 1135 BUG_ON(ops->mode != MTD_OOB_PLACE);
1202 ret = doc_write_oob_nolock(mtd, ofs, len, retlen, buf); 1136
1137 mutex_lock(&this->lock);
1138 ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
1139 &ops->retlen, ops->oobbuf);
1203 1140
1204 mutex_unlock(&this->lock); 1141 mutex_unlock(&this->lock);
1205 return ret; 1142 return ret;
1206} 1143}
1207 1144
1208static int doc_erase(struct mtd_info *mtd, struct erase_info *instr) 1145static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
@@ -1277,12 +1214,6 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
1277 * 1214 *
1278 ****************************************************************************/ 1215 ****************************************************************************/
1279 1216
1280static int __init init_doc2000(void)
1281{
1282 inter_module_register(im_name, THIS_MODULE, &DoC2k_init);
1283 return 0;
1284}
1285
1286static void __exit cleanup_doc2000(void) 1217static void __exit cleanup_doc2000(void)
1287{ 1218{
1288 struct mtd_info *mtd; 1219 struct mtd_info *mtd;
@@ -1298,11 +1229,9 @@ static void __exit cleanup_doc2000(void)
1298 kfree(this->chips); 1229 kfree(this->chips);
1299 kfree(mtd); 1230 kfree(mtd);
1300 } 1231 }
1301 inter_module_unregister(im_name);
1302} 1232}
1303 1233
1304module_exit(cleanup_doc2000); 1234module_exit(cleanup_doc2000);
1305module_init(init_doc2000);
1306 1235
1307MODULE_LICENSE("GPL"); 1236MODULE_LICENSE("GPL");
1308MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); 1237MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
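
The doc_read_oob()/doc_write_oob() prototypes above now take a struct mtd_oob_ops instead of a raw (len, retlen, buf) triple. A caller-side sketch of the new convention for a plain MTD_OOB_PLACE read, mirroring the inftl_read_oob() helper this same patch adds to inftlcore.c; the function name is illustrative and error handling is abbreviated.

#include <linux/mtd/mtd.h>

static int read_oob_place(struct mtd_info *mtd, loff_t offs, size_t len,
			  size_t *retlen, uint8_t *buf)
{
	struct mtd_oob_ops ops;
	int res;

	ops.mode = MTD_OOB_PLACE;	/* use the caller's OOB placement as-is */
	ops.ooboffs = offs & (mtd->writesize - 1);	/* offset within the spare area */
	ops.ooblen = len;
	ops.oobbuf = buf;
	ops.datbuf = NULL;		/* OOB only, no main-area data */
	ops.len = len;

	/* the device offset passed down is aligned to the page start */
	res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
	*retlen = ops.retlen;
	return res;
}
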
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 681a9c73a2a3..0cf022a69e65 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -43,10 +43,10 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
43static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len, 43static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
44 size_t *retlen, const u_char *buf, u_char *eccbuf, 44 size_t *retlen, const u_char *buf, u_char *eccbuf,
45 struct nand_oobinfo *oobsel); 45 struct nand_oobinfo *oobsel);
46static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 46static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
47 size_t *retlen, u_char *buf); 47 struct mtd_oob_ops *ops);
48static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 48static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
49 size_t *retlen, const u_char *buf); 49 struct mtd_oob_ops *ops);
50static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); 50static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
51 51
52static struct mtd_info *docmillist = NULL; 52static struct mtd_info *docmillist = NULL;
@@ -324,16 +324,9 @@ static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
324 return retval; 324 return retval;
325} 325}
326 326
327static const char im_name[] = "DoCMil_init"; 327/* This routine is found from the docprobe code by symbol_get(),
328 328 * which will bump the use count of this module. */
329/* This routine is made available to other mtd code via 329void DoCMil_init(struct mtd_info *mtd)
330 * inter_module_register. It must only be accessed through
331 * inter_module_get which will bump the use count of this module. The
332 * addresses passed back in mtd are valid as long as the use count of
333 * this module is non-zero, i.e. between inter_module_get and
334 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
335 */
336static void DoCMil_init(struct mtd_info *mtd)
337{ 330{
338 struct DiskOnChip *this = mtd->priv; 331 struct DiskOnChip *this = mtd->priv;
339 struct DiskOnChip *old = NULL; 332 struct DiskOnChip *old = NULL;
@@ -368,7 +361,7 @@ static void DoCMil_init(struct mtd_info *mtd)
368 /* FIXME: erase size is not always 8KiB */ 361 /* FIXME: erase size is not always 8KiB */
369 mtd->erasesize = 0x2000; 362 mtd->erasesize = 0x2000;
370 363
371 mtd->oobblock = 512; 364 mtd->writesize = 512;
372 mtd->oobsize = 16; 365 mtd->oobsize = 16;
373 mtd->owner = THIS_MODULE; 366 mtd->owner = THIS_MODULE;
374 mtd->erase = doc_erase; 367 mtd->erase = doc_erase;
@@ -376,8 +369,6 @@ static void DoCMil_init(struct mtd_info *mtd)
376 mtd->unpoint = NULL; 369 mtd->unpoint = NULL;
377 mtd->read = doc_read; 370 mtd->read = doc_read;
378 mtd->write = doc_write; 371 mtd->write = doc_write;
379 mtd->read_ecc = doc_read_ecc;
380 mtd->write_ecc = doc_write_ecc;
381 mtd->read_oob = doc_read_oob; 372 mtd->read_oob = doc_read_oob;
382 mtd->write_oob = doc_write_oob; 373 mtd->write_oob = doc_write_oob;
383 mtd->sync = NULL; 374 mtd->sync = NULL;
@@ -401,6 +392,7 @@ static void DoCMil_init(struct mtd_info *mtd)
401 return; 392 return;
402 } 393 }
403} 394}
395EXPORT_SYMBOL_GPL(DoCMil_init);
404 396
405static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, 397static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
406 size_t *retlen, u_char *buf) 398 size_t *retlen, u_char *buf)
@@ -670,8 +662,8 @@ static int doc_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
670 return ret; 662 return ret;
671} 663}
672 664
673static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 665static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
674 size_t *retlen, u_char *buf) 666 struct mtd_oob_ops *ops)
675{ 667{
676#ifndef USE_MEMCPY 668#ifndef USE_MEMCPY
677 int i; 669 int i;
@@ -680,6 +672,12 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
680 struct DiskOnChip *this = mtd->priv; 672 struct DiskOnChip *this = mtd->priv;
681 void __iomem *docptr = this->virtadr; 673 void __iomem *docptr = this->virtadr;
682 struct Nand *mychip = &this->chips[ofs >> this->chipshift]; 674 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
675 uint8_t *buf = ops->oobbuf;
676 size_t len = ops->len;
677
678 BUG_ON(ops->mode != MTD_OOB_PLACE);
679
680 ofs += ops->ooboffs;
683 681
684 /* Find the chip which is to be used and select it */ 682 /* Find the chip which is to be used and select it */
685 if (this->curfloor != mychip->floor) { 683 if (this->curfloor != mychip->floor) {
@@ -716,13 +714,13 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
716#endif 714#endif
717 buf[len - 1] = ReadDOC(docptr, LastDataRead); 715 buf[len - 1] = ReadDOC(docptr, LastDataRead);
718 716
719 *retlen = len; 717 ops->retlen = len;
720 718
721 return 0; 719 return 0;
722} 720}
723 721
724static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 722static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
725 size_t *retlen, const u_char *buf) 723 struct mtd_oob_ops *ops)
726{ 724{
727#ifndef USE_MEMCPY 725#ifndef USE_MEMCPY
728 int i; 726 int i;
@@ -732,6 +730,12 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
732 struct DiskOnChip *this = mtd->priv; 730 struct DiskOnChip *this = mtd->priv;
733 void __iomem *docptr = this->virtadr; 731 void __iomem *docptr = this->virtadr;
734 struct Nand *mychip = &this->chips[ofs >> this->chipshift]; 732 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
733 uint8_t *buf = ops->oobbuf;
734 size_t len = ops->len;
735
736 BUG_ON(ops->mode != MTD_OOB_PLACE);
737
738 ofs += ops->ooboffs;
735 739
736 /* Find the chip which is to be used and select it */ 740 /* Find the chip which is to be used and select it */
737 if (this->curfloor != mychip->floor) { 741 if (this->curfloor != mychip->floor) {
@@ -783,12 +787,12 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
783 if (ReadDOC(docptr, Mil_CDSN_IO) & 1) { 787 if (ReadDOC(docptr, Mil_CDSN_IO) & 1) {
784 printk("Error programming oob data\n"); 788 printk("Error programming oob data\n");
785 /* FIXME: implement Bad Block Replacement (in nftl.c ??) */ 789 /* FIXME: implement Bad Block Replacement (in nftl.c ??) */
786 *retlen = 0; 790 ops->retlen = 0;
787 ret = -EIO; 791 ret = -EIO;
788 } 792 }
789 dummy = ReadDOC(docptr, LastDataRead); 793 dummy = ReadDOC(docptr, LastDataRead);
790 794
791 *retlen = len; 795 ops->retlen = len;
792 796
793 return ret; 797 return ret;
794} 798}
@@ -856,12 +860,6 @@ int doc_erase (struct mtd_info *mtd, struct erase_info *instr)
856 * 860 *
857 ****************************************************************************/ 861 ****************************************************************************/
858 862
859static int __init init_doc2001(void)
860{
861 inter_module_register(im_name, THIS_MODULE, &DoCMil_init);
862 return 0;
863}
864
865static void __exit cleanup_doc2001(void) 863static void __exit cleanup_doc2001(void)
866{ 864{
867 struct mtd_info *mtd; 865 struct mtd_info *mtd;
@@ -877,11 +875,9 @@ static void __exit cleanup_doc2001(void)
877 kfree(this->chips); 875 kfree(this->chips);
878 kfree(mtd); 876 kfree(mtd);
879 } 877 }
880 inter_module_unregister(im_name);
881} 878}
882 879
883module_exit(cleanup_doc2001); 880module_exit(cleanup_doc2001);
884module_init(init_doc2001);
885 881
886MODULE_LICENSE("GPL"); 882MODULE_LICENSE("GPL");
887MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); 883MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 5f57f29efee4..66cb1e50469a 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -47,10 +47,10 @@ static int doc_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
47static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len, 47static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
48 size_t *retlen, const u_char *buf, u_char *eccbuf, 48 size_t *retlen, const u_char *buf, u_char *eccbuf,
49 struct nand_oobinfo *oobsel); 49 struct nand_oobinfo *oobsel);
50static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 50static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
51 size_t *retlen, u_char *buf); 51 struct mtd_oob_ops *ops);
52static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 52static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
53 size_t *retlen, const u_char *buf); 53 struct mtd_oob_ops *ops);
54static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); 54static int doc_erase (struct mtd_info *mtd, struct erase_info *instr);
55 55
56static struct mtd_info *docmilpluslist = NULL; 56static struct mtd_info *docmilpluslist = NULL;
@@ -447,16 +447,9 @@ static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2)
447 return retval; 447 return retval;
448} 448}
449 449
450static const char im_name[] = "DoCMilPlus_init"; 450/* This routine is found from the docprobe code by symbol_get(),
451 451 * which will bump the use count of this module. */
452/* This routine is made available to other mtd code via 452void DoCMilPlus_init(struct mtd_info *mtd)
453 * inter_module_register. It must only be accessed through
454 * inter_module_get which will bump the use count of this module. The
455 * addresses passed back in mtd are valid as long as the use count of
456 * this module is non-zero, i.e. between inter_module_get and
457 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
458 */
459static void DoCMilPlus_init(struct mtd_info *mtd)
460{ 453{
461 struct DiskOnChip *this = mtd->priv; 454 struct DiskOnChip *this = mtd->priv;
462 struct DiskOnChip *old = NULL; 455 struct DiskOnChip *old = NULL;
@@ -490,7 +483,7 @@ static void DoCMilPlus_init(struct mtd_info *mtd)
490 mtd->size = 0; 483 mtd->size = 0;
491 484
492 mtd->erasesize = 0; 485 mtd->erasesize = 0;
493 mtd->oobblock = 512; 486 mtd->writesize = 512;
494 mtd->oobsize = 16; 487 mtd->oobsize = 16;
495 mtd->owner = THIS_MODULE; 488 mtd->owner = THIS_MODULE;
496 mtd->erase = doc_erase; 489 mtd->erase = doc_erase;
@@ -498,8 +491,6 @@ static void DoCMilPlus_init(struct mtd_info *mtd)
498 mtd->unpoint = NULL; 491 mtd->unpoint = NULL;
499 mtd->read = doc_read; 492 mtd->read = doc_read;
500 mtd->write = doc_write; 493 mtd->write = doc_write;
501 mtd->read_ecc = doc_read_ecc;
502 mtd->write_ecc = doc_write_ecc;
503 mtd->read_oob = doc_read_oob; 494 mtd->read_oob = doc_read_oob;
504 mtd->write_oob = doc_write_oob; 495 mtd->write_oob = doc_write_oob;
505 mtd->sync = NULL; 496 mtd->sync = NULL;
@@ -524,6 +515,7 @@ static void DoCMilPlus_init(struct mtd_info *mtd)
524 return; 515 return;
525 } 516 }
526} 517}
518EXPORT_SYMBOL_GPL(DoCMilPlus_init);
527 519
528#if 0 520#if 0
529static int doc_dumpblk(struct mtd_info *mtd, loff_t from) 521static int doc_dumpblk(struct mtd_info *mtd, loff_t from)
@@ -876,14 +868,20 @@ static int doc_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
876 return ret; 868 return ret;
877} 869}
878 870
879static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 871static int doc_read_oob(struct mtd_info *mtd, loff_t ofs,
880 size_t *retlen, u_char *buf) 872 struct mtd_oob_ops *ops)
881{ 873{
882 loff_t fofs, base; 874 loff_t fofs, base;
883 struct DiskOnChip *this = mtd->priv; 875 struct DiskOnChip *this = mtd->priv;
884 void __iomem * docptr = this->virtadr; 876 void __iomem * docptr = this->virtadr;
885 struct Nand *mychip = &this->chips[ofs >> this->chipshift]; 877 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
886 size_t i, size, got, want; 878 size_t i, size, got, want;
879 uint8_t *buf = ops->oobbuf;
880 size_t len = ops->len;
881
882 BUG_ON(ops->mode != MTD_OOB_PLACE);
883
884 ofs += ops->ooboffs;
887 885
888 DoC_CheckASIC(docptr); 886 DoC_CheckASIC(docptr);
889 887
@@ -949,12 +947,12 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
949 /* Disable flash internally */ 947 /* Disable flash internally */
950 WriteDOC(0, docptr, Mplus_FlashSelect); 948 WriteDOC(0, docptr, Mplus_FlashSelect);
951 949
952 *retlen = len; 950 ops->retlen = len;
953 return 0; 951 return 0;
954} 952}
955 953
956static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len, 954static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
957 size_t *retlen, const u_char *buf) 955 struct mtd_oob_ops *ops)
958{ 956{
959 volatile char dummy; 957 volatile char dummy;
960 loff_t fofs, base; 958 loff_t fofs, base;
@@ -963,6 +961,12 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
963 struct Nand *mychip = &this->chips[ofs >> this->chipshift]; 961 struct Nand *mychip = &this->chips[ofs >> this->chipshift];
964 size_t i, size, got, want; 962 size_t i, size, got, want;
965 int ret = 0; 963 int ret = 0;
964 uint8_t *buf = ops->oobbuf;
965 size_t len = ops->len;
966
967 BUG_ON(ops->mode != MTD_OOB_PLACE);
968
969 ofs += ops->ooboffs;
966 970
967 DoC_CheckASIC(docptr); 971 DoC_CheckASIC(docptr);
968 972
@@ -1038,7 +1042,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1038 printk("MTD: Error 0x%x programming oob at 0x%x\n", 1042 printk("MTD: Error 0x%x programming oob at 0x%x\n",
1039 dummy, (int)ofs); 1043 dummy, (int)ofs);
1040 /* FIXME: implement Bad Block Replacement */ 1044 /* FIXME: implement Bad Block Replacement */
1041 *retlen = 0; 1045 ops->retlen = 0;
1042 ret = -EIO; 1046 ret = -EIO;
1043 } 1047 }
1044 dummy = ReadDOC(docptr, Mplus_LastDataRead); 1048 dummy = ReadDOC(docptr, Mplus_LastDataRead);
@@ -1051,7 +1055,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, size_t len,
1051 /* Disable flash internally */ 1055 /* Disable flash internally */
1052 WriteDOC(0, docptr, Mplus_FlashSelect); 1056 WriteDOC(0, docptr, Mplus_FlashSelect);
1053 1057
1054 *retlen = len; 1058 ops->retlen = len;
1055 return ret; 1059 return ret;
1056} 1060}
1057 1061
@@ -1122,12 +1126,6 @@ int doc_erase(struct mtd_info *mtd, struct erase_info *instr)
1122 * 1126 *
1123 ****************************************************************************/ 1127 ****************************************************************************/
1124 1128
1125static int __init init_doc2001plus(void)
1126{
1127 inter_module_register(im_name, THIS_MODULE, &DoCMilPlus_init);
1128 return 0;
1129}
1130
1131static void __exit cleanup_doc2001plus(void) 1129static void __exit cleanup_doc2001plus(void)
1132{ 1130{
1133 struct mtd_info *mtd; 1131 struct mtd_info *mtd;
@@ -1143,11 +1141,9 @@ static void __exit cleanup_doc2001plus(void)
1143 kfree(this->chips); 1141 kfree(this->chips);
1144 kfree(mtd); 1142 kfree(mtd);
1145 } 1143 }
1146 inter_module_unregister(im_name);
1147} 1144}
1148 1145
1149module_exit(cleanup_doc2001plus); 1146module_exit(cleanup_doc2001plus);
1150module_init(init_doc2001plus);
1151 1147
1152MODULE_LICENSE("GPL"); 1148MODULE_LICENSE("GPL");
1153MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al."); 1149MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com> et al.");
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 13178b9dd00a..593bb033a3fa 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -231,6 +231,10 @@ static inline int __init doccheck(void __iomem *potential, unsigned long physadr
231 231
232static int docfound; 232static int docfound;
233 233
234extern void DoC2k_init(struct mtd_info *);
235extern void DoCMil_init(struct mtd_info *);
236extern void DoCMilPlus_init(struct mtd_info *);
237
234static void __init DoC_Probe(unsigned long physadr) 238static void __init DoC_Probe(unsigned long physadr)
235{ 239{
236 void __iomem *docptr; 240 void __iomem *docptr;
@@ -239,8 +243,6 @@ static void __init DoC_Probe(unsigned long physadr)
239 int ChipID; 243 int ChipID;
240 char namebuf[15]; 244 char namebuf[15];
241 char *name = namebuf; 245 char *name = namebuf;
242 char *im_funcname = NULL;
243 char *im_modname = NULL;
244 void (*initroutine)(struct mtd_info *) = NULL; 246 void (*initroutine)(struct mtd_info *) = NULL;
245 247
246 docptr = ioremap(physadr, DOC_IOREMAP_LEN); 248 docptr = ioremap(physadr, DOC_IOREMAP_LEN);
@@ -278,41 +280,33 @@ static void __init DoC_Probe(unsigned long physadr)
278 switch(ChipID) { 280 switch(ChipID) {
279 case DOC_ChipID_Doc2kTSOP: 281 case DOC_ChipID_Doc2kTSOP:
280 name="2000 TSOP"; 282 name="2000 TSOP";
281 im_funcname = "DoC2k_init"; 283 initroutine = symbol_request(DoC2k_init);
282 im_modname = "doc2000";
283 break; 284 break;
284 285
285 case DOC_ChipID_Doc2k: 286 case DOC_ChipID_Doc2k:
286 name="2000"; 287 name="2000";
287 im_funcname = "DoC2k_init"; 288 initroutine = symbol_request(DoC2k_init);
288 im_modname = "doc2000";
289 break; 289 break;
290 290
291 case DOC_ChipID_DocMil: 291 case DOC_ChipID_DocMil:
292 name="Millennium"; 292 name="Millennium";
293#ifdef DOC_SINGLE_DRIVER 293#ifdef DOC_SINGLE_DRIVER
294 im_funcname = "DoC2k_init"; 294 initroutine = symbol_request(DoC2k_init);
295 im_modname = "doc2000";
296#else 295#else
297 im_funcname = "DoCMil_init"; 296 initroutine = symbol_request(DoCMil_init);
298 im_modname = "doc2001";
299#endif /* DOC_SINGLE_DRIVER */ 297#endif /* DOC_SINGLE_DRIVER */
300 break; 298 break;
301 299
302 case DOC_ChipID_DocMilPlus16: 300 case DOC_ChipID_DocMilPlus16:
303 case DOC_ChipID_DocMilPlus32: 301 case DOC_ChipID_DocMilPlus32:
304 name="MillenniumPlus"; 302 name="MillenniumPlus";
305 im_funcname = "DoCMilPlus_init"; 303 initroutine = symbol_request(DoCMilPlus_init);
306 im_modname = "doc2001plus";
307 break; 304 break;
308 } 305 }
309 306
310 if (im_funcname)
311 initroutine = inter_module_get_request(im_funcname, im_modname);
312
313 if (initroutine) { 307 if (initroutine) {
314 (*initroutine)(mtd); 308 (*initroutine)(mtd);
315 inter_module_put(im_funcname); 309 symbol_put_addr(initroutine);
316 return; 310 return;
317 } 311 }
318 printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr); 312 printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr);
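
The probe path above replaces the removed inter_module_get_request()/inter_module_put() calls with symbol_request()/symbol_put_addr() against the DoC*_init routines that the chip drivers now export with EXPORT_SYMBOL_GPL(). The general shape of that pattern is sketched below, with my_chip_init standing in as a hypothetical exported symbol.

#include <linux/module.h>
#include <linux/mtd/mtd.h>

extern void my_chip_init(struct mtd_info *mtd);	/* EXPORT_SYMBOL_GPL() in the provider */

static int probe_one(struct mtd_info *mtd)
{
	void (*initroutine)(struct mtd_info *) = NULL;

	/* Resolves the symbol, requesting the provider module if it is not
	 * loaded yet, and takes a reference on that module. */
	initroutine = symbol_request(my_chip_init);
	if (!initroutine)
		return -ENODEV;

	(*initroutine)(mtd);
	symbol_put_addr(initroutine);	/* drop the module reference again */
	return 0;
}
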
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 29b0ddaa324e..4ea50a1dda85 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -635,6 +635,7 @@ int __init lart_flash_init (void)
635 printk ("%s: This looks like a LART board to me.\n",module_name); 635 printk ("%s: This looks like a LART board to me.\n",module_name);
636 mtd.name = module_name; 636 mtd.name = module_name;
637 mtd.type = MTD_NORFLASH; 637 mtd.type = MTD_NORFLASH;
638 mtd.writesize = 1;
638 mtd.flags = MTD_CAP_NORFLASH; 639 mtd.flags = MTD_CAP_NORFLASH;
639 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; 640 mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN;
640 mtd.erasesize = FLASH_BLOCKSIZE_MAIN; 641 mtd.erasesize = FLASH_BLOCKSIZE_MAIN;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 04e65d5dae00..a8466141e914 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -465,6 +465,7 @@ static int __devinit m25p_probe(struct spi_device *spi)
465 flash->mtd.name = spi->dev.bus_id; 465 flash->mtd.name = spi->dev.bus_id;
466 466
467 flash->mtd.type = MTD_NORFLASH; 467 flash->mtd.type = MTD_NORFLASH;
468 flash->mtd.writesize = 1;
468 flash->mtd.flags = MTD_CAP_NORFLASH; 469 flash->mtd.flags = MTD_CAP_NORFLASH;
469 flash->mtd.size = info->sector_size * info->n_sectors; 470 flash->mtd.size = info->sector_size * info->n_sectors;
470 flash->mtd.erasesize = info->sector_size; 471 flash->mtd.erasesize = info->sector_size;
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 485f663493d2..4ab7670770e4 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -219,7 +219,7 @@ static int __init ms02nv_init_one(ulong addr)
219 mp->uaddr = phys_to_virt(fixaddr); 219 mp->uaddr = phys_to_virt(fixaddr);
220 220
221 mtd->type = MTD_RAM; 221 mtd->type = MTD_RAM;
222 mtd->flags = MTD_CAP_RAM | MTD_XIP; 222 mtd->flags = MTD_CAP_RAM;
223 mtd->size = fixsize; 223 mtd->size = fixsize;
224 mtd->name = (char *)ms02nv_name; 224 mtd->name = (char *)ms02nv_name;
225 mtd->owner = THIS_MODULE; 225 mtd->owner = THIS_MODULE;
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c
index 1443117fd8f4..b4438eacfd80 100644
--- a/drivers/mtd/devices/mtdram.c
+++ b/drivers/mtd/devices/mtdram.c
@@ -106,6 +106,7 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
106 mtd->type = MTD_RAM; 106 mtd->type = MTD_RAM;
107 mtd->flags = MTD_CAP_RAM; 107 mtd->flags = MTD_CAP_RAM;
108 mtd->size = size; 108 mtd->size = size;
109 mtd->writesize = 1;
109 mtd->erasesize = MTDRAM_ERASE_SIZE; 110 mtd->erasesize = MTDRAM_ERASE_SIZE;
110 mtd->priv = mapped_address; 111 mtd->priv = mapped_address;
111 112
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index e8685ee6c1e4..e09e416667d3 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -1,8 +1,8 @@
1/** 1/**
2 * $Id: phram.c,v 1.16 2005/11/07 11:14:25 gleixner Exp $ 2 * $Id: phram.c,v 1.16 2005/11/07 11:14:25 gleixner Exp $
3 * 3 *
4 * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de> 4 * Copyright (c) ???? Jochen Schäuble <psionic@psionic.de>
5 * Copyright (c) 2003-2004 Jörn Engel <joern@wh.fh-wedel.de> 5 * Copyright (c) 2003-2004 Jörn Engel <joern@wh.fh-wedel.de>
6 * 6 *
7 * Usage: 7 * Usage:
8 * 8 *
@@ -142,7 +142,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
142 142
143 new->mtd.name = name; 143 new->mtd.name = name;
144 new->mtd.size = len; 144 new->mtd.size = len;
145 new->mtd.flags = MTD_CAP_RAM | MTD_ERASEABLE | MTD_VOLATILE; 145 new->mtd.flags = MTD_CAP_RAM;
146 new->mtd.erase = phram_erase; 146 new->mtd.erase = phram_erase;
147 new->mtd.point = phram_point; 147 new->mtd.point = phram_point;
148 new->mtd.unpoint = phram_unpoint; 148 new->mtd.unpoint = phram_unpoint;
@@ -266,12 +266,16 @@ static int phram_setup(const char *val, struct kernel_param *kp)
266 return 0; 266 return 0;
267 267
268 ret = parse_num32(&start, token[1]); 268 ret = parse_num32(&start, token[1]);
269 if (ret) 269 if (ret) {
270 kfree(name);
270 parse_err("illegal start address\n"); 271 parse_err("illegal start address\n");
272 }
271 273
272 ret = parse_num32(&len, token[2]); 274 ret = parse_num32(&len, token[2]);
273 if (ret) 275 if (ret) {
276 kfree(name);
274 parse_err("illegal device length\n"); 277 parse_err("illegal device length\n");
278 }
275 279
276 register_device(name, start, len); 280 register_device(name, start, len);
277 281
@@ -296,5 +300,5 @@ module_init(init_phram);
296module_exit(cleanup_phram); 300module_exit(cleanup_phram);
297 301
298MODULE_LICENSE("GPL"); 302MODULE_LICENSE("GPL");
299MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedel.de>"); 303MODULE_AUTHOR("Jörn Engel <joern@wh.fh-wedel.de>");
300MODULE_DESCRIPTION("MTD driver for physical RAM"); 304MODULE_DESCRIPTION("MTD driver for physical RAM");
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index 6faee6c6958c..b3f665e3c38b 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -200,8 +200,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
200 200
201 (*curmtd)->mtdinfo->name = name; 201 (*curmtd)->mtdinfo->name = name;
202 (*curmtd)->mtdinfo->size = length; 202 (*curmtd)->mtdinfo->size = length;
203 (*curmtd)->mtdinfo->flags = MTD_CLEAR_BITS | MTD_SET_BITS | 203 (*curmtd)->mtdinfo->flags = MTD_CAP_RAM;
204 MTD_WRITEB_WRITEABLE | MTD_VOLATILE | MTD_CAP_RAM;
205 (*curmtd)->mtdinfo->erase = slram_erase; 204 (*curmtd)->mtdinfo->erase = slram_erase;
206 (*curmtd)->mtdinfo->point = slram_point; 205 (*curmtd)->mtdinfo->point = slram_point;
207 (*curmtd)->mtdinfo->unpoint = slram_unpoint; 206 (*curmtd)->mtdinfo->unpoint = slram_unpoint;
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index a3b92479719d..1e21a2c3dd29 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -36,6 +36,7 @@
36#include <linux/mtd/mtd.h> 36#include <linux/mtd/mtd.h>
37#include <linux/mtd/nftl.h> 37#include <linux/mtd/nftl.h>
38#include <linux/mtd/inftl.h> 38#include <linux/mtd/inftl.h>
39#include <linux/mtd/nand.h>
39#include <asm/uaccess.h> 40#include <asm/uaccess.h>
40#include <asm/errno.h> 41#include <asm/errno.h>
41#include <asm/io.h> 42#include <asm/io.h>
@@ -79,14 +80,12 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
79 inftl->mbd.devnum = -1; 80 inftl->mbd.devnum = -1;
80 inftl->mbd.blksize = 512; 81 inftl->mbd.blksize = 512;
81 inftl->mbd.tr = tr; 82 inftl->mbd.tr = tr;
82 memcpy(&inftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
83 inftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
84 83
85 if (INFTL_mount(inftl) < 0) { 84 if (INFTL_mount(inftl) < 0) {
86 printk(KERN_WARNING "INFTL: could not mount device\n"); 85 printk(KERN_WARNING "INFTL: could not mount device\n");
87 kfree(inftl); 86 kfree(inftl);
88 return; 87 return;
89 } 88 }
90 89
91 /* OK, it's a new one. Set up all the data structures. */ 90 /* OK, it's a new one. Set up all the data structures. */
92 91
@@ -152,6 +151,69 @@ static void inftl_remove_dev(struct mtd_blktrans_dev *dev)
152 */ 151 */
153 152
154/* 153/*
154 * Read oob data from flash
155 */
156int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
157 size_t *retlen, uint8_t *buf)
158{
159 struct mtd_oob_ops ops;
160 int res;
161
162 ops.mode = MTD_OOB_PLACE;
163 ops.ooboffs = offs & (mtd->writesize - 1);
164 ops.ooblen = len;
165 ops.oobbuf = buf;
166 ops.datbuf = NULL;
167 ops.len = len;
168
169 res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
170 *retlen = ops.retlen;
171 return res;
172}
173
174/*
175 * Write oob data to flash
176 */
177int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
178 size_t *retlen, uint8_t *buf)
179{
180 struct mtd_oob_ops ops;
181 int res;
182
183 ops.mode = MTD_OOB_PLACE;
184 ops.ooboffs = offs & (mtd->writesize - 1);
185 ops.ooblen = len;
186 ops.oobbuf = buf;
187 ops.datbuf = NULL;
188 ops.len = len;
189
190 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
191 *retlen = ops.retlen;
192 return res;
193}
194
195/*
196 * Write data and oob to flash
197 */
198static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
199 size_t *retlen, uint8_t *buf, uint8_t *oob)
200{
201 struct mtd_oob_ops ops;
202 int res;
203
204 ops.mode = MTD_OOB_PLACE;
205 ops.ooboffs = offs;
206 ops.ooblen = mtd->oobsize;
207 ops.oobbuf = oob;
208 ops.datbuf = buf;
209 ops.len = len;
210
211 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
212 *retlen = ops.retlen;
213 return res;
214}
215
216/*
155 * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition. 217 * INFTL_findfreeblock: Find a free Erase Unit on the INFTL partition.
156 * This function is used when the give Virtual Unit Chain. 218 * This function is used when the give Virtual Unit Chain.
157 */ 219 */
@@ -198,10 +260,11 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
198 u16 BlockMap[MAX_SECTORS_PER_UNIT]; 260 u16 BlockMap[MAX_SECTORS_PER_UNIT];
199 unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT]; 261 unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
200 unsigned int thisEUN, prevEUN, status; 262 unsigned int thisEUN, prevEUN, status;
263 struct mtd_info *mtd = inftl->mbd.mtd;
201 int block, silly; 264 int block, silly;
202 unsigned int targetEUN; 265 unsigned int targetEUN;
203 struct inftl_oob oob; 266 struct inftl_oob oob;
204 size_t retlen; 267 size_t retlen;
205 268
206 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d," 269 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
207 "pending=%d)\n", inftl, thisVUC, pendingblock); 270 "pending=%d)\n", inftl, thisVUC, pendingblock);
@@ -221,18 +284,18 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
221 * Scan to find the Erase Unit which holds the actual data for each 284 * Scan to find the Erase Unit which holds the actual data for each
222 * 512-byte block within the Chain. 285 * 512-byte block within the Chain.
223 */ 286 */
224 silly = MAX_LOOPS; 287 silly = MAX_LOOPS;
225 while (thisEUN < inftl->nb_blocks) { 288 while (thisEUN < inftl->nb_blocks) {
226 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) { 289 for (block = 0; block < inftl->EraseSize/SECTORSIZE; block ++) {
227 if ((BlockMap[block] != 0xffff) || BlockDeleted[block]) 290 if ((BlockMap[block] != 0xffff) || BlockDeleted[block])
228 continue; 291 continue;
229 292
230 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) 293 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
231 + (block * SECTORSIZE), 16 , &retlen, 294 + (block * SECTORSIZE), 16, &retlen,
232 (char *)&oob) < 0) 295 (char *)&oob) < 0)
233 status = SECTOR_IGNORE; 296 status = SECTOR_IGNORE;
234 else 297 else
235 status = oob.b.Status | oob.b.Status1; 298 status = oob.b.Status | oob.b.Status1;
236 299
237 switch(status) { 300 switch(status) {
238 case SECTOR_FREE: 301 case SECTOR_FREE:
@@ -282,29 +345,31 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
282 continue; 345 continue;
283 } 346 }
284 347
285 /* 348 /*
286 * Copy only in non free block (free blocks can only 349 * Copy only in non free block (free blocks can only
287 * happen in case of media errors or deleted blocks). 350 * happen in case of media errors or deleted blocks).
288 */ 351 */
289 if (BlockMap[block] == BLOCK_NIL) 352 if (BlockMap[block] == BLOCK_NIL)
290 continue; 353 continue;
291 354
292 ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize * 355 ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
293 BlockMap[block]) + (block * SECTORSIZE), SECTORSIZE, 356 (block * SECTORSIZE), SECTORSIZE, &retlen,
294 &retlen, movebuf); 357 movebuf);
295 if (ret < 0) { 358 if (ret < 0 && ret != -EUCLEAN) {
296 ret = MTD_READ(inftl->mbd.mtd, (inftl->EraseSize * 359 ret = mtd->read(mtd,
297 BlockMap[block]) + (block * SECTORSIZE), 360 (inftl->EraseSize * BlockMap[block]) +
298 SECTORSIZE, &retlen, movebuf); 361 (block * SECTORSIZE), SECTORSIZE,
362 &retlen, movebuf);
299 if (ret != -EIO) 363 if (ret != -EIO)
300 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went " 364 DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
301 "away on retry?\n"); 365 "away on retry?\n");
302 } 366 }
303 memset(&oob, 0xff, sizeof(struct inftl_oob)); 367 memset(&oob, 0xff, sizeof(struct inftl_oob));
304 oob.b.Status = oob.b.Status1 = SECTOR_USED; 368 oob.b.Status = oob.b.Status1 = SECTOR_USED;
305 MTD_WRITEECC(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) + 369
306 (block * SECTORSIZE), SECTORSIZE, &retlen, 370 inftl_write(inftl->mbd.mtd, (inftl->EraseSize * targetEUN) +
307 movebuf, (char *)&oob, &inftl->oobinfo); 371 (block * SECTORSIZE), SECTORSIZE, &retlen,
372 movebuf, (char *)&oob);
308 } 373 }
309 374
310 /* 375 /*
@@ -329,17 +394,17 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
329 if (thisEUN == targetEUN) 394 if (thisEUN == targetEUN)
330 break; 395 break;
331 396
332 if (INFTL_formatblock(inftl, thisEUN) < 0) { 397 if (INFTL_formatblock(inftl, thisEUN) < 0) {
333 /* 398 /*
334 * Could not erase : mark block as reserved. 399 * Could not erase : mark block as reserved.
335 */ 400 */
336 inftl->PUtable[thisEUN] = BLOCK_RESERVED; 401 inftl->PUtable[thisEUN] = BLOCK_RESERVED;
337 } else { 402 } else {
338 /* Correctly erased : mark it as free */ 403 /* Correctly erased : mark it as free */
339 inftl->PUtable[thisEUN] = BLOCK_FREE; 404 inftl->PUtable[thisEUN] = BLOCK_FREE;
340 inftl->PUtable[prevEUN] = BLOCK_NIL; 405 inftl->PUtable[prevEUN] = BLOCK_NIL;
341 inftl->numfreeEUNs++; 406 inftl->numfreeEUNs++;
342 } 407 }
343 } 408 }
344 409
345 return targetEUN; 410 return targetEUN;
@@ -415,6 +480,7 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
415 unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE); 480 unsigned int thisVUC = block / (inftl->EraseSize / SECTORSIZE);
416 unsigned int thisEUN, writeEUN, prev_block, status; 481 unsigned int thisEUN, writeEUN, prev_block, status;
417 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1); 482 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize -1);
483 struct mtd_info *mtd = inftl->mbd.mtd;
418 struct inftl_oob oob; 484 struct inftl_oob oob;
419 struct inftl_bci bci; 485 struct inftl_bci bci;
420 unsigned char anac, nacs, parity; 486 unsigned char anac, nacs, parity;
@@ -434,10 +500,10 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block)
434 silly = MAX_LOOPS; 500 silly = MAX_LOOPS;
435 501
436 while (thisEUN <= inftl->lastEUN) { 502 while (thisEUN <= inftl->lastEUN) {
437 MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) + 503 inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
438 blockofs, 8, &retlen, (char *)&bci); 504 blockofs, 8, &retlen, (char *)&bci);
439 505
440 status = bci.Status | bci.Status1; 506 status = bci.Status | bci.Status1;
441 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in " 507 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
442 "EUN %d is %x\n", block , writeEUN, status); 508 "EUN %d is %x\n", block , writeEUN, status);
443 509
@@ -522,8 +588,8 @@ hitused:
522 nacs = 0; 588 nacs = 0;
523 thisEUN = inftl->VUtable[thisVUC]; 589 thisEUN = inftl->VUtable[thisVUC];
524 if (thisEUN != BLOCK_NIL) { 590 if (thisEUN != BLOCK_NIL) {
525 MTD_READOOB(inftl->mbd.mtd, thisEUN * inftl->EraseSize 591 inftl_read_oob(mtd, thisEUN * inftl->EraseSize
526 + 8, 8, &retlen, (char *)&oob.u); 592 + 8, 8, &retlen, (char *)&oob.u);
527 anac = oob.u.a.ANAC + 1; 593 anac = oob.u.a.ANAC + 1;
528 nacs = oob.u.a.NACs + 1; 594 nacs = oob.u.a.NACs + 1;
529 } 595 }
@@ -544,8 +610,8 @@ hitused:
544 oob.u.a.parityPerField = parity; 610 oob.u.a.parityPerField = parity;
545 oob.u.a.discarded = 0xaa; 611 oob.u.a.discarded = 0xaa;
546 612
547 MTD_WRITEOOB(inftl->mbd.mtd, writeEUN * inftl->EraseSize + 8, 8, 613 inftl_write_oob(mtd, writeEUN * inftl->EraseSize + 8, 8,
548 &retlen, (char *)&oob.u); 614 &retlen, (char *)&oob.u);
549 615
550 /* Also back up header... */ 616 /* Also back up header... */
551 oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC); 617 oob.u.b.virtualUnitNo = cpu_to_le16(thisVUC);
@@ -555,8 +621,8 @@ hitused:
555 oob.u.b.parityPerField = parity; 621 oob.u.b.parityPerField = parity;
556 oob.u.b.discarded = 0xaa; 622 oob.u.b.discarded = 0xaa;
557 623
558 MTD_WRITEOOB(inftl->mbd.mtd, writeEUN * inftl->EraseSize + 624 inftl_write_oob(mtd, writeEUN * inftl->EraseSize +
559 SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u); 625 SECTORSIZE * 4 + 8, 8, &retlen, (char *)&oob.u);
560 626
561 inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC]; 627 inftl->PUtable[writeEUN] = inftl->VUtable[thisVUC];
562 inftl->VUtable[thisVUC] = writeEUN; 628 inftl->VUtable[thisVUC] = writeEUN;
@@ -576,6 +642,7 @@ hitused:
576 */ 642 */
577static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC) 643static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
578{ 644{
645 struct mtd_info *mtd = inftl->mbd.mtd;
579 unsigned char BlockUsed[MAX_SECTORS_PER_UNIT]; 646 unsigned char BlockUsed[MAX_SECTORS_PER_UNIT];
580 unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT]; 647 unsigned char BlockDeleted[MAX_SECTORS_PER_UNIT];
581 unsigned int thisEUN, status; 648 unsigned int thisEUN, status;
@@ -606,9 +673,9 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
606 if (BlockUsed[block] || BlockDeleted[block]) 673 if (BlockUsed[block] || BlockDeleted[block])
607 continue; 674 continue;
608 675
609 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) 676 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize)
610 + (block * SECTORSIZE), 8 , &retlen, 677 + (block * SECTORSIZE), 8 , &retlen,
611 (char *)&bci) < 0) 678 (char *)&bci) < 0)
612 status = SECTOR_IGNORE; 679 status = SECTOR_IGNORE;
613 else 680 else
614 status = bci.Status | bci.Status1; 681 status = bci.Status | bci.Status1;
@@ -670,12 +737,12 @@ static void INFTL_trydeletechain(struct INFTLrecord *inftl, unsigned thisVUC)
670 DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n", 737 DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
671 thisEUN, thisVUC); 738 thisEUN, thisVUC);
672 739
673 if (INFTL_formatblock(inftl, thisEUN) < 0) { 740 if (INFTL_formatblock(inftl, thisEUN) < 0) {
674 /* 741 /*
675 * Could not erase : mark block as reserved. 742 * Could not erase : mark block as reserved.
676 */ 743 */
677 inftl->PUtable[thisEUN] = BLOCK_RESERVED; 744 inftl->PUtable[thisEUN] = BLOCK_RESERVED;
678 } else { 745 } else {
679 /* Correctly erased : mark it as free */ 746 /* Correctly erased : mark it as free */
680 inftl->PUtable[thisEUN] = BLOCK_FREE; 747 inftl->PUtable[thisEUN] = BLOCK_FREE;
681 inftl->numfreeEUNs++; 748 inftl->numfreeEUNs++;
@@ -697,6 +764,7 @@ static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
697{ 764{
698 unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)]; 765 unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
699 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); 766 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
767 struct mtd_info *mtd = inftl->mbd.mtd;
700 unsigned int status; 768 unsigned int status;
701 int silly = MAX_LOOPS; 769 int silly = MAX_LOOPS;
702 size_t retlen; 770 size_t retlen;
@@ -706,8 +774,8 @@ static int INFTL_deleteblock(struct INFTLrecord *inftl, unsigned block)
706 "block=%d)\n", inftl, block); 774 "block=%d)\n", inftl, block);
707 775
708 while (thisEUN < inftl->nb_blocks) { 776 while (thisEUN < inftl->nb_blocks) {
709 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) + 777 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
710 blockofs, 8, &retlen, (char *)&bci) < 0) 778 blockofs, 8, &retlen, (char *)&bci) < 0)
711 status = SECTOR_IGNORE; 779 status = SECTOR_IGNORE;
712 else 780 else
713 status = bci.Status | bci.Status1; 781 status = bci.Status | bci.Status1;
@@ -741,10 +809,10 @@ foundit:
741 if (thisEUN != BLOCK_NIL) { 809 if (thisEUN != BLOCK_NIL) {
742 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs; 810 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
743 811
744 if (MTD_READOOB(inftl->mbd.mtd, ptr, 8, &retlen, (char *)&bci) < 0) 812 if (inftl_read_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
745 return -EIO; 813 return -EIO;
746 bci.Status = bci.Status1 = SECTOR_DELETED; 814 bci.Status = bci.Status1 = SECTOR_DELETED;
747 if (MTD_WRITEOOB(inftl->mbd.mtd, ptr, 8, &retlen, (char *)&bci) < 0) 815 if (inftl_write_oob(mtd, ptr, 8, &retlen, (char *)&bci) < 0)
748 return -EIO; 816 return -EIO;
749 INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE)); 817 INFTL_trydeletechain(inftl, block / (inftl->EraseSize / SECTORSIZE));
750 } 818 }
@@ -784,9 +852,10 @@ static int inftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
784 852
785 memset(&oob, 0xff, sizeof(struct inftl_oob)); 853 memset(&oob, 0xff, sizeof(struct inftl_oob));
786 oob.b.Status = oob.b.Status1 = SECTOR_USED; 854 oob.b.Status = oob.b.Status1 = SECTOR_USED;
787 MTD_WRITEECC(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) + 855
788 blockofs, SECTORSIZE, &retlen, (char *)buffer, 856 inftl_write(inftl->mbd.mtd, (writeEUN * inftl->EraseSize) +
789 (char *)&oob, &inftl->oobinfo); 857 blockofs, SECTORSIZE, &retlen, (char *)buffer,
858 (char *)&oob);
790 /* 859 /*
791 * need to write SECTOR_USED flags since they are not written 860 * need to write SECTOR_USED flags since they are not written
792 * in mtd_writeecc 861 * in mtd_writeecc
@@ -804,17 +873,18 @@ static int inftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
804 struct INFTLrecord *inftl = (void *)mbd; 873 struct INFTLrecord *inftl = (void *)mbd;
805 unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)]; 874 unsigned int thisEUN = inftl->VUtable[block / (inftl->EraseSize / SECTORSIZE)];
806 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1); 875 unsigned long blockofs = (block * SECTORSIZE) & (inftl->EraseSize - 1);
807 unsigned int status; 876 struct mtd_info *mtd = inftl->mbd.mtd;
877 unsigned int status;
808 int silly = MAX_LOOPS; 878 int silly = MAX_LOOPS;
809 struct inftl_bci bci; 879 struct inftl_bci bci;
810 size_t retlen; 880 size_t retlen;
811 881
812 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld," 882 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
813 "buffer=%p)\n", inftl, block, buffer); 883 "buffer=%p)\n", inftl, block, buffer);
814 884
815 while (thisEUN < inftl->nb_blocks) { 885 while (thisEUN < inftl->nb_blocks) {
816 if (MTD_READOOB(inftl->mbd.mtd, (thisEUN * inftl->EraseSize) + 886 if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) +
817 blockofs, 8, &retlen, (char *)&bci) < 0) 887 blockofs, 8, &retlen, (char *)&bci) < 0)
818 status = SECTOR_IGNORE; 888 status = SECTOR_IGNORE;
819 else 889 else
820 status = bci.Status | bci.Status1; 890 status = bci.Status | bci.Status1;
@@ -850,10 +920,12 @@ foundit:
850 /* The requested block is not on the media, return all 0x00 */ 920 /* The requested block is not on the media, return all 0x00 */
851 memset(buffer, 0, SECTORSIZE); 921 memset(buffer, 0, SECTORSIZE);
852 } else { 922 } else {
853 size_t retlen; 923 size_t retlen;
854 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs; 924 loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
855 if (MTD_READ(inftl->mbd.mtd, ptr, SECTORSIZE, &retlen, 925 int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
856 buffer)) 926
927 /* Handle corrected bit flips gracefully */
928 if (ret < 0 && ret != -EUCLEAN)
857 return -EIO; 929 return -EIO;
858 } 930 }
859 return 0; 931 return 0;
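
The read paths above now call mtd->read() directly and treat -EUCLEAN, which the MTD layer returns when ECC corrected bit flips but the data is intact, as a soft condition rather than a failure. A condensed sketch of that convention follows; read_one_sector is a hypothetical helper and SECTORSIZE is defined locally for the sketch.

#include <linux/mtd/mtd.h>

#define SECTORSIZE 512

static int read_one_sector(struct mtd_info *mtd, loff_t ptr, uint8_t *buffer)
{
	size_t retlen;
	int ret;

	ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);

	/* -EUCLEAN: bit flips were corrected by ECC and the data is good;
	 * only other negative codes are hard read failures. */
	if (ret < 0 && ret != -EUCLEAN)
		return -EIO;

	return 0;
}
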
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 43fdc9433882..8f6006f1a519 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -43,6 +43,11 @@
43 43
44char inftlmountrev[]="$Revision: 1.18 $"; 44char inftlmountrev[]="$Revision: 1.18 $";
45 45
46extern int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
47 size_t *retlen, uint8_t *buf);
48extern int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
49 size_t *retlen, uint8_t *buf);
50
46/* 51/*
47 * find_boot_record: Find the INFTL Media Header and its Spare copy which 52 * find_boot_record: Find the INFTL Media Header and its Spare copy which
48 * contains the various device information of the INFTL partition and 53 * contains the various device information of the INFTL partition and
@@ -57,6 +62,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
57 unsigned int i, block; 62 unsigned int i, block;
58 u8 buf[SECTORSIZE]; 63 u8 buf[SECTORSIZE];
59 struct INFTLMediaHeader *mh = &inftl->MediaHdr; 64 struct INFTLMediaHeader *mh = &inftl->MediaHdr;
65 struct mtd_info *mtd = inftl->mbd.mtd;
60 struct INFTLPartition *ip; 66 struct INFTLPartition *ip;
61 size_t retlen; 67 size_t retlen;
62 68
@@ -80,8 +86,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
80 * Check for BNAND header first. Then whinge if it's found 86 * Check for BNAND header first. Then whinge if it's found
81 * but later checks fail. 87 * but later checks fail.
82 */ 88 */
83 ret = MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize, 89 ret = mtd->read(mtd, block * inftl->EraseSize,
84 SECTORSIZE, &retlen, buf); 90 SECTORSIZE, &retlen, buf);
85 /* We ignore ret in case the ECC of the MediaHeader is invalid 91 /* We ignore ret in case the ECC of the MediaHeader is invalid
86 (which is apparently acceptable) */ 92 (which is apparently acceptable) */
87 if (retlen != SECTORSIZE) { 93 if (retlen != SECTORSIZE) {
@@ -106,8 +112,9 @@ static int find_boot_record(struct INFTLrecord *inftl)
106 } 112 }
107 113
108 /* To be safer with BIOS, also use erase mark as discriminant */ 114 /* To be safer with BIOS, also use erase mark as discriminant */
109 if ((ret = MTD_READOOB(inftl->mbd.mtd, block * inftl->EraseSize + 115 if ((ret = inftl_read_oob(mtd, block * inftl->EraseSize +
110 SECTORSIZE + 8, 8, &retlen, (char *)&h1) < 0)) { 116 SECTORSIZE + 8, 8, &retlen,
117 (char *)&h1) < 0)) {
111 printk(KERN_WARNING "INFTL: ANAND header found at " 118 printk(KERN_WARNING "INFTL: ANAND header found at "
112 "0x%x in mtd%d, but OOB data read failed " 119 "0x%x in mtd%d, but OOB data read failed "
113 "(err %d)\n", block * inftl->EraseSize, 120 "(err %d)\n", block * inftl->EraseSize,
@@ -123,8 +130,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
123 memcpy(mh, buf, sizeof(struct INFTLMediaHeader)); 130 memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
124 131
125 /* Read the spare media header at offset 4096 */ 132 /* Read the spare media header at offset 4096 */
126 MTD_READ(inftl->mbd.mtd, block * inftl->EraseSize + 4096, 133 mtd->read(mtd, block * inftl->EraseSize + 4096,
127 SECTORSIZE, &retlen, buf); 134 SECTORSIZE, &retlen, buf);
128 if (retlen != SECTORSIZE) { 135 if (retlen != SECTORSIZE) {
129 printk(KERN_WARNING "INFTL: Unable to read spare " 136 printk(KERN_WARNING "INFTL: Unable to read spare "
130 "Media Header\n"); 137 "Media Header\n");
@@ -233,7 +240,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
233 */ 240 */
234 instr->addr = ip->Reserved0 * inftl->EraseSize; 241 instr->addr = ip->Reserved0 * inftl->EraseSize;
235 instr->len = inftl->EraseSize; 242 instr->len = inftl->EraseSize;
236 MTD_ERASE(inftl->mbd.mtd, instr); 243 mtd->erase(mtd, instr);
237 } 244 }
238 if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) { 245 if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
239 printk(KERN_WARNING "INFTL: Media Header " 246 printk(KERN_WARNING "INFTL: Media Header "
@@ -350,21 +357,21 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
350 int len, int check_oob) 357 int len, int check_oob)
351{ 358{
352 u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize]; 359 u8 buf[SECTORSIZE + inftl->mbd.mtd->oobsize];
360 struct mtd_info *mtd = inftl->mbd.mtd;
353 size_t retlen; 361 size_t retlen;
354 int i; 362 int i;
355 363
356 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: check_free_sectors(inftl=%p,"
357 "address=0x%x,len=%d,check_oob=%d)\n", inftl,
358 address, len, check_oob);
359
360 for (i = 0; i < len; i += SECTORSIZE) { 364 for (i = 0; i < len; i += SECTORSIZE) {
361 if (MTD_READECC(inftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &inftl->oobinfo) < 0) 365 if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
362 return -1; 366 return -1;
363 if (memcmpb(buf, 0xff, SECTORSIZE) != 0) 367 if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
364 return -1; 368 return -1;
365 369
366 if (check_oob) { 370 if (check_oob) {
367 if (memcmpb(buf + SECTORSIZE, 0xff, inftl->mbd.mtd->oobsize) != 0) 371 if(inftl_read_oob(mtd, address, mtd->oobsize,
372 &retlen, &buf[SECTORSIZE]) < 0)
373 return -1;
374 if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
368 return -1; 375 return -1;
369 } 376 }
370 address += SECTORSIZE; 377 address += SECTORSIZE;
@@ -387,6 +394,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
387 size_t retlen; 394 size_t retlen;
388 struct inftl_unittail uci; 395 struct inftl_unittail uci;
389 struct erase_info *instr = &inftl->instr; 396 struct erase_info *instr = &inftl->instr;
397 struct mtd_info *mtd = inftl->mbd.mtd;
390 int physblock; 398 int physblock;
391 399
392 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p," 400 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
@@ -404,8 +412,9 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
404 /* Erase one physical eraseblock at a time, even though the NAND api 412 /* Erase one physical eraseblock at a time, even though the NAND api
405 allows us to group them. This way if we have a failure, we can 413 allows us to group them. This way if we have a failure, we can
406 mark only the failed block in the bbt. */ 414 mark only the failed block in the bbt. */
407 for (physblock = 0; physblock < inftl->EraseSize; physblock += instr->len, instr->addr += instr->len) { 415 for (physblock = 0; physblock < inftl->EraseSize;
408 MTD_ERASE(inftl->mbd.mtd, instr); 416 physblock += instr->len, instr->addr += instr->len) {
417 mtd->erase(inftl->mbd.mtd, instr);
409 418
410 if (instr->state == MTD_ERASE_FAILED) { 419 if (instr->state == MTD_ERASE_FAILED) {
411 printk(KERN_WARNING "INFTL: error while formatting block %d\n", 420 printk(KERN_WARNING "INFTL: error while formatting block %d\n",
@@ -414,10 +423,10 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
414 } 423 }
415 424
416 /* 425 /*
417 * Check the "freeness" of Erase Unit before updating metadata. 426 * Check the "freeness" of Erase Unit before updating metadata.
418 * FixMe: is this check really necessary? Since we have checked the 427 * FixMe: is this check really necessary? Since we have checked
419 * return code after the erase operation. 428 * the return code after the erase operation.
420 */ 429 */
421 if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0) 430 if (check_free_sectors(inftl, instr->addr, instr->len, 1) != 0)
422 goto fail; 431 goto fail;
423 } 432 }
@@ -429,8 +438,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
429 uci.Reserved[2] = 0; 438 uci.Reserved[2] = 0;
430 uci.Reserved[3] = 0; 439 uci.Reserved[3] = 0;
431 instr->addr = block * inftl->EraseSize + SECTORSIZE * 2; 440 instr->addr = block * inftl->EraseSize + SECTORSIZE * 2;
432 if (MTD_WRITEOOB(inftl->mbd.mtd, instr->addr + 441 if (inftl_write_oob(mtd, instr->addr + 8, 8, &retlen, (char *)&uci) < 0)
433 8, 8, &retlen, (char *)&uci) < 0)
434 goto fail; 442 goto fail;
435 return 0; 443 return 0;
436fail: 444fail:
@@ -549,6 +557,7 @@ void INFTL_dumpVUchains(struct INFTLrecord *s)
549 557
550int INFTL_mount(struct INFTLrecord *s) 558int INFTL_mount(struct INFTLrecord *s)
551{ 559{
560 struct mtd_info *mtd = s->mbd.mtd;
552 unsigned int block, first_block, prev_block, last_block; 561 unsigned int block, first_block, prev_block, last_block;
553 unsigned int first_logical_block, logical_block, erase_mark; 562 unsigned int first_logical_block, logical_block, erase_mark;
554 int chain_length, do_format_chain; 563 int chain_length, do_format_chain;
@@ -607,10 +616,11 @@ int INFTL_mount(struct INFTLrecord *s)
607 break; 616 break;
608 } 617 }
609 618
610 if (MTD_READOOB(s->mbd.mtd, block * s->EraseSize + 8, 619 if (inftl_read_oob(mtd, block * s->EraseSize + 8,
611 8, &retlen, (char *)&h0) < 0 || 620 8, &retlen, (char *)&h0) < 0 ||
612 MTD_READOOB(s->mbd.mtd, block * s->EraseSize + 621 inftl_read_oob(mtd, block * s->EraseSize +
613 2 * SECTORSIZE + 8, 8, &retlen, (char *)&h1) < 0) { 622 2 * SECTORSIZE + 8, 8, &retlen,
623 (char *)&h1) < 0) {
614 /* Should never happen? */ 624 /* Should never happen? */
615 do_format_chain++; 625 do_format_chain++;
616 break; 626 break;
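
[Editor's note] The inftlmount.c hunks above drop the old MTD_READOOB/MTD_WRITEOOB macros in favour of inftl_read_oob()/inftl_write_oob() helpers and cache inftl->mbd.mtd in a local mtd pointer. The helpers themselves are defined elsewhere in this series (not in this portion of the diff); the fragment below is only a minimal sketch of what such a wrapper can look like, modelled on the mtd_oob_ops handling visible in the mtdchar.c hunks further down. The page/OOB offset masking and the field choices are assumptions, not a copy of the real helper.

/* Hedged sketch: present the old "offset/len/retlen/buf" OOB call on top of
 * the new mtd->read_oob()/struct mtd_oob_ops interface. */
static int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
			  size_t *retlen, uint8_t *buf)
{
	struct mtd_oob_ops ops;
	int res;

	ops.mode = MTD_OOB_PLACE;		/* place bytes exactly where asked */
	ops.ooboffs = offs & (mtd->writesize - 1); /* offset inside the page's OOB */
	ops.ooblen = len;
	ops.oobbuf = buf;
	ops.datbuf = NULL;			/* OOB only, no main-area data */
	ops.len = len;

	res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
	*retlen = ops.retlen;
	return res;
}

The write-side wrapper is symmetrical, calling mtd->write_oob() instead.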
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 7abd7fee0dda..6bdaacc6d6f9 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -37,7 +37,7 @@ config MTD_PHYSMAP_START
37config MTD_PHYSMAP_LEN 37config MTD_PHYSMAP_LEN
38 hex "Physical length of flash mapping" 38 hex "Physical length of flash mapping"
39 depends on MTD_PHYSMAP 39 depends on MTD_PHYSMAP
40 default "0x4000000" 40 default "0"
41 help 41 help
42 This is the total length of the mapping of the flash chips on 42 This is the total length of the mapping of the flash chips on
43 your particular board. If there is space, or aliases, in the 43 your particular board. If there is space, or aliases, in the
@@ -78,7 +78,7 @@ config MTD_PNC2000
78 78
79config MTD_SC520CDP 79config MTD_SC520CDP
80 tristate "CFI Flash device mapped on AMD SC520 CDP" 80 tristate "CFI Flash device mapped on AMD SC520 CDP"
81 depends on X86 && MTD_CFI 81 depends on X86 && MTD_CFI && MTD_CONCAT
82 help 82 help
83 The SC520 CDP board has two banks of CFI-compliant chips and one 83 The SC520 CDP board has two banks of CFI-compliant chips and one
84 Dual-in-line JEDEC chip. This 'mapping' driver supports that 84 Dual-in-line JEDEC chip. This 'mapping' driver supports that
@@ -109,7 +109,7 @@ config MTD_TS5500
109 mtd1 allows you to reprogram your BIOS. BE VERY CAREFUL. 109 mtd1 allows you to reprogram your BIOS. BE VERY CAREFUL.
110 110
111 Note that jumper 3 ("Write Enable Drive A") must be set 111 Note that jumper 3 ("Write Enable Drive A") must be set
112 otherwise detection won't succeeed. 112 otherwise detection won't succeed.
113 113
114config MTD_SBC_GXX 114config MTD_SBC_GXX
115 tristate "CFI Flash device mapped on Arcom SBC-GXx boards" 115 tristate "CFI Flash device mapped on Arcom SBC-GXx boards"
@@ -200,8 +200,8 @@ config MTD_TSUNAMI
200 Support for the flash chip on Tsunami TIG bus. 200 Support for the flash chip on Tsunami TIG bus.
201 201
202config MTD_LASAT 202config MTD_LASAT
203 tristate "Flash chips on LASAT board" 203 tristate "LASAT flash device"
204 depends on LASAT 204 depends on LASAT && MTD_CFI
205 help 205 help
206 Support for the flash chips on the Lasat 100 and 200 boards. 206 Support for the flash chips on the Lasat 100 and 200 boards.
207 207
@@ -561,7 +561,6 @@ config MTD_PCMCIA
561config MTD_PCMCIA_ANONYMOUS 561config MTD_PCMCIA_ANONYMOUS
562 bool "Use PCMCIA MTD drivers for anonymous PCMCIA cards" 562 bool "Use PCMCIA MTD drivers for anonymous PCMCIA cards"
563 depends on MTD_PCMCIA 563 depends on MTD_PCMCIA
564 default N
565 help 564 help
566 If this option is enabled, PCMCIA cards which do not report 565 If this option is enabled, PCMCIA cards which do not report
567 anything about themselves are assumed to be MTD cards. 566 anything about themselves are assumed to be MTD cards.
diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c
index fd0f0d3187de..92b5d883d7b0 100644
--- a/drivers/mtd/maps/cfi_flagadm.c
+++ b/drivers/mtd/maps/cfi_flagadm.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is> 2 * Copyright © 2001 Flaga hf. Medical Devices, Kári Davíðsson <kd@flaga.is>
3 * 3 *
4 * $Id: cfi_flagadm.c,v 1.15 2005/11/07 11:14:26 gleixner Exp $ 4 * $Id: cfi_flagadm.c,v 1.15 2005/11/07 11:14:26 gleixner Exp $
5 * 5 *
@@ -135,5 +135,5 @@ module_exit(cleanup_flagadm);
135 135
136 136
137MODULE_LICENSE("GPL"); 137MODULE_LICENSE("GPL");
138MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>"); 138MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>");
139MODULE_DESCRIPTION("MTD map driver for Flaga digital module"); 139MODULE_DESCRIPTION("MTD map driver for Flaga digital module");
diff --git a/drivers/mtd/maps/dbox2-flash.c b/drivers/mtd/maps/dbox2-flash.c
index 652813cd6c2d..85c2a9e22b1e 100644
--- a/drivers/mtd/maps/dbox2-flash.c
+++ b/drivers/mtd/maps/dbox2-flash.c
@@ -122,5 +122,5 @@ module_exit(cleanup_dbox2_flash);
122 122
123 123
124MODULE_LICENSE("GPL"); 124MODULE_LICENSE("GPL");
125MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>"); 125MODULE_AUTHOR("Kári Davíðsson <kd@flaga.is>, Bastian Blank <waldi@tuxbox.org>, Alexander Wild <wild@te-elektronik.com>");
126MODULE_DESCRIPTION("MTD map driver for D-Box 2 board"); 126MODULE_DESCRIPTION("MTD map driver for D-Box 2 board");
diff --git a/drivers/mtd/maps/mtx-1_flash.c b/drivers/mtd/maps/mtx-1_flash.c
index d1e66e186746..5c25d4e552c6 100644
--- a/drivers/mtd/maps/mtx-1_flash.c
+++ b/drivers/mtd/maps/mtx-1_flash.c
@@ -4,7 +4,7 @@
4 * $Id: mtx-1_flash.c,v 1.2 2005/11/07 11:14:27 gleixner Exp $ 4 * $Id: mtx-1_flash.c,v 1.2 2005/11/07 11:14:27 gleixner Exp $
5 * 5 *
6 * (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz> 6 * (C) 2005 Bruno Randolf <bruno.randolf@4g-systems.biz>
7 * (C) 2005 Jörn Engel <joern@wohnheim.fh-wedel.de> 7 * (C) 2005 Jörn Engel <joern@wohnheim.fh-wedel.de>
8 * 8 *
9 */ 9 */
10 10
diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c
index 54a3102ab19a..0994b5b2e331 100644
--- a/drivers/mtd/maps/nettel.c
+++ b/drivers/mtd/maps/nettel.c
@@ -20,6 +20,8 @@
20#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
21#include <linux/mtd/cfi.h> 21#include <linux/mtd/cfi.h>
22#include <linux/reboot.h> 22#include <linux/reboot.h>
23#include <linux/kdev_t.h>
24#include <linux/root_dev.h>
23#include <asm/io.h> 25#include <asm/io.h>
24 26
25/****************************************************************************/ 27/****************************************************************************/
@@ -188,7 +190,7 @@ int nettel_eraseconfig(void)
188 set_current_state(TASK_INTERRUPTIBLE); 190 set_current_state(TASK_INTERRUPTIBLE);
189 add_wait_queue(&wait_q, &wait); 191 add_wait_queue(&wait_q, &wait);
190 192
191 ret = MTD_ERASE(mtd, &nettel_erase); 193 ret = mtd->erase(mtd, &nettel_erase);
192 if (ret) { 194 if (ret) {
193 set_current_state(TASK_RUNNING); 195 set_current_state(TASK_RUNNING);
194 remove_wait_queue(&wait_q, &wait); 196 remove_wait_queue(&wait_q, &wait);
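
[Editor's note] MTD_ERASE() was only a thin macro around mtd->erase(), so callers such as nettel_eraseconfig() now invoke the method directly. Erase is asynchronous: the usual idiom, also visible in the mtdblock.c hunk below, is to hang a completion callback and a wait queue off the erase_info. The following is a hedged sketch of that idiom for a driver that wants synchronous behaviour; the function names are illustrative only.

static void erase_done(struct erase_info *instr)
{
	/* instr->priv carries the wait queue supplied by the submitter */
	wake_up((wait_queue_head_t *)instr->priv);
}

/* Hedged sketch: synchronously erase one eraseblock at 'ofs'. */
static int erase_one_block(struct mtd_info *mtd, uint32_t ofs)
{
	DECLARE_WAITQUEUE(wait, current);
	wait_queue_head_t wait_q;
	struct erase_info erase;
	int ret;

	init_waitqueue_head(&wait_q);
	memset(&erase, 0, sizeof(erase));
	erase.mtd = mtd;
	erase.addr = ofs;
	erase.len = mtd->erasesize;
	erase.callback = erase_done;
	erase.priv = (u_long)&wait_q;

	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&wait_q, &wait);

	ret = mtd->erase(mtd, &erase);
	if (ret) {
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&wait_q, &wait);
		return ret;
	}

	schedule();			/* woken by erase_done() */
	remove_wait_queue(&wait_q, &wait);

	return erase.state == MTD_ERASE_DONE ? 0 : -EIO;
}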
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index d27f4129afd3..c861134cbc48 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -713,6 +713,7 @@ static void pcmciamtd_detach(struct pcmcia_device *link)
713 713
714 if(dev->mtd_info) { 714 if(dev->mtd_info) {
715 del_mtd_device(dev->mtd_info); 715 del_mtd_device(dev->mtd_info);
716 map_destroy(dev->mtd_info);
716 info("mtd%d: Removed", dev->mtd_info->index); 717 info("mtd%d: Removed", dev->mtd_info->index);
717 } 718 }
718 719
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index f49ebc3c4606..433c3cac3ca9 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -14,112 +14,229 @@
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <asm/io.h> 17#include <linux/device.h>
18#include <linux/platform_device.h>
18#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
19#include <linux/mtd/map.h> 20#include <linux/mtd/map.h>
20#include <linux/config.h> 21#include <linux/config.h>
21#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
22#include <linux/mtd/physmap.h> 23#include <linux/mtd/physmap.h>
24#include <asm/io.h>
23 25
24static struct mtd_info *mymtd; 26struct physmap_flash_info {
25 27 struct mtd_info *mtd;
26struct map_info physmap_map = { 28 struct map_info map;
27 .name = "phys_mapped_flash", 29 struct resource *res;
28 .phys = CONFIG_MTD_PHYSMAP_START, 30#ifdef CONFIG_MTD_PARTITIONS
29 .size = CONFIG_MTD_PHYSMAP_LEN, 31 int nr_parts;
30 .bankwidth = CONFIG_MTD_PHYSMAP_BANKWIDTH, 32 struct mtd_partition *parts;
33#endif
31}; 34};
32 35
36
37static int physmap_flash_remove(struct platform_device *dev)
38{
39 struct physmap_flash_info *info;
40 struct physmap_flash_data *physmap_data;
41
42 info = platform_get_drvdata(dev);
43 if (info == NULL)
44 return 0;
45 platform_set_drvdata(dev, NULL);
46
47 physmap_data = dev->dev.platform_data;
48
49 if (info->mtd != NULL) {
33#ifdef CONFIG_MTD_PARTITIONS 50#ifdef CONFIG_MTD_PARTITIONS
34static struct mtd_partition *mtd_parts; 51 if (info->nr_parts) {
35static int mtd_parts_nb; 52 del_mtd_partitions(info->mtd);
53 kfree(info->parts);
54 } else if (physmap_data->nr_parts) {
55 del_mtd_partitions(info->mtd);
56 } else {
57 del_mtd_device(info->mtd);
58 }
59#else
60 del_mtd_device(info->mtd);
61#endif
62 map_destroy(info->mtd);
63 }
36 64
37static int num_physmap_partitions; 65 if (info->map.virt != NULL)
38static struct mtd_partition *physmap_partitions; 66 iounmap((void *)info->map.virt);
39 67
40static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL}; 68 if (info->res != NULL) {
69 release_resource(info->res);
70 kfree(info->res);
71 }
41 72
42void physmap_set_partitions(struct mtd_partition *parts, int num_parts) 73 return 0;
43{
44 physmap_partitions=parts;
45 num_physmap_partitions=num_parts;
46} 74}
47#endif /* CONFIG_MTD_PARTITIONS */
48 75
49static int __init init_physmap(void) 76static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL };
77#ifdef CONFIG_MTD_PARTITIONS
78static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL };
79#endif
80
81static int physmap_flash_probe(struct platform_device *dev)
50{ 82{
51 static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL }; 83 struct physmap_flash_data *physmap_data;
52 const char **type; 84 struct physmap_flash_info *info;
85 const char **probe_type;
86 int err;
87
88 physmap_data = dev->dev.platform_data;
89 if (physmap_data == NULL)
90 return -ENODEV;
91
92 printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
93 (unsigned long long)dev->resource->end - dev->resource->start + 1,
94 (unsigned long long)dev->resource->start);
95
96 info = kmalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
97 if (info == NULL) {
98 err = -ENOMEM;
99 goto err_out;
100 }
101 memset(info, 0, sizeof(*info));
53 102
54 printk(KERN_NOTICE "physmap flash device: %lx at %lx\n", physmap_map.size, physmap_map.phys); 103 platform_set_drvdata(dev, info);
55 physmap_map.virt = ioremap(physmap_map.phys, physmap_map.size);
56 104
57 if (!physmap_map.virt) { 105 info->res = request_mem_region(dev->resource->start,
58 printk("Failed to ioremap\n"); 106 dev->resource->end - dev->resource->start + 1,
59 return -EIO; 107 dev->dev.bus_id);
108 if (info->res == NULL) {
109 dev_err(&dev->dev, "Could not reserve memory region\n");
110 err = -ENOMEM;
111 goto err_out;
60 } 112 }
61 113
62 simple_map_init(&physmap_map); 114 info->map.name = dev->dev.bus_id;
115 info->map.phys = dev->resource->start;
116 info->map.size = dev->resource->end - dev->resource->start + 1;
117 info->map.bankwidth = physmap_data->width;
118 info->map.set_vpp = physmap_data->set_vpp;
119
120 info->map.virt = ioremap(info->map.phys, info->map.size);
121 if (info->map.virt == NULL) {
122 dev_err(&dev->dev, "Failed to ioremap flash region\n");
123 err = EIO;
124 goto err_out;
125 }
63 126
64 mymtd = NULL; 127 simple_map_init(&info->map);
65 type = rom_probe_types; 128
66 for(; !mymtd && *type; type++) { 129 probe_type = rom_probe_types;
67 mymtd = do_map_probe(*type, &physmap_map); 130 for (; info->mtd == NULL && *probe_type != NULL; probe_type++)
131 info->mtd = do_map_probe(*probe_type, &info->map);
132 if (info->mtd == NULL) {
133 dev_err(&dev->dev, "map_probe failed\n");
134 err = -ENXIO;
135 goto err_out;
68 } 136 }
69 if (mymtd) { 137 info->mtd->owner = THIS_MODULE;
70 mymtd->owner = THIS_MODULE;
71 138
72#ifdef CONFIG_MTD_PARTITIONS 139#ifdef CONFIG_MTD_PARTITIONS
73 mtd_parts_nb = parse_mtd_partitions(mymtd, part_probes, 140 err = parse_mtd_partitions(info->mtd, part_probe_types, &info->parts, 0);
74 &mtd_parts, 0); 141 if (err > 0) {
142 add_mtd_partitions(info->mtd, info->parts, err);
143 return 0;
144 }
75 145
76 if (mtd_parts_nb > 0) 146 if (physmap_data->nr_parts) {
77 { 147 printk(KERN_NOTICE "Using physmap partition information\n");
78 add_mtd_partitions (mymtd, mtd_parts, mtd_parts_nb); 148 add_mtd_partitions(info->mtd, physmap_data->parts,
79 return 0; 149 physmap_data->nr_parts);
80 } 150 return 0;
151 }
152#endif
153
154 add_mtd_device(info->mtd);
155 return 0;
156
157err_out:
158 physmap_flash_remove(dev);
159 return err;
160}
161
162static struct platform_driver physmap_flash_driver = {
163 .probe = physmap_flash_probe,
164 .remove = physmap_flash_remove,
165 .driver = {
166 .name = "physmap-flash",
167 },
168};
81 169
82 if (num_physmap_partitions != 0)
83 {
84 printk(KERN_NOTICE
85 "Using physmap partition definition\n");
86 add_mtd_partitions (mymtd, physmap_partitions, num_physmap_partitions);
87 return 0;
88 }
89 170
171#ifdef CONFIG_MTD_PHYSMAP_LEN
172#if CONFIG_MTD_PHYSMAP_LEN != 0
173#warning using PHYSMAP compat code
174#define PHYSMAP_COMPAT
175#endif
90#endif 176#endif
91 add_mtd_device(mymtd);
92 177
93 return 0; 178#ifdef PHYSMAP_COMPAT
94 } 179static struct physmap_flash_data physmap_flash_data = {
180 .width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
181};
95 182
96 iounmap(physmap_map.virt); 183static struct resource physmap_flash_resource = {
97 return -ENXIO; 184 .start = CONFIG_MTD_PHYSMAP_START,
98} 185 .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN,
186 .flags = IORESOURCE_MEM,
187};
99 188
100static void __exit cleanup_physmap(void) 189static struct platform_device physmap_flash = {
190 .name = "physmap-flash",
191 .id = 0,
192 .dev = {
193 .platform_data = &physmap_flash_data,
194 },
195 .num_resources = 1,
196 .resource = &physmap_flash_resource,
197};
198
199void physmap_configure(unsigned long addr, unsigned long size,
200 int bankwidth, void (*set_vpp)(struct map_info *, int))
101{ 201{
202 physmap_flash_resource.start = addr;
203 physmap_flash_resource.end = addr + size - 1;
204 physmap_flash_data.width = bankwidth;
205 physmap_flash_data.set_vpp = set_vpp;
206}
207
102#ifdef CONFIG_MTD_PARTITIONS 208#ifdef CONFIG_MTD_PARTITIONS
103 if (mtd_parts_nb) { 209void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
104 del_mtd_partitions(mymtd); 210{
105 kfree(mtd_parts); 211 physmap_flash_data.nr_parts = num_parts;
106 } else if (num_physmap_partitions) { 212 physmap_flash_data.parts = parts;
107 del_mtd_partitions(mymtd); 213}
108 } else { 214#endif
109 del_mtd_device(mymtd);
110 }
111#else
112 del_mtd_device(mymtd);
113#endif 215#endif
114 map_destroy(mymtd);
115 216
116 iounmap(physmap_map.virt); 217static int __init physmap_init(void)
117 physmap_map.virt = NULL; 218{
219 int err;
220
221 err = platform_driver_register(&physmap_flash_driver);
222#ifdef PHYSMAP_COMPAT
223 if (err == 0)
224 platform_device_register(&physmap_flash);
225#endif
226
227 return err;
118} 228}
119 229
120module_init(init_physmap); 230static void __exit physmap_exit(void)
121module_exit(cleanup_physmap); 231{
232#ifdef PHYSMAP_COMPAT
233 platform_device_unregister(&physmap_flash);
234#endif
235 platform_driver_unregister(&physmap_flash_driver);
236}
122 237
238module_init(physmap_init);
239module_exit(physmap_exit);
123 240
124MODULE_LICENSE("GPL"); 241MODULE_LICENSE("GPL");
125MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 242MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
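
[Editor's note] With physmap turned into a platform driver, board code no longer pokes physmap_map directly: it either keeps using the physmap_configure()/physmap_set_partitions() compatibility calls above, or registers a "physmap-flash" platform device itself. The sketch below shows the latter; the base address, bank width and partition layout are made-up example values, and the identifiers prefixed "example_" are hypothetical.

#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>

/* Illustrative partition table -- values are examples only. */
static struct mtd_partition example_parts[] = {
	{ .name = "bootloader", .offset = 0,          .size = 0x00040000 },
	{ .name = "rootfs",     .offset = 0x00040000, .size = MTDPART_SIZ_FULL },
};

static struct physmap_flash_data example_flash_data = {
	.width    = 2,			/* 16-bit bank */
	.parts    = example_parts,
	.nr_parts = ARRAY_SIZE(example_parts),
};

static struct resource example_flash_resource = {
	.start = 0x08000000,		/* hypothetical base address */
	.end   = 0x08000000 + 0x00800000 - 1,
	.flags = IORESOURCE_MEM,
};

static struct platform_device example_flash_device = {
	.name          = "physmap-flash",	/* must match the driver name above */
	.id            = 0,
	.dev           = { .platform_data = &example_flash_data },
	.num_resources = 1,
	.resource      = &example_flash_resource,
};

/* Registered from the board's init code:
 *	platform_device_register(&example_flash_device);
 */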
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 2cef280e388c..e5c78463ebfd 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -71,7 +71,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
71 set_current_state(TASK_INTERRUPTIBLE); 71 set_current_state(TASK_INTERRUPTIBLE);
72 add_wait_queue(&wait_q, &wait); 72 add_wait_queue(&wait_q, &wait);
73 73
74 ret = MTD_ERASE(mtd, &erase); 74 ret = mtd->erase(mtd, &erase);
75 if (ret) { 75 if (ret) {
76 set_current_state(TASK_RUNNING); 76 set_current_state(TASK_RUNNING);
77 remove_wait_queue(&wait_q, &wait); 77 remove_wait_queue(&wait_q, &wait);
@@ -88,7 +88,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
88 * Next, write data to flash. 88 * Next, write data to flash.
89 */ 89 */
90 90
91 ret = MTD_WRITE (mtd, pos, len, &retlen, buf); 91 ret = mtd->write(mtd, pos, len, &retlen, buf);
92 if (ret) 92 if (ret)
93 return ret; 93 return ret;
94 if (retlen != len) 94 if (retlen != len)
@@ -138,7 +138,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
138 mtd->name, pos, len); 138 mtd->name, pos, len);
139 139
140 if (!sect_size) 140 if (!sect_size)
141 return MTD_WRITE (mtd, pos, len, &retlen, buf); 141 return mtd->write(mtd, pos, len, &retlen, buf);
142 142
143 while (len > 0) { 143 while (len > 0) {
144 unsigned long sect_start = (pos/sect_size)*sect_size; 144 unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -170,7 +170,8 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
170 mtdblk->cache_offset != sect_start) { 170 mtdblk->cache_offset != sect_start) {
171 /* fill the cache with the current sector */ 171 /* fill the cache with the current sector */
172 mtdblk->cache_state = STATE_EMPTY; 172 mtdblk->cache_state = STATE_EMPTY;
173 ret = MTD_READ(mtd, sect_start, sect_size, &retlen, mtdblk->cache_data); 173 ret = mtd->read(mtd, sect_start, sect_size,
174 &retlen, mtdblk->cache_data);
174 if (ret) 175 if (ret)
175 return ret; 176 return ret;
176 if (retlen != sect_size) 177 if (retlen != sect_size)
@@ -207,7 +208,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
207 mtd->name, pos, len); 208 mtd->name, pos, len);
208 209
209 if (!sect_size) 210 if (!sect_size)
210 return MTD_READ (mtd, pos, len, &retlen, buf); 211 return mtd->read(mtd, pos, len, &retlen, buf);
211 212
212 while (len > 0) { 213 while (len > 0) {
213 unsigned long sect_start = (pos/sect_size)*sect_size; 214 unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -226,7 +227,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
226 mtdblk->cache_offset == sect_start) { 227 mtdblk->cache_offset == sect_start) {
227 memcpy (buf, mtdblk->cache_data + offset, size); 228 memcpy (buf, mtdblk->cache_data + offset, size);
228 } else { 229 } else {
229 ret = MTD_READ (mtd, pos, size, &retlen, buf); 230 ret = mtd->read(mtd, pos, size, &retlen, buf);
230 if (ret) 231 if (ret)
231 return ret; 232 return ret;
232 if (retlen != size) 233 if (retlen != size)
@@ -288,8 +289,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
288 289
289 mutex_init(&mtdblk->cache_mutex); 290 mutex_init(&mtdblk->cache_mutex);
290 mtdblk->cache_state = STATE_EMPTY; 291 mtdblk->cache_state = STATE_EMPTY;
291 if ((mtdblk->mtd->flags & MTD_CAP_RAM) != MTD_CAP_RAM && 292 if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
292 mtdblk->mtd->erasesize) {
293 mtdblk->cache_size = mtdblk->mtd->erasesize; 293 mtdblk->cache_size = mtdblk->mtd->erasesize;
294 mtdblk->cache_data = NULL; 294 mtdblk->cache_data = NULL;
295 } 295 }
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 0c830ba41ef0..29563ed258a4 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -45,9 +45,7 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
45 dev->blksize = 512; 45 dev->blksize = 512;
46 dev->size = mtd->size >> 9; 46 dev->size = mtd->size >> 9;
47 dev->tr = tr; 47 dev->tr = tr;
48 if ((mtd->flags & (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEABLE)) != 48 dev->readonly = 1;
49 (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEABLE))
50 dev->readonly = 1;
51 49
52 add_mtd_blktrans_dev(dev); 50 add_mtd_blktrans_dev(dev);
53} 51}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 6f044584bdc6..aa18d45b264b 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -49,24 +49,18 @@ static struct mtd_notifier notifier = {
49}; 49};
50 50
51/* 51/*
52 * We use file->private_data to store a pointer to the MTDdevice. 52 * Data structure to hold the pointer to the mtd device as well
53 * Since alignment is at least 32 bits, we have 2 bits free for OTP 53 * as mode information for various use cases.
54 * modes as well.
55 */ 54 */
56 55struct mtd_file_info {
57#define TO_MTD(file) (struct mtd_info *)((long)((file)->private_data) & ~3L) 56 struct mtd_info *mtd;
58 57 enum mtd_file_modes mode;
59#define MTD_MODE_OTP_FACT 1 58};
60#define MTD_MODE_OTP_USER 2
61#define MTD_MODE(file) ((long)((file)->private_data) & 3)
62
63#define SET_MTD_MODE(file, mode) \
64 do { long __p = (long)((file)->private_data); \
65 (file)->private_data = (void *)((__p & ~3L) | mode); } while (0)
66 59
67static loff_t mtd_lseek (struct file *file, loff_t offset, int orig) 60static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
68{ 61{
69 struct mtd_info *mtd = TO_MTD(file); 62 struct mtd_file_info *mfi = file->private_data;
63 struct mtd_info *mtd = mfi->mtd;
70 64
71 switch (orig) { 65 switch (orig) {
72 case 0: 66 case 0:
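
[Editor's note] The old code packed the OTP mode into the two low bits of file->private_data; the new struct mtd_file_info above carries the mtd pointer plus an explicit per-open-file mode instead. The mode values come from enum mtd_file_modes in the mtd ABI headers; the enum is not part of this hunk, so the layout below is an assumption inferred from how the constants are used in the rest of this file.

/* Assumed contents of enum mtd_file_modes (defined in the mtd ABI headers,
 * not in this diff); ordering inferred from usage in mtdchar.c. */
enum mtd_file_modes {
	MTD_MODE_NORMAL,	/* plain mtd->read()/mtd->write() access */
	MTD_MODE_OTP_FACTORY,	/* reads hit the factory OTP registers */
	MTD_MODE_OTP_USER,	/* reads/writes hit the user OTP registers */
	MTD_MODE_RAW,		/* ECC bypassed, data moved via mtd_oob_ops */
};

Because private_data is now a real pointer rather than a tagged value, mtd_close() has to free it explicitly, which the hunks below add.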
@@ -97,6 +91,7 @@ static int mtd_open(struct inode *inode, struct file *file)
97 int minor = iminor(inode); 91 int minor = iminor(inode);
98 int devnum = minor >> 1; 92 int devnum = minor >> 1;
99 struct mtd_info *mtd; 93 struct mtd_info *mtd;
94 struct mtd_file_info *mfi;
100 95
101 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n"); 96 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
102 97
@@ -117,14 +112,20 @@ static int mtd_open(struct inode *inode, struct file *file)
117 return -ENODEV; 112 return -ENODEV;
118 } 113 }
119 114
120 file->private_data = mtd;
121
122 /* You can't open it RW if it's not a writeable device */ 115 /* You can't open it RW if it's not a writeable device */
123 if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) { 116 if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
124 put_mtd_device(mtd); 117 put_mtd_device(mtd);
125 return -EACCES; 118 return -EACCES;
126 } 119 }
127 120
121 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
122 if (!mfi) {
123 put_mtd_device(mtd);
124 return -ENOMEM;
125 }
126 mfi->mtd = mtd;
127 file->private_data = mfi;
128
128 return 0; 129 return 0;
129} /* mtd_open */ 130} /* mtd_open */
130 131
@@ -132,16 +133,17 @@ static int mtd_open(struct inode *inode, struct file *file)
132 133
133static int mtd_close(struct inode *inode, struct file *file) 134static int mtd_close(struct inode *inode, struct file *file)
134{ 135{
135 struct mtd_info *mtd; 136 struct mtd_file_info *mfi = file->private_data;
137 struct mtd_info *mtd = mfi->mtd;
136 138
137 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n"); 139 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
138 140
139 mtd = TO_MTD(file);
140
141 if (mtd->sync) 141 if (mtd->sync)
142 mtd->sync(mtd); 142 mtd->sync(mtd);
143 143
144 put_mtd_device(mtd); 144 put_mtd_device(mtd);
145 file->private_data = NULL;
146 kfree(mfi);
145 147
146 return 0; 148 return 0;
147} /* mtd_close */ 149} /* mtd_close */
@@ -153,7 +155,8 @@ static int mtd_close(struct inode *inode, struct file *file)
153 155
154static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos) 156static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
155{ 157{
156 struct mtd_info *mtd = TO_MTD(file); 158 struct mtd_file_info *mfi = file->private_data;
159 struct mtd_info *mtd = mfi->mtd;
157 size_t retlen=0; 160 size_t retlen=0;
158 size_t total_retlen=0; 161 size_t total_retlen=0;
159 int ret=0; 162 int ret=0;
@@ -170,36 +173,58 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
170 173
171 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers 174 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
172 and pass them directly to the MTD functions */ 175 and pass them directly to the MTD functions */
176
177 if (count > MAX_KMALLOC_SIZE)
178 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
179 else
180 kbuf=kmalloc(count, GFP_KERNEL);
181
182 if (!kbuf)
183 return -ENOMEM;
184
173 while (count) { 185 while (count) {
186
174 if (count > MAX_KMALLOC_SIZE) 187 if (count > MAX_KMALLOC_SIZE)
175 len = MAX_KMALLOC_SIZE; 188 len = MAX_KMALLOC_SIZE;
176 else 189 else
177 len = count; 190 len = count;
178 191
179 kbuf=kmalloc(len,GFP_KERNEL); 192 switch (mfi->mode) {
180 if (!kbuf) 193 case MTD_MODE_OTP_FACTORY:
181 return -ENOMEM;
182
183 switch (MTD_MODE(file)) {
184 case MTD_MODE_OTP_FACT:
185 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf); 194 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
186 break; 195 break;
187 case MTD_MODE_OTP_USER: 196 case MTD_MODE_OTP_USER:
188 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 197 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
189 break; 198 break;
199 case MTD_MODE_RAW:
200 {
201 struct mtd_oob_ops ops;
202
203 ops.mode = MTD_OOB_RAW;
204 ops.datbuf = kbuf;
205 ops.oobbuf = NULL;
206 ops.len = len;
207
208 ret = mtd->read_oob(mtd, *ppos, &ops);
209 retlen = ops.retlen;
210 break;
211 }
190 default: 212 default:
191 ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf); 213 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
192 } 214 }
193 /* Nand returns -EBADMSG on ecc errors, but it returns 215 /* Nand returns -EBADMSG on ecc errors, but it returns
194 * the data. For our userspace tools it is important 216 * the data. For our userspace tools it is important
195 * to dump areas with ecc errors ! 217 * to dump areas with ecc errors !
218 * For kernel internal usage it also might return -EUCLEAN
219 * to signal the caller that a bitflip has occured and has
220 * been corrected by the ECC algorithm.
196 * Userspace software which accesses NAND this way 221 * Userspace software which accesses NAND this way
197 * must be aware of the fact that it deals with NAND 222 * must be aware of the fact that it deals with NAND
198 */ 223 */
199 if (!ret || (ret == -EBADMSG)) { 224 if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
200 *ppos += retlen; 225 *ppos += retlen;
201 if (copy_to_user(buf, kbuf, retlen)) { 226 if (copy_to_user(buf, kbuf, retlen)) {
202 kfree(kbuf); 227 kfree(kbuf);
203 return -EFAULT; 228 return -EFAULT;
204 } 229 }
205 else 230 else
@@ -215,15 +240,16 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
215 return ret; 240 return ret;
216 } 241 }
217 242
218 kfree(kbuf);
219 } 243 }
220 244
245 kfree(kbuf);
221 return total_retlen; 246 return total_retlen;
222} /* mtd_read */ 247} /* mtd_read */
223 248
224static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos) 249static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
225{ 250{
226 struct mtd_info *mtd = TO_MTD(file); 251 struct mtd_file_info *mfi = file->private_data;
252 struct mtd_info *mtd = mfi->mtd;
227 char *kbuf; 253 char *kbuf;
228 size_t retlen; 254 size_t retlen;
229 size_t total_retlen=0; 255 size_t total_retlen=0;
@@ -241,25 +267,28 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
241 if (!count) 267 if (!count)
242 return 0; 268 return 0;
243 269
270 if (count > MAX_KMALLOC_SIZE)
271 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
272 else
273 kbuf=kmalloc(count, GFP_KERNEL);
274
275 if (!kbuf)
276 return -ENOMEM;
277
244 while (count) { 278 while (count) {
279
245 if (count > MAX_KMALLOC_SIZE) 280 if (count > MAX_KMALLOC_SIZE)
246 len = MAX_KMALLOC_SIZE; 281 len = MAX_KMALLOC_SIZE;
247 else 282 else
248 len = count; 283 len = count;
249 284
250 kbuf=kmalloc(len,GFP_KERNEL);
251 if (!kbuf) {
252 printk("kmalloc is null\n");
253 return -ENOMEM;
254 }
255
256 if (copy_from_user(kbuf, buf, len)) { 285 if (copy_from_user(kbuf, buf, len)) {
257 kfree(kbuf); 286 kfree(kbuf);
258 return -EFAULT; 287 return -EFAULT;
259 } 288 }
260 289
261 switch (MTD_MODE(file)) { 290 switch (mfi->mode) {
262 case MTD_MODE_OTP_FACT: 291 case MTD_MODE_OTP_FACTORY:
263 ret = -EROFS; 292 ret = -EROFS;
264 break; 293 break;
265 case MTD_MODE_OTP_USER: 294 case MTD_MODE_OTP_USER:
@@ -269,6 +298,21 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
269 } 298 }
270 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf); 299 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
271 break; 300 break;
301
302 case MTD_MODE_RAW:
303 {
304 struct mtd_oob_ops ops;
305
306 ops.mode = MTD_OOB_RAW;
307 ops.datbuf = kbuf;
308 ops.oobbuf = NULL;
309 ops.len = len;
310
311 ret = mtd->write_oob(mtd, *ppos, &ops);
312 retlen = ops.retlen;
313 break;
314 }
315
272 default: 316 default:
273 ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf); 317 ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
274 } 318 }
@@ -282,10 +326,9 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
282 kfree(kbuf); 326 kfree(kbuf);
283 return ret; 327 return ret;
284 } 328 }
285
286 kfree(kbuf);
287 } 329 }
288 330
331 kfree(kbuf);
289 return total_retlen; 332 return total_retlen;
290} /* mtd_write */ 333} /* mtd_write */
291 334
@@ -299,13 +342,45 @@ static void mtdchar_erase_callback (struct erase_info *instr)
299 wake_up((wait_queue_head_t *)instr->priv); 342 wake_up((wait_queue_head_t *)instr->priv);
300} 343}
301 344
345#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
346static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
347{
348 struct mtd_info *mtd = mfi->mtd;
349 int ret = 0;
350
351 switch (mode) {
352 case MTD_OTP_FACTORY:
353 if (!mtd->read_fact_prot_reg)
354 ret = -EOPNOTSUPP;
355 else
356 mfi->mode = MTD_MODE_OTP_FACTORY;
357 break;
358 case MTD_OTP_USER:
359 if (!mtd->read_fact_prot_reg)
360 ret = -EOPNOTSUPP;
361 else
362 mfi->mode = MTD_MODE_OTP_USER;
363 break;
364 default:
365 ret = -EINVAL;
366 case MTD_OTP_OFF:
367 break;
368 }
369 return ret;
370}
371#else
372# define otp_select_filemode(f,m) -EOPNOTSUPP
373#endif
374
302static int mtd_ioctl(struct inode *inode, struct file *file, 375static int mtd_ioctl(struct inode *inode, struct file *file,
303 u_int cmd, u_long arg) 376 u_int cmd, u_long arg)
304{ 377{
305 struct mtd_info *mtd = TO_MTD(file); 378 struct mtd_file_info *mfi = file->private_data;
379 struct mtd_info *mtd = mfi->mtd;
306 void __user *argp = (void __user *)arg; 380 void __user *argp = (void __user *)arg;
307 int ret = 0; 381 int ret = 0;
308 u_long size; 382 u_long size;
383 struct mtd_info_user info;
309 384
310 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n"); 385 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
311 386
@@ -341,7 +416,15 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
341 } 416 }
342 417
343 case MEMGETINFO: 418 case MEMGETINFO:
344 if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user))) 419 info.type = mtd->type;
420 info.flags = mtd->flags;
421 info.size = mtd->size;
422 info.erasesize = mtd->erasesize;
423 info.writesize = mtd->writesize;
424 info.oobsize = mtd->oobsize;
425 info.ecctype = mtd->ecctype;
426 info.eccsize = mtd->eccsize;
427 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
345 return -EFAULT; 428 return -EFAULT;
346 break; 429 break;
347 430
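
[Editor's note] MEMGETINFO used to copy the kernel's struct mtd_info straight out to userspace; it now fills a fixed struct mtd_info_user, so internal pointers and kernel-only fields no longer leak into the ABI. Userspace is unchanged; a minimal illustrative consumer follows. The <mtd/mtd-user.h> include path is the usual userspace location but is an assumption here.

/* Userspace sketch: query an MTD character device via MEMGETINFO. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct mtd_info_user info;
	int fd = open("/dev/mtd0", O_RDONLY);

	if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0) {
		perror("MEMGETINFO");
		return 1;
	}
	printf("size=%u erasesize=%u writesize=%u oobsize=%u\n",
	       info.size, info.erasesize, info.writesize, info.oobsize);
	close(fd);
	return 0;
}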
@@ -400,8 +483,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
400 case MEMWRITEOOB: 483 case MEMWRITEOOB:
401 { 484 {
402 struct mtd_oob_buf buf; 485 struct mtd_oob_buf buf;
403 void *databuf; 486 struct mtd_oob_ops ops;
404 ssize_t retlen;
405 487
406 if(!(file->f_mode & 2)) 488 if(!(file->f_mode & 2))
407 return -EPERM; 489 return -EPERM;
@@ -409,7 +491,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
409 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf))) 491 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
410 return -EFAULT; 492 return -EFAULT;
411 493
412 if (buf.length > 0x4096) 494 if (buf.length > 4096)
413 return -EINVAL; 495 return -EINVAL;
414 496
415 if (!mtd->write_oob) 497 if (!mtd->write_oob)
@@ -421,21 +503,32 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
421 if (ret) 503 if (ret)
422 return ret; 504 return ret;
423 505
424 databuf = kmalloc(buf.length, GFP_KERNEL); 506 ops.len = buf.length;
425 if (!databuf) 507 ops.ooblen = buf.length;
508 ops.ooboffs = buf.start & (mtd->oobsize - 1);
509 ops.datbuf = NULL;
510 ops.mode = MTD_OOB_PLACE;
511
512 if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
513 return -EINVAL;
514
515 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
516 if (!ops.oobbuf)
426 return -ENOMEM; 517 return -ENOMEM;
427 518
428 if (copy_from_user(databuf, buf.ptr, buf.length)) { 519 if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
429 kfree(databuf); 520 kfree(ops.oobbuf);
430 return -EFAULT; 521 return -EFAULT;
431 } 522 }
432 523
433 ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf); 524 buf.start &= ~(mtd->oobsize - 1);
525 ret = mtd->write_oob(mtd, buf.start, &ops);
434 526
435 if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t))) 527 if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
528 sizeof(uint32_t)))
436 ret = -EFAULT; 529 ret = -EFAULT;
437 530
438 kfree(databuf); 531 kfree(ops.oobbuf);
439 break; 532 break;
440 533
441 } 534 }
@@ -443,13 +536,12 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
443 case MEMREADOOB: 536 case MEMREADOOB:
444 { 537 {
445 struct mtd_oob_buf buf; 538 struct mtd_oob_buf buf;
446 void *databuf; 539 struct mtd_oob_ops ops;
447 ssize_t retlen;
448 540
449 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf))) 541 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
450 return -EFAULT; 542 return -EFAULT;
451 543
452 if (buf.length > 0x4096) 544 if (buf.length > 4096)
453 return -EINVAL; 545 return -EINVAL;
454 546
455 if (!mtd->read_oob) 547 if (!mtd->read_oob)
@@ -457,22 +549,32 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
457 else 549 else
458 ret = access_ok(VERIFY_WRITE, buf.ptr, 550 ret = access_ok(VERIFY_WRITE, buf.ptr,
459 buf.length) ? 0 : -EFAULT; 551 buf.length) ? 0 : -EFAULT;
460
461 if (ret) 552 if (ret)
462 return ret; 553 return ret;
463 554
464 databuf = kmalloc(buf.length, GFP_KERNEL); 555 ops.len = buf.length;
465 if (!databuf) 556 ops.ooblen = buf.length;
557 ops.ooboffs = buf.start & (mtd->oobsize - 1);
558 ops.datbuf = NULL;
559 ops.mode = MTD_OOB_PLACE;
560
561 if (ops.ooboffs && ops.len > (mtd->oobsize - ops.ooboffs))
562 return -EINVAL;
563
564 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
565 if (!ops.oobbuf)
466 return -ENOMEM; 566 return -ENOMEM;
467 567
468 ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf); 568 buf.start &= ~(mtd->oobsize - 1);
569 ret = mtd->read_oob(mtd, buf.start, &ops);
469 570
470 if (put_user(retlen, (uint32_t __user *)argp)) 571 if (put_user(ops.retlen, (uint32_t __user *)argp))
471 ret = -EFAULT; 572 ret = -EFAULT;
472 else if (retlen && copy_to_user(buf.ptr, databuf, retlen)) 573 else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
574 ops.retlen))
473 ret = -EFAULT; 575 ret = -EFAULT;
474 576
475 kfree(databuf); 577 kfree(ops.oobbuf);
476 break; 578 break;
477 } 579 }
478 580
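
[Editor's note] MEMWRITEOOB and MEMREADOOB keep their old struct mtd_oob_buf ABI but are now implemented on top of mtd_oob_ops with MTD_OOB_PLACE: buf.start is split into a page address and an offset into that page's OOB area, and the transferred byte count comes back through ops.retlen. The userspace side is unchanged; the sketch below reads the OOB area of the first page and is illustrative only (header path and device node are assumptions).

/* Userspace sketch: read the OOB area of the first page of /dev/mtd0. */
#include <stdlib.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct mtd_info_user info;
	struct mtd_oob_buf oob;
	unsigned char *buf;
	int fd = open("/dev/mtd0", O_RDONLY);

	if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0)
		return 1;

	buf = malloc(info.oobsize);
	if (!buf)
		return 1;

	oob.start  = 0;			/* OOB of the page at offset 0 */
	oob.length = info.oobsize;
	oob.ptr    = buf;

	if (ioctl(fd, MEMREADOOB, &oob) < 0) {
		perror("MEMREADOOB");
		return 1;
	}
	/* The kernel writes the transferred byte count back into the
	 * structure (ops.retlen in the hunk above). */
	free(buf);
	close(fd);
	return 0;
}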
@@ -504,16 +606,22 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
504 break; 606 break;
505 } 607 }
506 608
507 case MEMSETOOBSEL: 609 /* Legacy interface */
508 {
509 if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
510 return -EFAULT;
511 break;
512 }
513
514 case MEMGETOOBSEL: 610 case MEMGETOOBSEL:
515 { 611 {
516 if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo))) 612 struct nand_oobinfo oi;
613
614 if (!mtd->ecclayout)
615 return -EOPNOTSUPP;
616 if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
617 return -EINVAL;
618
619 oi.useecc = MTD_NANDECC_AUTOPLACE;
620 memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
621 memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
622 sizeof(oi.oobfree));
623
624 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
517 return -EFAULT; 625 return -EFAULT;
518 break; 626 break;
519 } 627 }
@@ -544,31 +652,17 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
544 break; 652 break;
545 } 653 }
546 654
547#ifdef CONFIG_MTD_OTP 655#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
548 case OTPSELECT: 656 case OTPSELECT:
549 { 657 {
550 int mode; 658 int mode;
551 if (copy_from_user(&mode, argp, sizeof(int))) 659 if (copy_from_user(&mode, argp, sizeof(int)))
552 return -EFAULT; 660 return -EFAULT;
553 SET_MTD_MODE(file, 0); 661
554 switch (mode) { 662 mfi->mode = MTD_MODE_NORMAL;
555 case MTD_OTP_FACTORY: 663
556 if (!mtd->read_fact_prot_reg) 664 ret = otp_select_filemode(mfi, mode);
557 ret = -EOPNOTSUPP; 665
558 else
559 SET_MTD_MODE(file, MTD_MODE_OTP_FACT);
560 break;
561 case MTD_OTP_USER:
562 if (!mtd->read_fact_prot_reg)
563 ret = -EOPNOTSUPP;
564 else
565 SET_MTD_MODE(file, MTD_MODE_OTP_USER);
566 break;
567 default:
568 ret = -EINVAL;
569 case MTD_OTP_OFF:
570 break;
571 }
572 file->f_pos = 0; 666 file->f_pos = 0;
573 break; 667 break;
574 } 668 }
@@ -580,8 +674,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
580 if (!buf) 674 if (!buf)
581 return -ENOMEM; 675 return -ENOMEM;
582 ret = -EOPNOTSUPP; 676 ret = -EOPNOTSUPP;
583 switch (MTD_MODE(file)) { 677 switch (mfi->mode) {
584 case MTD_MODE_OTP_FACT: 678 case MTD_MODE_OTP_FACTORY:
585 if (mtd->get_fact_prot_info) 679 if (mtd->get_fact_prot_info)
586 ret = mtd->get_fact_prot_info(mtd, buf, 4096); 680 ret = mtd->get_fact_prot_info(mtd, buf, 4096);
587 break; 681 break;
@@ -589,6 +683,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
589 if (mtd->get_user_prot_info) 683 if (mtd->get_user_prot_info)
590 ret = mtd->get_user_prot_info(mtd, buf, 4096); 684 ret = mtd->get_user_prot_info(mtd, buf, 4096);
591 break; 685 break;
686 default:
687 break;
592 } 688 }
593 if (ret >= 0) { 689 if (ret >= 0) {
594 if (cmd == OTPGETREGIONCOUNT) { 690 if (cmd == OTPGETREGIONCOUNT) {
@@ -607,7 +703,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
607 { 703 {
608 struct otp_info info; 704 struct otp_info info;
609 705
610 if (MTD_MODE(file) != MTD_MODE_OTP_USER) 706 if (mfi->mode != MTD_MODE_OTP_USER)
611 return -EINVAL; 707 return -EINVAL;
612 if (copy_from_user(&info, argp, sizeof(info))) 708 if (copy_from_user(&info, argp, sizeof(info)))
613 return -EFAULT; 709 return -EFAULT;
@@ -618,6 +714,49 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
618 } 714 }
619#endif 715#endif
620 716
717 case ECCGETLAYOUT:
718 {
719 if (!mtd->ecclayout)
720 return -EOPNOTSUPP;
721
722 if (copy_to_user(argp, &mtd->ecclayout,
723 sizeof(struct nand_ecclayout)))
724 return -EFAULT;
725 break;
726 }
727
728 case ECCGETSTATS:
729 {
730 if (copy_to_user(argp, &mtd->ecc_stats,
731 sizeof(struct mtd_ecc_stats)))
732 return -EFAULT;
733 break;
734 }
735
736 case MTDFILEMODE:
737 {
738 mfi->mode = 0;
739
740 switch(arg) {
741 case MTD_MODE_OTP_FACTORY:
742 case MTD_MODE_OTP_USER:
743 ret = otp_select_filemode(mfi, arg);
744 break;
745
746 case MTD_MODE_RAW:
747 if (!mtd->read_oob || !mtd->write_oob)
748 return -EOPNOTSUPP;
749 mfi->mode = arg;
750
751 case MTD_MODE_NORMAL:
752 break;
753 default:
754 ret = -EINVAL;
755 }
756 file->f_pos = 0;
757 break;
758 }
759
621 default: 760 default:
622 ret = -ENOTTY; 761 ret = -ENOTTY;
623 } 762 }
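
[Editor's note] ECCGETLAYOUT, ECCGETSTATS and MTDFILEMODE are new ioctls: the first two expose the driver's nand_ecclayout and the running mtd_ecc_stats counters, and the third replaces the pointer-tagging mode hack with an explicit per-file mode. Below is a short illustrative poll of the ECC counters; the exact struct mtd_ecc_stats layout is an assumption based on the fields this series touches (corrected, failed, badblocks), and the header path is the usual userspace one.

/* Userspace sketch: read the ECC statistics of /dev/mtd0. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

int main(void)
{
	struct mtd_ecc_stats stats;
	int fd = open("/dev/mtd0", O_RDONLY);

	if (fd < 0 || ioctl(fd, ECCGETSTATS, &stats) < 0) {
		perror("ECCGETSTATS");
		return 1;
	}
	printf("corrected=%u failed=%u badblocks=%u\n",
	       stats.corrected, stats.failed, stats.badblocks);
	close(fd);
	return 0;
}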
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 9af840364a74..1fea631b5852 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -19,6 +19,8 @@
19#include <linux/mtd/mtd.h> 19#include <linux/mtd/mtd.h>
20#include <linux/mtd/concat.h> 20#include <linux/mtd/concat.h>
21 21
22#include <asm/div64.h>
23
22/* 24/*
23 * Our storage structure: 25 * Our storage structure:
24 * Subdev points to an array of pointers to struct mtd_info objects 26 * Subdev points to an array of pointers to struct mtd_info objects
@@ -54,7 +56,7 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
54 size_t * retlen, u_char * buf) 56 size_t * retlen, u_char * buf)
55{ 57{
56 struct mtd_concat *concat = CONCAT(mtd); 58 struct mtd_concat *concat = CONCAT(mtd);
57 int err = -EINVAL; 59 int ret = 0, err;
58 int i; 60 int i;
59 61
60 *retlen = 0; 62 *retlen = 0;
@@ -78,19 +80,29 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
78 80
79 err = subdev->read(subdev, from, size, &retsize, buf); 81 err = subdev->read(subdev, from, size, &retsize, buf);
80 82
81 if (err) 83 /* Save information about bitflips! */
82 break; 84 if (unlikely(err)) {
85 if (err == -EBADMSG) {
86 mtd->ecc_stats.failed++;
87 ret = err;
88 } else if (err == -EUCLEAN) {
89 mtd->ecc_stats.corrected++;
90 /* Do not overwrite -EBADMSG !! */
91 if (!ret)
92 ret = err;
93 } else
94 return err;
95 }
83 96
84 *retlen += retsize; 97 *retlen += retsize;
85 len -= size; 98 len -= size;
86 if (len == 0) 99 if (len == 0)
87 break; 100 return ret;
88 101
89 err = -EINVAL;
90 buf += size; 102 buf += size;
91 from = 0; 103 from = 0;
92 } 104 }
93 return err; 105 return -EINVAL;
94} 106}
95 107
96static int 108static int
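
[Editor's note] concat_read() no longer aborts on -EUCLEAN/-EBADMSG: it bumps the concatenation's own ecc_stats and keeps the "worst" status so that a correctable bitflip never masks an uncorrectable failure. The same pattern reappears in concat_read_oob() further down. Distilled into a helper (a sketch only, not code from the patch; the real loop returns immediately on hard errors):

/* Hedged sketch of the error-folding rule used by concat_read()/_read_oob().
 * 'ret' is the status accumulated so far, 'err' the status of this chunk. */
static int fold_read_status(struct mtd_info *mtd, int err, int ret)
{
	if (!err)
		return ret;		/* keep whatever we had so far */

	if (err == -EBADMSG) {
		mtd->ecc_stats.failed++;
		return err;		/* uncorrectable: overrides everything */
	}
	if (err == -EUCLEAN) {
		mtd->ecc_stats.corrected++;
		return ret ? ret : err;	/* do not overwrite -EBADMSG */
	}
	return err;			/* hard error: the caller bails out here */
}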
@@ -141,211 +153,185 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
141} 153}
142 154
143static int 155static int
144concat_read_ecc(struct mtd_info *mtd, loff_t from, size_t len, 156concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
145 size_t * retlen, u_char * buf, u_char * eccbuf, 157 unsigned long count, loff_t to, size_t * retlen)
146 struct nand_oobinfo *oobsel)
147{ 158{
148 struct mtd_concat *concat = CONCAT(mtd); 159 struct mtd_concat *concat = CONCAT(mtd);
149 int err = -EINVAL; 160 struct kvec *vecs_copy;
161 unsigned long entry_low, entry_high;
162 size_t total_len = 0;
150 int i; 163 int i;
164 int err = -EINVAL;
151 165
152 *retlen = 0; 166 if (!(mtd->flags & MTD_WRITEABLE))
153 167 return -EROFS;
154 for (i = 0; i < concat->num_subdev; i++) {
155 struct mtd_info *subdev = concat->subdev[i];
156 size_t size, retsize;
157
158 if (from >= subdev->size) {
159 /* Not destined for this subdev */
160 size = 0;
161 from -= subdev->size;
162 continue;
163 }
164
165 if (from + len > subdev->size)
166 /* First part goes into this subdev */
167 size = subdev->size - from;
168 else
169 /* Entire transaction goes into this subdev */
170 size = len;
171 168
172 if (subdev->read_ecc) 169 *retlen = 0;
173 err = subdev->read_ecc(subdev, from, size,
174 &retsize, buf, eccbuf, oobsel);
175 else
176 err = -EINVAL;
177 170
178 if (err) 171 /* Calculate total length of data */
179 break; 172 for (i = 0; i < count; i++)
173 total_len += vecs[i].iov_len;
180 174
181 *retlen += retsize; 175 /* Do not allow write past end of device */
182 len -= size; 176 if ((to + total_len) > mtd->size)
183 if (len == 0) 177 return -EINVAL;
184 break;
185 178
186 err = -EINVAL; 179 /* Check alignment */
187 buf += size; 180 if (mtd->writesize > 1) {
188 if (eccbuf) { 181 loff_t __to = to;
189 eccbuf += subdev->oobsize; 182 if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
190 /* in nand.c at least, eccbufs are 183 return -EINVAL;
191 tagged with 2 (int)eccstatus'; we
192 must account for these */
193 eccbuf += 2 * (sizeof (int));
194 }
195 from = 0;
196 } 184 }
197 return err;
198}
199 185
200static int 186 /* make a copy of vecs */
201concat_write_ecc(struct mtd_info *mtd, loff_t to, size_t len, 187 vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
202 size_t * retlen, const u_char * buf, u_char * eccbuf, 188 if (!vecs_copy)
203 struct nand_oobinfo *oobsel) 189 return -ENOMEM;
204{ 190 memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);
205 struct mtd_concat *concat = CONCAT(mtd);
206 int err = -EINVAL;
207 int i;
208
209 if (!(mtd->flags & MTD_WRITEABLE))
210 return -EROFS;
211
212 *retlen = 0;
213 191
192 entry_low = 0;
214 for (i = 0; i < concat->num_subdev; i++) { 193 for (i = 0; i < concat->num_subdev; i++) {
215 struct mtd_info *subdev = concat->subdev[i]; 194 struct mtd_info *subdev = concat->subdev[i];
216 size_t size, retsize; 195 size_t size, wsize, retsize, old_iov_len;
217 196
218 if (to >= subdev->size) { 197 if (to >= subdev->size) {
219 size = 0;
220 to -= subdev->size; 198 to -= subdev->size;
221 continue; 199 continue;
222 } 200 }
223 if (to + len > subdev->size) 201
224 size = subdev->size - to; 202 size = min(total_len, (size_t)(subdev->size - to));
225 else 203 wsize = size; /* store for future use */
226 size = len; 204
205 entry_high = entry_low;
206 while (entry_high < count) {
207 if (size <= vecs_copy[entry_high].iov_len)
208 break;
209 size -= vecs_copy[entry_high++].iov_len;
210 }
211
212 old_iov_len = vecs_copy[entry_high].iov_len;
213 vecs_copy[entry_high].iov_len = size;
227 214
228 if (!(subdev->flags & MTD_WRITEABLE)) 215 if (!(subdev->flags & MTD_WRITEABLE))
229 err = -EROFS; 216 err = -EROFS;
230 else if (subdev->write_ecc)
231 err = subdev->write_ecc(subdev, to, size,
232 &retsize, buf, eccbuf, oobsel);
233 else 217 else
234 err = -EINVAL; 218 err = subdev->writev(subdev, &vecs_copy[entry_low],
219 entry_high - entry_low + 1, to, &retsize);
220
221 vecs_copy[entry_high].iov_len = old_iov_len - size;
222 vecs_copy[entry_high].iov_base += size;
223
224 entry_low = entry_high;
235 225
236 if (err) 226 if (err)
237 break; 227 break;
238 228
239 *retlen += retsize; 229 *retlen += retsize;
240 len -= size; 230 total_len -= wsize;
241 if (len == 0) 231
232 if (total_len == 0)
242 break; 233 break;
243 234
244 err = -EINVAL; 235 err = -EINVAL;
245 buf += size;
246 if (eccbuf)
247 eccbuf += subdev->oobsize;
248 to = 0; 236 to = 0;
249 } 237 }
238
239 kfree(vecs_copy);
250 return err; 240 return err;
251} 241}
252 242
253static int 243static int
254concat_read_oob(struct mtd_info *mtd, loff_t from, size_t len, 244concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
255 size_t * retlen, u_char * buf)
256{ 245{
257 struct mtd_concat *concat = CONCAT(mtd); 246 struct mtd_concat *concat = CONCAT(mtd);
258 int err = -EINVAL; 247 struct mtd_oob_ops devops = *ops;
259 int i; 248 int i, err, ret = 0;
260 249
261 *retlen = 0; 250 ops->retlen = 0;
262 251
263 for (i = 0; i < concat->num_subdev; i++) { 252 for (i = 0; i < concat->num_subdev; i++) {
264 struct mtd_info *subdev = concat->subdev[i]; 253 struct mtd_info *subdev = concat->subdev[i];
265 size_t size, retsize;
266 254
267 if (from >= subdev->size) { 255 if (from >= subdev->size) {
268 /* Not destined for this subdev */
269 size = 0;
270 from -= subdev->size; 256 from -= subdev->size;
271 continue; 257 continue;
272 } 258 }
273 if (from + len > subdev->size)
274 /* First part goes into this subdev */
275 size = subdev->size - from;
276 else
277 /* Entire transaction goes into this subdev */
278 size = len;
279 259
280 if (subdev->read_oob) 260 /* partial read ? */
281 err = subdev->read_oob(subdev, from, size, 261 if (from + devops.len > subdev->size)
282 &retsize, buf); 262 devops.len = subdev->size - from;
283 else 263
284 err = -EINVAL; 264 err = subdev->read_oob(subdev, from, &devops);
265 ops->retlen += devops.retlen;
266
267 /* Save information about bitflips! */
268 if (unlikely(err)) {
269 if (err == -EBADMSG) {
270 mtd->ecc_stats.failed++;
271 ret = err;
272 } else if (err == -EUCLEAN) {
273 mtd->ecc_stats.corrected++;
274 /* Do not overwrite -EBADMSG !! */
275 if (!ret)
276 ret = err;
277 } else
278 return err;
279 }
285 280
286 if (err) 281 devops.len = ops->len - ops->retlen;
287 break; 282 if (!devops.len)
283 return ret;
288 284
289 *retlen += retsize; 285 if (devops.datbuf)
290 len -= size; 286 devops.datbuf += devops.retlen;
291 if (len == 0) 287 if (devops.oobbuf)
292 break; 288 devops.oobbuf += devops.ooblen;
293 289
294 err = -EINVAL;
295 buf += size;
296 from = 0; 290 from = 0;
297 } 291 }
298 return err; 292 return -EINVAL;
299} 293}
300 294
301static int 295static int
302concat_write_oob(struct mtd_info *mtd, loff_t to, size_t len, 296concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
303 size_t * retlen, const u_char * buf)
304{ 297{
305 struct mtd_concat *concat = CONCAT(mtd); 298 struct mtd_concat *concat = CONCAT(mtd);
306 int err = -EINVAL; 299 struct mtd_oob_ops devops = *ops;
307 int i; 300 int i, err;
308 301
309 if (!(mtd->flags & MTD_WRITEABLE)) 302 if (!(mtd->flags & MTD_WRITEABLE))
310 return -EROFS; 303 return -EROFS;
311 304
312 *retlen = 0; 305 ops->retlen = 0;
313 306
314 for (i = 0; i < concat->num_subdev; i++) { 307 for (i = 0; i < concat->num_subdev; i++) {
315 struct mtd_info *subdev = concat->subdev[i]; 308 struct mtd_info *subdev = concat->subdev[i];
316 size_t size, retsize;
317 309
318 if (to >= subdev->size) { 310 if (to >= subdev->size) {
319 size = 0;
320 to -= subdev->size; 311 to -= subdev->size;
321 continue; 312 continue;
322 } 313 }
323 if (to + len > subdev->size)
324 size = subdev->size - to;
325 else
326 size = len;
327 314
328 if (!(subdev->flags & MTD_WRITEABLE)) 315 /* partial write ? */
329 err = -EROFS; 316 if (to + devops.len > subdev->size)
330 else if (subdev->write_oob) 317 devops.len = subdev->size - to;
331 err = subdev->write_oob(subdev, to, size, &retsize,
332 buf);
333 else
334 err = -EINVAL;
335 318
319 err = subdev->write_oob(subdev, to, &devops);
320 ops->retlen += devops.retlen;
336 if (err) 321 if (err)
337 break; 322 return err;
338 323
339 *retlen += retsize; 324 devops.len = ops->len - ops->retlen;
340 len -= size; 325 if (!devops.len)
341 if (len == 0) 326 return 0;
342 break;
343 327
344 err = -EINVAL; 328 if (devops.datbuf)
345 buf += size; 329 devops.datbuf += devops.retlen;
330 if (devops.oobbuf)
331 devops.oobbuf += devops.ooblen;
346 to = 0; 332 to = 0;
347 } 333 }
348 return err; 334 return -EINVAL;
349} 335}
350 336
351static void concat_erase_callback(struct erase_info *instr) 337static void concat_erase_callback(struct erase_info *instr)
@@ -636,6 +622,60 @@ static void concat_resume(struct mtd_info *mtd)
636 } 622 }
637} 623}
638 624
625static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
626{
627 struct mtd_concat *concat = CONCAT(mtd);
628 int i, res = 0;
629
630 if (!concat->subdev[0]->block_isbad)
631 return res;
632
633 if (ofs > mtd->size)
634 return -EINVAL;
635
636 for (i = 0; i < concat->num_subdev; i++) {
637 struct mtd_info *subdev = concat->subdev[i];
638
639 if (ofs >= subdev->size) {
640 ofs -= subdev->size;
641 continue;
642 }
643
644 res = subdev->block_isbad(subdev, ofs);
645 break;
646 }
647
648 return res;
649}
650
651static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
652{
653 struct mtd_concat *concat = CONCAT(mtd);
654 int i, err = -EINVAL;
655
656 if (!concat->subdev[0]->block_markbad)
657 return 0;
658
659 if (ofs > mtd->size)
660 return -EINVAL;
661
662 for (i = 0; i < concat->num_subdev; i++) {
663 struct mtd_info *subdev = concat->subdev[i];
664
665 if (ofs >= subdev->size) {
666 ofs -= subdev->size;
667 continue;
668 }
669
670 err = subdev->block_markbad(subdev, ofs);
671 if (!err)
672 mtd->ecc_stats.badblocks++;
673 break;
674 }
675
676 return err;
677}
678
639/* 679/*
640 * This function constructs a virtual MTD device by concatenating 680 * This function constructs a virtual MTD device by concatenating
641 * num_devs MTD devices. A pointer to the new device object is 681 * num_devs MTD devices. A pointer to the new device object is
@@ -677,18 +717,22 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
677 concat->mtd.flags = subdev[0]->flags; 717 concat->mtd.flags = subdev[0]->flags;
678 concat->mtd.size = subdev[0]->size; 718 concat->mtd.size = subdev[0]->size;
679 concat->mtd.erasesize = subdev[0]->erasesize; 719 concat->mtd.erasesize = subdev[0]->erasesize;
680 concat->mtd.oobblock = subdev[0]->oobblock; 720 concat->mtd.writesize = subdev[0]->writesize;
681 concat->mtd.oobsize = subdev[0]->oobsize; 721 concat->mtd.oobsize = subdev[0]->oobsize;
682 concat->mtd.ecctype = subdev[0]->ecctype; 722 concat->mtd.ecctype = subdev[0]->ecctype;
683 concat->mtd.eccsize = subdev[0]->eccsize; 723 concat->mtd.eccsize = subdev[0]->eccsize;
684 if (subdev[0]->read_ecc) 724 if (subdev[0]->writev)
685 concat->mtd.read_ecc = concat_read_ecc; 725 concat->mtd.writev = concat_writev;
686 if (subdev[0]->write_ecc)
687 concat->mtd.write_ecc = concat_write_ecc;
688 if (subdev[0]->read_oob) 726 if (subdev[0]->read_oob)
689 concat->mtd.read_oob = concat_read_oob; 727 concat->mtd.read_oob = concat_read_oob;
690 if (subdev[0]->write_oob) 728 if (subdev[0]->write_oob)
691 concat->mtd.write_oob = concat_write_oob; 729 concat->mtd.write_oob = concat_write_oob;
730 if (subdev[0]->block_isbad)
731 concat->mtd.block_isbad = concat_block_isbad;
732 if (subdev[0]->block_markbad)
733 concat->mtd.block_markbad = concat_block_markbad;
734
735 concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
692 736
693 concat->subdev[0] = subdev[0]; 737 concat->subdev[0] = subdev[0];
694 738
@@ -717,12 +761,12 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
717 subdev[i]->flags & MTD_WRITEABLE; 761 subdev[i]->flags & MTD_WRITEABLE;
718 } 762 }
719 concat->mtd.size += subdev[i]->size; 763 concat->mtd.size += subdev[i]->size;
720 if (concat->mtd.oobblock != subdev[i]->oobblock || 764 concat->mtd.ecc_stats.badblocks +=
765 subdev[i]->ecc_stats.badblocks;
766 if (concat->mtd.writesize != subdev[i]->writesize ||
721 concat->mtd.oobsize != subdev[i]->oobsize || 767 concat->mtd.oobsize != subdev[i]->oobsize ||
722 concat->mtd.ecctype != subdev[i]->ecctype || 768 concat->mtd.ecctype != subdev[i]->ecctype ||
723 concat->mtd.eccsize != subdev[i]->eccsize || 769 concat->mtd.eccsize != subdev[i]->eccsize ||
724 !concat->mtd.read_ecc != !subdev[i]->read_ecc ||
725 !concat->mtd.write_ecc != !subdev[i]->write_ecc ||
726 !concat->mtd.read_oob != !subdev[i]->read_oob || 770 !concat->mtd.read_oob != !subdev[i]->read_oob ||
727 !concat->mtd.write_oob != !subdev[i]->write_oob) { 771 !concat->mtd.write_oob != !subdev[i]->write_oob) {
728 kfree(concat); 772 kfree(concat);
@@ -734,14 +778,11 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
734 778
735 } 779 }
736 780
781 concat->mtd.ecclayout = subdev[0]->ecclayout;
782
737 concat->num_subdev = num_devs; 783 concat->num_subdev = num_devs;
738 concat->mtd.name = name; 784 concat->mtd.name = name;
739 785
740 /*
741 * NOTE: for now, we do not provide any readv()/writev() methods
742 * because they are messy to implement and they are not
743 * used to a great extent anyway.
744 */
745 concat->mtd.erase = concat_erase; 786 concat->mtd.erase = concat_erase;
746 concat->mtd.read = concat_read; 787 concat->mtd.read = concat_read;
747 concat->mtd.write = concat_write; 788 concat->mtd.write = concat_write;
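
The concat_block_isbad() and concat_block_markbad() handlers added above share one lookup pattern: walk the subdevice array, subtracting each subdevice's size from the absolute offset until the offset falls inside a chip, then call that chip's handler. A minimal, runnable user-space sketch of that walk follows; the struct, device names and sizes are made-up illustration values, not kernel API.

/* User-space model (not kernel code) of the offset walk used by
 * concat_block_isbad()/concat_block_markbad() above. */
#include <stdio.h>
#include <stdint.h>

struct subdev { const char *name; uint64_t size; };

int main(void)
{
	struct subdev sub[] = {
		{ "chip0", 32ULL << 20 },	/* 32 MiB (example) */
		{ "chip1", 64ULL << 20 },	/* 64 MiB (example) */
	};
	uint64_t ofs = 40ULL << 20;	/* absolute offset into the concat device */

	for (unsigned i = 0; i < sizeof(sub) / sizeof(sub[0]); i++) {
		if (ofs >= sub[i].size) {
			ofs -= sub[i].size;	/* not in this chip, keep walking */
			continue;
		}
		printf("offset maps to %s at local offset 0x%llx\n",
		       sub[i].name, (unsigned long long)ofs);
		break;
	}
	return 0;
}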
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 9905870f56e5..16a952dd486a 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -47,6 +47,7 @@ int add_mtd_device(struct mtd_info *mtd)
47{ 47{
48 int i; 48 int i;
49 49
50 BUG_ON(mtd->writesize == 0);
50 mutex_lock(&mtd_table_mutex); 51 mutex_lock(&mtd_table_mutex);
51 52
52 for (i=0; i < MAX_MTD_DEVICES; i++) 53 for (i=0; i < MAX_MTD_DEVICES; i++)
@@ -254,37 +255,6 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
254 return ret; 255 return ret;
255} 256}
256 257
257
258/* default_mtd_readv - default mtd readv method for MTD devices that dont
259 * implement their own
260 */
261
262int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
263 unsigned long count, loff_t from, size_t *retlen)
264{
265 unsigned long i;
266 size_t totlen = 0, thislen;
267 int ret = 0;
268
269 if(!mtd->read) {
270 ret = -EIO;
271 } else {
272 for (i=0; i<count; i++) {
273 if (!vecs[i].iov_len)
274 continue;
275 ret = mtd->read(mtd, from, vecs[i].iov_len, &thislen, vecs[i].iov_base);
276 totlen += thislen;
277 if (ret || thislen != vecs[i].iov_len)
278 break;
279 from += vecs[i].iov_len;
280 }
281 }
282 if (retlen)
283 *retlen = totlen;
284 return ret;
285}
286
287
288EXPORT_SYMBOL(add_mtd_device); 258EXPORT_SYMBOL(add_mtd_device);
289EXPORT_SYMBOL(del_mtd_device); 259EXPORT_SYMBOL(del_mtd_device);
290EXPORT_SYMBOL(get_mtd_device); 260EXPORT_SYMBOL(get_mtd_device);
@@ -292,7 +262,6 @@ EXPORT_SYMBOL(put_mtd_device);
292EXPORT_SYMBOL(register_mtd_user); 262EXPORT_SYMBOL(register_mtd_user);
293EXPORT_SYMBOL(unregister_mtd_user); 263EXPORT_SYMBOL(unregister_mtd_user);
294EXPORT_SYMBOL(default_mtd_writev); 264EXPORT_SYMBOL(default_mtd_writev);
295EXPORT_SYMBOL(default_mtd_readv);
296 265
297#ifdef CONFIG_PROC_FS 266#ifdef CONFIG_PROC_FS
298 267
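
The new BUG_ON(mtd->writesize == 0) in add_mtd_device() turns writesize (the field that replaces oobblock throughout this series) into a hard registration requirement: a driver must fill it in before registering. A small user-space model of that contract is sketched below; the structure and function names are illustrative stand-ins, and the example values (1 for a NOR-style map, the page size for NAND) are typical choices, not quoted from any specific driver.

/* User-space model of the registration contract enforced by the BUG_ON(). */
#include <assert.h>
#include <stdio.h>

struct fake_mtd { const char *name; unsigned writesize; };

static void add_mtd_device_model(struct fake_mtd *mtd)
{
	assert(mtd->writesize != 0);	/* mirrors the kernel's BUG_ON() */
	printf("registered %s, writesize=%u\n", mtd->name, mtd->writesize);
}

int main(void)
{
	struct fake_mtd nor  = { "nor-map",   1 };	/* example values */
	struct fake_mtd nand = { "nand-chip", 512 };

	add_mtd_device_model(&nor);
	add_mtd_device_model(&nand);
	return 0;
}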
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 99395911d26f..77a7123a5c56 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -51,16 +51,21 @@ static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
51 size_t *retlen, u_char *buf) 51 size_t *retlen, u_char *buf)
52{ 52{
53 struct mtd_part *part = PART(mtd); 53 struct mtd_part *part = PART(mtd);
54 int res;
55
54 if (from >= mtd->size) 56 if (from >= mtd->size)
55 len = 0; 57 len = 0;
56 else if (from + len > mtd->size) 58 else if (from + len > mtd->size)
57 len = mtd->size - from; 59 len = mtd->size - from;
58 if (part->master->read_ecc == NULL) 60 res = part->master->read (part->master, from + part->offset,
59 return part->master->read (part->master, from + part->offset, 61 len, retlen, buf);
60 len, retlen, buf); 62 if (unlikely(res)) {
61 else 63 if (res == -EUCLEAN)
62 return part->master->read_ecc (part->master, from + part->offset, 64 mtd->ecc_stats.corrected++;
63 len, retlen, buf, NULL, &mtd->oobinfo); 65 if (res == -EBADMSG)
66 mtd->ecc_stats.failed++;
67 }
68 return res;
64} 69}
65 70
66static int part_point (struct mtd_info *mtd, loff_t from, size_t len, 71static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
@@ -74,6 +79,7 @@ static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
74 return part->master->point (part->master, from + part->offset, 79 return part->master->point (part->master, from + part->offset,
75 len, retlen, buf); 80 len, retlen, buf);
76} 81}
82
77static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len) 83static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
78{ 84{
79 struct mtd_part *part = PART(mtd); 85 struct mtd_part *part = PART(mtd);
@@ -81,31 +87,25 @@ static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_
81 part->master->unpoint (part->master, addr, from + part->offset, len); 87 part->master->unpoint (part->master, addr, from + part->offset, len);
82} 88}
83 89
84 90static int part_read_oob(struct mtd_info *mtd, loff_t from,
85static int part_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, 91 struct mtd_oob_ops *ops)
86 size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel)
87{ 92{
88 struct mtd_part *part = PART(mtd); 93 struct mtd_part *part = PART(mtd);
89 if (oobsel == NULL) 94 int res;
90 oobsel = &mtd->oobinfo;
91 if (from >= mtd->size)
92 len = 0;
93 else if (from + len > mtd->size)
94 len = mtd->size - from;
95 return part->master->read_ecc (part->master, from + part->offset,
96 len, retlen, buf, eccbuf, oobsel);
97}
98 95
99static int part_read_oob (struct mtd_info *mtd, loff_t from, size_t len,
100 size_t *retlen, u_char *buf)
101{
102 struct mtd_part *part = PART(mtd);
103 if (from >= mtd->size) 96 if (from >= mtd->size)
104 len = 0; 97 return -EINVAL;
105 else if (from + len > mtd->size) 98 if (from + ops->len > mtd->size)
106 len = mtd->size - from; 99 return -EINVAL;
107 return part->master->read_oob (part->master, from + part->offset, 100 res = part->master->read_oob(part->master, from + part->offset, ops);
108 len, retlen, buf); 101
102 if (unlikely(res)) {
103 if (res == -EUCLEAN)
104 mtd->ecc_stats.corrected++;
105 if (res == -EBADMSG)
106 mtd->ecc_stats.failed++;
107 }
108 return res;
109} 109}
110 110
111static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 111static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
@@ -148,44 +148,23 @@ static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
148 len = 0; 148 len = 0;
149 else if (to + len > mtd->size) 149 else if (to + len > mtd->size)
150 len = mtd->size - to; 150 len = mtd->size - to;
151 if (part->master->write_ecc == NULL) 151 return part->master->write (part->master, to + part->offset,
152 return part->master->write (part->master, to + part->offset, 152 len, retlen, buf);
153 len, retlen, buf);
154 else
155 return part->master->write_ecc (part->master, to + part->offset,
156 len, retlen, buf, NULL, &mtd->oobinfo);
157
158} 153}
159 154
160static int part_write_ecc (struct mtd_info *mtd, loff_t to, size_t len, 155static int part_write_oob(struct mtd_info *mtd, loff_t to,
161 size_t *retlen, const u_char *buf, 156 struct mtd_oob_ops *ops)
162 u_char *eccbuf, struct nand_oobinfo *oobsel)
163{ 157{
164 struct mtd_part *part = PART(mtd); 158 struct mtd_part *part = PART(mtd);
165 if (!(mtd->flags & MTD_WRITEABLE))
166 return -EROFS;
167 if (oobsel == NULL)
168 oobsel = &mtd->oobinfo;
169 if (to >= mtd->size)
170 len = 0;
171 else if (to + len > mtd->size)
172 len = mtd->size - to;
173 return part->master->write_ecc (part->master, to + part->offset,
174 len, retlen, buf, eccbuf, oobsel);
175}
176 159
177static int part_write_oob (struct mtd_info *mtd, loff_t to, size_t len,
178 size_t *retlen, const u_char *buf)
179{
180 struct mtd_part *part = PART(mtd);
181 if (!(mtd->flags & MTD_WRITEABLE)) 160 if (!(mtd->flags & MTD_WRITEABLE))
182 return -EROFS; 161 return -EROFS;
162
183 if (to >= mtd->size) 163 if (to >= mtd->size)
184 len = 0; 164 return -EINVAL;
185 else if (to + len > mtd->size) 165 if (to + ops->len > mtd->size)
186 len = mtd->size - to; 166 return -EINVAL;
187 return part->master->write_oob (part->master, to + part->offset, 167 return part->master->write_oob(part->master, to + part->offset, ops);
188 len, retlen, buf);
189} 168}
190 169
191static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, 170static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
@@ -208,52 +187,8 @@ static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
208 struct mtd_part *part = PART(mtd); 187 struct mtd_part *part = PART(mtd);
209 if (!(mtd->flags & MTD_WRITEABLE)) 188 if (!(mtd->flags & MTD_WRITEABLE))
210 return -EROFS; 189 return -EROFS;
211 if (part->master->writev_ecc == NULL) 190 return part->master->writev (part->master, vecs, count,
212 return part->master->writev (part->master, vecs, count,
213 to + part->offset, retlen); 191 to + part->offset, retlen);
214 else
215 return part->master->writev_ecc (part->master, vecs, count,
216 to + part->offset, retlen,
217 NULL, &mtd->oobinfo);
218}
219
220static int part_readv (struct mtd_info *mtd, struct kvec *vecs,
221 unsigned long count, loff_t from, size_t *retlen)
222{
223 struct mtd_part *part = PART(mtd);
224 if (part->master->readv_ecc == NULL)
225 return part->master->readv (part->master, vecs, count,
226 from + part->offset, retlen);
227 else
228 return part->master->readv_ecc (part->master, vecs, count,
229 from + part->offset, retlen,
230 NULL, &mtd->oobinfo);
231}
232
233static int part_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
234 unsigned long count, loff_t to, size_t *retlen,
235 u_char *eccbuf, struct nand_oobinfo *oobsel)
236{
237 struct mtd_part *part = PART(mtd);
238 if (!(mtd->flags & MTD_WRITEABLE))
239 return -EROFS;
240 if (oobsel == NULL)
241 oobsel = &mtd->oobinfo;
242 return part->master->writev_ecc (part->master, vecs, count,
243 to + part->offset, retlen,
244 eccbuf, oobsel);
245}
246
247static int part_readv_ecc (struct mtd_info *mtd, struct kvec *vecs,
248 unsigned long count, loff_t from, size_t *retlen,
249 u_char *eccbuf, struct nand_oobinfo *oobsel)
250{
251 struct mtd_part *part = PART(mtd);
252 if (oobsel == NULL)
253 oobsel = &mtd->oobinfo;
254 return part->master->readv_ecc (part->master, vecs, count,
255 from + part->offset, retlen,
256 eccbuf, oobsel);
257} 192}
258 193
259static int part_erase (struct mtd_info *mtd, struct erase_info *instr) 194static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
@@ -329,12 +264,17 @@ static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
329static int part_block_markbad (struct mtd_info *mtd, loff_t ofs) 264static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
330{ 265{
331 struct mtd_part *part = PART(mtd); 266 struct mtd_part *part = PART(mtd);
267 int res;
268
332 if (!(mtd->flags & MTD_WRITEABLE)) 269 if (!(mtd->flags & MTD_WRITEABLE))
333 return -EROFS; 270 return -EROFS;
334 if (ofs >= mtd->size) 271 if (ofs >= mtd->size)
335 return -EINVAL; 272 return -EINVAL;
336 ofs += part->offset; 273 ofs += part->offset;
337 return part->master->block_markbad(part->master, ofs); 274 res = part->master->block_markbad(part->master, ofs);
275 if (!res)
276 mtd->ecc_stats.badblocks++;
277 return res;
338} 278}
339 279
340/* 280/*
@@ -398,7 +338,7 @@ int add_mtd_partitions(struct mtd_info *master,
398 slave->mtd.type = master->type; 338 slave->mtd.type = master->type;
399 slave->mtd.flags = master->flags & ~parts[i].mask_flags; 339 slave->mtd.flags = master->flags & ~parts[i].mask_flags;
400 slave->mtd.size = parts[i].size; 340 slave->mtd.size = parts[i].size;
401 slave->mtd.oobblock = master->oobblock; 341 slave->mtd.writesize = master->writesize;
402 slave->mtd.oobsize = master->oobsize; 342 slave->mtd.oobsize = master->oobsize;
403 slave->mtd.ecctype = master->ecctype; 343 slave->mtd.ecctype = master->ecctype;
404 slave->mtd.eccsize = master->eccsize; 344 slave->mtd.eccsize = master->eccsize;
@@ -415,10 +355,6 @@ int add_mtd_partitions(struct mtd_info *master,
415 slave->mtd.unpoint = part_unpoint; 355 slave->mtd.unpoint = part_unpoint;
416 } 356 }
417 357
418 if (master->read_ecc)
419 slave->mtd.read_ecc = part_read_ecc;
420 if (master->write_ecc)
421 slave->mtd.write_ecc = part_write_ecc;
422 if (master->read_oob) 358 if (master->read_oob)
423 slave->mtd.read_oob = part_read_oob; 359 slave->mtd.read_oob = part_read_oob;
424 if (master->write_oob) 360 if (master->write_oob)
@@ -443,12 +379,6 @@ int add_mtd_partitions(struct mtd_info *master,
443 } 379 }
444 if (master->writev) 380 if (master->writev)
445 slave->mtd.writev = part_writev; 381 slave->mtd.writev = part_writev;
446 if (master->readv)
447 slave->mtd.readv = part_readv;
448 if (master->writev_ecc)
449 slave->mtd.writev_ecc = part_writev_ecc;
450 if (master->readv_ecc)
451 slave->mtd.readv_ecc = part_readv_ecc;
452 if (master->lock) 382 if (master->lock)
453 slave->mtd.lock = part_lock; 383 slave->mtd.lock = part_lock;
454 if (master->unlock) 384 if (master->unlock)
@@ -528,8 +458,17 @@ int add_mtd_partitions(struct mtd_info *master,
528 parts[i].name); 458 parts[i].name);
529 } 459 }
530 460
531 /* copy oobinfo from master */ 461 slave->mtd.ecclayout = master->ecclayout;
532 memcpy(&slave->mtd.oobinfo, &master->oobinfo, sizeof(slave->mtd.oobinfo)); 462 if (master->block_isbad) {
463 uint32_t offs = 0;
464
465 while(offs < slave->mtd.size) {
466 if (master->block_isbad(master,
467 offs + slave->offset))
468 slave->mtd.ecc_stats.badblocks++;
469 offs += slave->mtd.erasesize;
470 }
471 }
533 472
534 if(parts[i].mtdp) 473 if(parts[i].mtdp)
535 { /* store the object pointer (caller may or may not register it */ 474 { /* store the object pointer (caller may or may not register it */
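
part_read() and part_read_oob() above now fold the master device's return code into the partition's ecc_stats: -EUCLEAN counts as a corrected read (the returned data is still good), -EBADMSG as a failed one. The runnable user-space model below shows only that accounting; the structure and helper names are made up for illustration.

/* User-space model (not kernel code) of the ecc_stats accounting in part_read().
 * EUCLEAN is the Linux errno MTD reuses for "bit-flips corrected". */
#include <stdio.h>
#include <errno.h>

struct ecc_stats { unsigned corrected, failed; };

static int account_read_result(int res, struct ecc_stats *st)
{
	if (res == -EUCLEAN)
		st->corrected++;	/* recoverable: caller still gets valid data */
	if (res == -EBADMSG)
		st->failed++;		/* unrecoverable read error */
	return res;
}

int main(void)
{
	struct ecc_stats st = { 0, 0 };

	account_read_result(0, &st);		/* clean read */
	account_read_result(-EUCLEAN, &st);	/* corrected bit-flip */
	account_read_result(-EBADMSG, &st);	/* uncorrectable block */

	printf("corrected=%u failed=%u\n", st.corrected, st.failed);
	return 0;
}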
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index cfe288a6e853..3db77eec0ed2 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -23,6 +23,14 @@ config MTD_NAND_VERIFY_WRITE
23 device thinks the write was successful, a bit could have been 23 device thinks the write was successful, a bit could have been
24 flipped accidentaly due to device wear or something else. 24 flipped accidentaly due to device wear or something else.
25 25
26config MTD_NAND_ECC_SMC
27 bool "NAND ECC Smart Media byte order"
28 depends on MTD_NAND
29 default n
30 help
31 Software ECC according to the Smart Media Specification.
32 The original Linux implementation had byte 0 and 1 swapped.
33
26config MTD_NAND_AUTCPU12 34config MTD_NAND_AUTCPU12
27 tristate "SmartMediaCard on autronix autcpu12 board" 35 tristate "SmartMediaCard on autronix autcpu12 board"
28 depends on MTD_NAND && ARCH_AUTCPU12 36 depends on MTD_NAND && ARCH_AUTCPU12
@@ -49,12 +57,24 @@ config MTD_NAND_SPIA
49 help 57 help
50 If you had to ask, you don't have one. Say 'N'. 58 If you had to ask, you don't have one. Say 'N'.
51 59
60config MTD_NAND_AMS_DELTA
61 tristate "NAND Flash device on Amstrad E3"
62 depends on MACH_AMS_DELTA && MTD_NAND
63 help
64 Support for NAND flash on Amstrad E3 (Delta).
65
52config MTD_NAND_TOTO 66config MTD_NAND_TOTO
53 tristate "NAND Flash device on TOTO board" 67 tristate "NAND Flash device on TOTO board"
54 depends on ARCH_OMAP && MTD_NAND 68 depends on ARCH_OMAP && MTD_NAND && BROKEN
55 help 69 help
56 Support for NAND flash on Texas Instruments Toto platform. 70 Support for NAND flash on Texas Instruments Toto platform.
57 71
72config MTD_NAND_TS7250
73 tristate "NAND Flash device on TS-7250 board"
74 depends on MACH_TS72XX && MTD_NAND
75 help
76 Support for NAND flash on Technologic Systems TS-7250 platform.
77
58config MTD_NAND_IDS 78config MTD_NAND_IDS
59 tristate 79 tristate
60 80
@@ -76,7 +96,7 @@ config MTD_NAND_RTC_FROM4
76 96
77config MTD_NAND_PPCHAMELEONEVB 97config MTD_NAND_PPCHAMELEONEVB
78 tristate "NAND Flash device on PPChameleonEVB board" 98 tristate "NAND Flash device on PPChameleonEVB board"
79 depends on PPCHAMELEONEVB && MTD_NAND 99 depends on PPCHAMELEONEVB && MTD_NAND && BROKEN
80 help 100 help
81 This enables the NAND flash driver on the PPChameleon EVB Board. 101 This enables the NAND flash driver on the PPChameleon EVB Board.
82 102
@@ -87,7 +107,7 @@ config MTD_NAND_S3C2410
87 This enables the NAND flash controller on the S3C2410 and S3C2440 107 This enables the NAND flash controller on the S3C2410 and S3C2440
88 SoCs 108 SoCs
89 109
90 No board specfic support is done by this driver, each board 110 No board specific support is done by this driver, each board
91 must advertise a platform_device for the driver to attach. 111 must advertise a platform_device for the driver to attach.
92 112
93config MTD_NAND_S3C2410_DEBUG 113config MTD_NAND_S3C2410_DEBUG
@@ -109,6 +129,22 @@ config MTD_NAND_S3C2410_HWECC
109 currently not be able to switch to software, as there is no 129 currently not be able to switch to software, as there is no
110 implementation for ECC method used by the S3C2410 130 implementation for ECC method used by the S3C2410
111 131
132config MTD_NAND_NDFC
133 tristate "NDFC NanD Flash Controller"
134 depends on MTD_NAND && 44x
135 help
136 NDFC Nand Flash Controllers are integrated in EP44x SoCs
137
138config MTD_NAND_S3C2410_CLKSTOP
139 bool "S3C2410 NAND IDLE clock stop"
140 depends on MTD_NAND_S3C2410
141 default n
142 help
143 Stop the clock to the NAND controller when there is no chip
144 selected to save power. This will mean there is a small delay
145 when the NAND chip is selected or released, but will save
146 approximately 5mA of power when there is nothing happening.
147
112config MTD_NAND_DISKONCHIP 148config MTD_NAND_DISKONCHIP
113 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)" 149 tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation) (EXPERIMENTAL)"
114 depends on MTD_NAND && EXPERIMENTAL 150 depends on MTD_NAND && EXPERIMENTAL
@@ -183,11 +219,24 @@ config MTD_NAND_SHARPSL
183 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" 219 tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
184 depends on MTD_NAND && ARCH_PXA 220 depends on MTD_NAND && ARCH_PXA
185 221
222config MTD_NAND_CS553X
223 tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
224 depends on MTD_NAND && X86_32 && (X86_PC || X86_GENERICARCH)
225 help
226 The CS553x companion chips for the AMD Geode processor
227 include NAND flash controllers with built-in hardware ECC
228 capabilities; enabling this option will allow you to use
229 these. The driver will check the MSRs to verify that the
230 controller is enabled for NAND, and currently requires that
231 the controller be in MMIO mode.
232
233 If you say "m", the module will be called "cs553x_nand.ko".
234
186config MTD_NAND_NANDSIM 235config MTD_NAND_NANDSIM
187 tristate "Support for NAND Flash Simulator" 236 tristate "Support for NAND Flash Simulator"
188 depends on MTD_NAND && MTD_PARTITIONS 237 depends on MTD_NAND && MTD_PARTITIONS
189 help 238 help
190 The simulator may simulate verious NAND flash chips for the 239 The simulator may simulate various NAND flash chips for the
191 MTD nand layer. 240 MTD nand layer.
192 241
193endmenu 242endmenu
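
MTD_NAND_ECC_SMC selects the Smart Media byte order for the software ECC code. Presumably it is consumed in nand_ecc.c as a compile-time swap of the first two of the three ECC bytes; the sketch below only illustrates that idea under that assumption and is not copied from the driver, and the function name and byte values are invented for the example.

/* Hedged sketch of what the Smart Media byte-order option implies. */
#include <stdio.h>

/* #define CONFIG_MTD_NAND_ECC_SMC 1 */	/* normally set via Kconfig */

static void store_ecc(unsigned char tmp1, unsigned char tmp2,
		      unsigned char tmp3, unsigned char ecc_code[3])
{
#ifdef CONFIG_MTD_NAND_ECC_SMC
	ecc_code[0] = tmp2;	/* Smart Media byte order */
	ecc_code[1] = tmp1;
#else
	ecc_code[0] = tmp1;	/* historical Linux byte order */
	ecc_code[1] = tmp2;
#endif
	ecc_code[2] = tmp3;
}

int main(void)
{
	unsigned char ecc[3];

	store_ecc(0x12, 0x34, 0x56, ecc);
	printf("%02x %02x %02x\n", ecc[0], ecc[1], ecc[2]);
	return 0;
}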
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 41742026a52e..f74759351c91 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_MTD_NAND) += nand.o nand_ecc.o
7obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o 7obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
8 8
9obj-$(CONFIG_MTD_NAND_SPIA) += spia.o 9obj-$(CONFIG_MTD_NAND_SPIA) += spia.o
10obj-$(CONFIG_MTD_NAND_AMS_DELTA) += ams-delta.o
10obj-$(CONFIG_MTD_NAND_TOTO) += toto.o 11obj-$(CONFIG_MTD_NAND_TOTO) += toto.o
11obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o 12obj-$(CONFIG_MTD_NAND_AUTCPU12) += autcpu12.o
12obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o 13obj-$(CONFIG_MTD_NAND_EDB7312) += edb7312.o
@@ -17,6 +18,9 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
17obj-$(CONFIG_MTD_NAND_H1900) += h1910.o 18obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
18obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o 19obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
19obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o 20obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
21obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
20obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o 22obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
23obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
24obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
21 25
22nand-objs = nand_base.o nand_bbt.o 26nand-objs = nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
new file mode 100644
index 000000000000..d7897dc6b3c8
--- /dev/null
+++ b/drivers/mtd/nand/ams-delta.c
@@ -0,0 +1,237 @@
1/*
2 * drivers/mtd/nand/ams-delta.c
3 *
4 * Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
5 *
6 * Derived from drivers/mtd/toto.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * Overview:
13 * This is a device driver for the NAND flash device found on the
14 * Amstrad E3 (Delta).
15 */
16
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/delay.h>
21#include <linux/mtd/mtd.h>
22#include <linux/mtd/nand.h>
23#include <linux/mtd/partitions.h>
24#include <asm/io.h>
25#include <asm/arch/hardware.h>
26#include <asm/sizes.h>
27#include <asm/arch/gpio.h>
28#include <asm/arch/board-ams-delta.h>
29
30/*
31 * MTD structure for E3 (Delta)
32 */
33static struct mtd_info *ams_delta_mtd = NULL;
34
35#define NAND_MASK (AMS_DELTA_LATCH2_NAND_NRE | AMS_DELTA_LATCH2_NAND_NWE | AMS_DELTA_LATCH2_NAND_CLE | AMS_DELTA_LATCH2_NAND_ALE | AMS_DELTA_LATCH2_NAND_NCE | AMS_DELTA_LATCH2_NAND_NWP)
36
37/*
38 * Define partitions for flash devices
39 */
40
41static struct mtd_partition partition_info[] = {
42 { .name = "Kernel",
43 .offset = 0,
44 .size = 3 * SZ_1M + SZ_512K },
45 { .name = "u-boot",
46 .offset = 3 * SZ_1M + SZ_512K,
47 .size = SZ_256K },
48 { .name = "u-boot params",
49 .offset = 3 * SZ_1M + SZ_512K + SZ_256K,
50 .size = SZ_256K },
51 { .name = "Amstrad LDR",
52 .offset = 4 * SZ_1M,
53 .size = SZ_256K },
54 { .name = "File system",
55 .offset = 4 * SZ_1M + 1 * SZ_256K,
56 .size = 27 * SZ_1M },
57 { .name = "PBL reserved",
58 .offset = 32 * SZ_1M - 3 * SZ_256K,
59 .size = 3 * SZ_256K },
60};
61
62static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
63{
64 struct nand_chip *this = mtd->priv;
65
66 omap_writew(0, (OMAP_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
67 omap_writew(byte, this->IO_ADDR_W);
68 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE, 0);
69 ndelay(40);
70 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NWE,
71 AMS_DELTA_LATCH2_NAND_NWE);
72}
73
74static u_char ams_delta_read_byte(struct mtd_info *mtd)
75{
76 u_char res;
77 struct nand_chip *this = mtd->priv;
78
79 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE, 0);
80 ndelay(40);
81 omap_writew(~0, (OMAP_MPUIO_BASE + OMAP_MPUIO_IO_CNTL));
82 res = omap_readw(this->IO_ADDR_R);
83 ams_delta_latch2_write(AMS_DELTA_LATCH2_NAND_NRE,
84 AMS_DELTA_LATCH2_NAND_NRE);
85
86 return res;
87}
88
89static void ams_delta_write_buf(struct mtd_info *mtd, const u_char *buf,
90 int len)
91{
92 int i;
93
94 for (i=0; i<len; i++)
95 ams_delta_write_byte(mtd, buf[i]);
96}
97
98static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
99{
100 int i;
101
102 for (i=0; i<len; i++)
103 buf[i] = ams_delta_read_byte(mtd);
104}
105
106static int ams_delta_verify_buf(struct mtd_info *mtd, const u_char *buf,
107 int len)
108{
109 int i;
110
111 for (i=0; i<len; i++)
112 if (buf[i] != ams_delta_read_byte(mtd))
113 return -EFAULT;
114
115 return 0;
116}
117
118/*
119 * Command control function
120 *
121 * ctrl:
122 * NAND_NCE: bit 0 -> bit 2
123 * NAND_CLE: bit 1 -> bit 7
124 * NAND_ALE: bit 2 -> bit 6
125 */
126static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
127 unsigned int ctrl)
128{
129
130 if (ctrl & NAND_CTRL_CHANGE) {
131 unsigned long bits;
132
133 bits = (~ctrl & NAND_NCE) << 2;
134 bits |= (ctrl & NAND_CLE) << 7;
135 bits |= (ctrl & NAND_ALE) << 6;
136
137 ams_delta_latch2_write(0xC2, bits);
138 }
139
140 if (cmd != NAND_CMD_NONE)
141 ams_delta_write_byte(mtd, cmd);
142}
143
144static int ams_delta_nand_ready(struct mtd_info *mtd)
145{
146 return omap_get_gpio_datain(AMS_DELTA_GPIO_PIN_NAND_RB);
147}
148
149/*
150 * Main initialization routine
151 */
152static int __init ams_delta_init(void)
153{
154 struct nand_chip *this;
155 int err = 0;
156
157 /* Allocate memory for MTD device structure and private data */
158 ams_delta_mtd = kmalloc(sizeof(struct mtd_info) +
159 sizeof(struct nand_chip), GFP_KERNEL);
160 if (!ams_delta_mtd) {
161 printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
162 err = -ENOMEM;
163 goto out;
164 }
165
166 ams_delta_mtd->owner = THIS_MODULE;
167
168 /* Get pointer to private data */
169 this = (struct nand_chip *) (&ams_delta_mtd[1]);
170
171 /* Initialize structures */
172 memset(ams_delta_mtd, 0, sizeof(struct mtd_info));
173 memset(this, 0, sizeof(struct nand_chip));
174
175 /* Link the private data with the MTD structure */
176 ams_delta_mtd->priv = this;
177
178 /* Set address of NAND IO lines */
179 this->IO_ADDR_R = (OMAP_MPUIO_BASE + OMAP_MPUIO_INPUT_LATCH);
180 this->IO_ADDR_W = (OMAP_MPUIO_BASE + OMAP_MPUIO_OUTPUT);
181 this->read_byte = ams_delta_read_byte;
182 this->write_buf = ams_delta_write_buf;
183 this->read_buf = ams_delta_read_buf;
184 this->verify_buf = ams_delta_verify_buf;
185 this->cmd_ctrl = ams_delta_hwcontrol;
186 if (!omap_request_gpio(AMS_DELTA_GPIO_PIN_NAND_RB)) {
187 this->dev_ready = ams_delta_nand_ready;
188 } else {
189 this->dev_ready = NULL;
190 printk(KERN_NOTICE "Couldn't request gpio for Delta NAND ready.\n");
191 }
192 /* 30 us command delay time */
193 this->chip_delay = 30;
194 this->ecc.mode = NAND_ECC_SOFT;
195
196 /* Set chip enabled, but */
197 ams_delta_latch2_write(NAND_MASK, AMS_DELTA_LATCH2_NAND_NRE |
198 AMS_DELTA_LATCH2_NAND_NWE |
199 AMS_DELTA_LATCH2_NAND_NCE |
200 AMS_DELTA_LATCH2_NAND_NWP);
201
202 /* Scan to find existence of the device */
203 if (nand_scan(ams_delta_mtd, 1)) {
204 err = -ENXIO;
205 goto out_mtd;
206 }
207
208 /* Register the partitions */
209 add_mtd_partitions(ams_delta_mtd, partition_info,
210 ARRAY_SIZE(partition_info));
211
212 goto out;
213
214 out_mtd:
215 kfree(ams_delta_mtd);
216 out:
217 return err;
218}
219
220module_init(ams_delta_init);
221
222/*
223 * Clean up routine
224 */
225static void __exit ams_delta_cleanup(void)
226{
227 /* Release resources, unregister device */
228 nand_release(ams_delta_mtd);
229
230 /* Free the MTD device structure */
231 kfree(ams_delta_mtd);
232}
233module_exit(ams_delta_cleanup);
234
235MODULE_LICENSE("GPL");
236MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
237MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
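
ams_delta_hwcontrol() above follows the new cmd_ctrl() convention that replaces the old NAND_CTL_SET*/CLR* hwcontrol() hook: ctrl carries the desired nCE/CLE/ALE state plus NAND_CTRL_CHANGE when the latch lines need reprogramming, and cmd is either a byte to strobe out or NAND_CMD_NONE. The stand-alone model below illustrates that calling sequence only; the constants are local stand-ins rather than the <linux/mtd/nand.h> definitions, and the call order is a rough sketch of how nand_base issues a command.

/* User-space model (not driver code) of the cmd_ctrl() calling convention. */
#include <stdio.h>

#define NAND_NCE	 0x01	/* stand-in values for illustration */
#define NAND_CLE	 0x02
#define NAND_ALE	 0x04
#define NAND_CTRL_CHANGE 0x80
#define NAND_CMD_NONE	 (-1)

static void cmd_ctrl(int cmd, unsigned int ctrl)
{
	if (ctrl & NAND_CTRL_CHANGE)
		printf("latch lines -> NCE=%d CLE=%d ALE=%d\n",
		       !!(ctrl & NAND_NCE), !!(ctrl & NAND_CLE),
		       !!(ctrl & NAND_ALE));
	if (cmd != NAND_CMD_NONE)
		printf("write byte 0x%02x to the chip\n", cmd);
}

int main(void)
{
	/* Roughly what issuing a READ0 command with one address byte looks like. */
	cmd_ctrl(0x00, NAND_CTRL_CHANGE | NAND_NCE | NAND_CLE);	/* command phase */
	cmd_ctrl(0x00, NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE);	/* address phase */
	cmd_ctrl(NAND_CMD_NONE, NAND_CTRL_CHANGE | NAND_NCE);	/* back to data */
	return 0;
}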
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index bde3550910a2..31228334da12 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/interrupt.h>
17#include <linux/mtd/mtd.h> 18#include <linux/mtd/mtd.h>
18#include <linux/mtd/nand.h> 19#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h> 20#include <linux/mtd/partitions.h>
@@ -38,22 +39,21 @@
38 */ 39 */
39static struct mtd_info *au1550_mtd = NULL; 40static struct mtd_info *au1550_mtd = NULL;
40static void __iomem *p_nand; 41static void __iomem *p_nand;
41static int nand_width = 1; /* default x8*/ 42static int nand_width = 1; /* default x8 */
43static void (*au1550_write_byte)(struct mtd_info *, u_char);
42 44
43/* 45/*
44 * Define partitions for flash device 46 * Define partitions for flash device
45 */ 47 */
46static const struct mtd_partition partition_info[] = { 48static const struct mtd_partition partition_info[] = {
47 { 49 {
48 .name = "NAND FS 0", 50 .name = "NAND FS 0",
49 .offset = 0, 51 .offset = 0,
50 .size = 8*1024*1024 52 .size = 8 * 1024 * 1024},
51 },
52 { 53 {
53 .name = "NAND FS 1", 54 .name = "NAND FS 1",
54 .offset = MTDPART_OFS_APPEND, 55 .offset = MTDPART_OFS_APPEND,
55 .size = MTDPART_SIZ_FULL 56 .size = MTDPART_SIZ_FULL}
56 }
57}; 57};
58 58
59/** 59/**
@@ -130,21 +130,6 @@ static u16 au_read_word(struct mtd_info *mtd)
130} 130}
131 131
132/** 132/**
133 * au_write_word - write one word to the chip
134 * @mtd: MTD device structure
135 * @word: data word to write
136 *
137 * write function for 16bit buswith without
138 * endianess conversion
139 */
140static void au_write_word(struct mtd_info *mtd, u16 word)
141{
142 struct nand_chip *this = mtd->priv;
143 writew(word, this->IO_ADDR_W);
144 au_sync();
145}
146
147/**
148 * au_write_buf - write buffer to chip 133 * au_write_buf - write buffer to chip
149 * @mtd: MTD device structure 134 * @mtd: MTD device structure
150 * @buf: data buffer 135 * @buf: data buffer
@@ -157,7 +142,7 @@ static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
157 int i; 142 int i;
158 struct nand_chip *this = mtd->priv; 143 struct nand_chip *this = mtd->priv;
159 144
160 for (i=0; i<len; i++) { 145 for (i = 0; i < len; i++) {
161 writeb(buf[i], this->IO_ADDR_W); 146 writeb(buf[i], this->IO_ADDR_W);
162 au_sync(); 147 au_sync();
163 } 148 }
@@ -176,7 +161,7 @@ static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
176 int i; 161 int i;
177 struct nand_chip *this = mtd->priv; 162 struct nand_chip *this = mtd->priv;
178 163
179 for (i=0; i<len; i++) { 164 for (i = 0; i < len; i++) {
180 buf[i] = readb(this->IO_ADDR_R); 165 buf[i] = readb(this->IO_ADDR_R);
181 au_sync(); 166 au_sync();
182 } 167 }
@@ -195,7 +180,7 @@ static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
195 int i; 180 int i;
196 struct nand_chip *this = mtd->priv; 181 struct nand_chip *this = mtd->priv;
197 182
198 for (i=0; i<len; i++) { 183 for (i = 0; i < len; i++) {
199 if (buf[i] != readb(this->IO_ADDR_R)) 184 if (buf[i] != readb(this->IO_ADDR_R))
200 return -EFAULT; 185 return -EFAULT;
201 au_sync(); 186 au_sync();
@@ -219,7 +204,7 @@ static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
219 u16 *p = (u16 *) buf; 204 u16 *p = (u16 *) buf;
220 len >>= 1; 205 len >>= 1;
221 206
222 for (i=0; i<len; i++) { 207 for (i = 0; i < len; i++) {
223 writew(p[i], this->IO_ADDR_W); 208 writew(p[i], this->IO_ADDR_W);
224 au_sync(); 209 au_sync();
225 } 210 }
@@ -241,7 +226,7 @@ static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
241 u16 *p = (u16 *) buf; 226 u16 *p = (u16 *) buf;
242 len >>= 1; 227 len >>= 1;
243 228
244 for (i=0; i<len; i++) { 229 for (i = 0; i < len; i++) {
245 p[i] = readw(this->IO_ADDR_R); 230 p[i] = readw(this->IO_ADDR_R);
246 au_sync(); 231 au_sync();
247 } 232 }
@@ -262,7 +247,7 @@ static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
262 u16 *p = (u16 *) buf; 247 u16 *p = (u16 *) buf;
263 len >>= 1; 248 len >>= 1;
264 249
265 for (i=0; i<len; i++) { 250 for (i = 0; i < len; i++) {
266 if (p[i] != readw(this->IO_ADDR_R)) 251 if (p[i] != readw(this->IO_ADDR_R))
267 return -EFAULT; 252 return -EFAULT;
268 au_sync(); 253 au_sync();
@@ -270,32 +255,52 @@ static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
270 return 0; 255 return 0;
271} 256}
272 257
258/* Select the chip by setting nCE to low */
259#define NAND_CTL_SETNCE 1
260/* Deselect the chip by setting nCE to high */
261#define NAND_CTL_CLRNCE 2
262/* Select the command latch by setting CLE to high */
263#define NAND_CTL_SETCLE 3
264/* Deselect the command latch by setting CLE to low */
265#define NAND_CTL_CLRCLE 4
266/* Select the address latch by setting ALE to high */
267#define NAND_CTL_SETALE 5
268/* Deselect the address latch by setting ALE to low */
269#define NAND_CTL_CLRALE 6
273 270
274static void au1550_hwcontrol(struct mtd_info *mtd, int cmd) 271static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
275{ 272{
276 register struct nand_chip *this = mtd->priv; 273 register struct nand_chip *this = mtd->priv;
277 274
278 switch(cmd){ 275 switch (cmd) {
279 276
280 case NAND_CTL_SETCLE: this->IO_ADDR_W = p_nand + MEM_STNAND_CMD; break; 277 case NAND_CTL_SETCLE:
281 case NAND_CTL_CLRCLE: this->IO_ADDR_W = p_nand + MEM_STNAND_DATA; break; 278 this->IO_ADDR_W = p_nand + MEM_STNAND_CMD;
279 break;
280
281 case NAND_CTL_CLRCLE:
282 this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
283 break;
284
285 case NAND_CTL_SETALE:
286 this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR;
287 break;
282 288
283 case NAND_CTL_SETALE: this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR; break;
284 case NAND_CTL_CLRALE: 289 case NAND_CTL_CLRALE:
285 this->IO_ADDR_W = p_nand + MEM_STNAND_DATA; 290 this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
286 /* FIXME: Nobody knows why this is neccecary, 291 /* FIXME: Nobody knows why this is necessary,
287 * but it works only that way */ 292 * but it works only that way */
288 udelay(1); 293 udelay(1);
289 break; 294 break;
290 295
291 case NAND_CTL_SETNCE: 296 case NAND_CTL_SETNCE:
292 /* assert (force assert) chip enable */ 297 /* assert (force assert) chip enable */
293 au_writel((1<<(4+NAND_CS)) , MEM_STNDCTL); break; 298 au_writel((1 << (4 + NAND_CS)), MEM_STNDCTL);
294 break; 299 break;
295 300
296 case NAND_CTL_CLRNCE: 301 case NAND_CTL_CLRNCE:
297 /* deassert chip enable */ 302 /* deassert chip enable */
298 au_writel(0, MEM_STNDCTL); break; 303 au_writel(0, MEM_STNDCTL);
299 break; 304 break;
300 } 305 }
301 306
@@ -312,69 +317,200 @@ int au1550_device_ready(struct mtd_info *mtd)
312 return ret; 317 return ret;
313} 318}
314 319
320/**
321 * au1550_select_chip - control -CE line
322 * Forbid driving -CE manually permitting the NAND controller to do this.
323 * Keeping -CE asserted during the whole sector reads interferes with the
324 * NOR flash and PCMCIA drivers as it causes contention on the static bus.
325 * We only have to hold -CE low for the NAND read commands since the flash
326 * chip needs it to be asserted during chip not ready time but the NAND
327 * controller keeps it released.
328 *
329 * @mtd: MTD device structure
330 * @chip: chipnumber to select, -1 for deselect
331 */
332static void au1550_select_chip(struct mtd_info *mtd, int chip)
333{
334}
335
336/**
337 * au1550_command - Send command to NAND device
338 * @mtd: MTD device structure
339 * @command: the command to be sent
340 * @column: the column address for this command, -1 if none
341 * @page_addr: the page address for this command, -1 if none
342 */
343static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
344{
345 register struct nand_chip *this = mtd->priv;
346 int ce_override = 0, i;
347 ulong flags;
348
349 /* Begin command latch cycle */
350 au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
351 /*
352 * Write out the command to the device.
353 */
354 if (command == NAND_CMD_SEQIN) {
355 int readcmd;
356
357 if (column >= mtd->writesize) {
358 /* OOB area */
359 column -= mtd->writesize;
360 readcmd = NAND_CMD_READOOB;
361 } else if (column < 256) {
362 /* First 256 bytes --> READ0 */
363 readcmd = NAND_CMD_READ0;
364 } else {
365 column -= 256;
366 readcmd = NAND_CMD_READ1;
367 }
368 au1550_write_byte(mtd, readcmd);
369 }
370 au1550_write_byte(mtd, command);
371
372 /* Set ALE and clear CLE to start address cycle */
373 au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
374
375 if (column != -1 || page_addr != -1) {
376 au1550_hwcontrol(mtd, NAND_CTL_SETALE);
377
378 /* Serially input address */
379 if (column != -1) {
380 /* Adjust columns for 16 bit buswidth */
381 if (this->options & NAND_BUSWIDTH_16)
382 column >>= 1;
383 au1550_write_byte(mtd, column);
384 }
385 if (page_addr != -1) {
386 au1550_write_byte(mtd, (u8)(page_addr & 0xff));
387
388 if (command == NAND_CMD_READ0 ||
389 command == NAND_CMD_READ1 ||
390 command == NAND_CMD_READOOB) {
391 /*
392 * NAND controller will release -CE after
393 * the last address byte is written, so we'll
394 * have to forcibly assert it. No interrupts
395 * are allowed while we do this as we don't
396 * want the NOR flash or PCMCIA drivers to
397 * steal our precious bytes of data...
398 */
399 ce_override = 1;
400 local_irq_save(flags);
401 au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
402 }
403
404 au1550_write_byte(mtd, (u8)(page_addr >> 8));
405
406 /* One more address cycle for devices > 32MiB */
407 if (this->chipsize > (32 << 20))
408 au1550_write_byte(mtd, (u8)((page_addr >> 16) & 0x0f));
409 }
410 /* Latch in address */
411 au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
412 }
413
414 /*
415 * Program and erase have their own busy handlers.
416 * Status and sequential in need no delay.
417 */
418 switch (command) {
419
420 case NAND_CMD_PAGEPROG:
421 case NAND_CMD_ERASE1:
422 case NAND_CMD_ERASE2:
423 case NAND_CMD_SEQIN:
424 case NAND_CMD_STATUS:
425 return;
426
427 case NAND_CMD_RESET:
428 break;
429
430 case NAND_CMD_READ0:
431 case NAND_CMD_READ1:
432 case NAND_CMD_READOOB:
433 /* Check if we're really driving -CE low (just in case) */
434 if (unlikely(!ce_override))
435 break;
436
437 /* Apply a short delay always to ensure that we do wait tWB. */
438 ndelay(100);
439 /* Wait for a chip to become ready... */
440 for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
441 udelay(1);
442
443 /* Release -CE and re-enable interrupts. */
444 au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
445 local_irq_restore(flags);
446 return;
447 }
448 /* Apply this short delay always to ensure that we do wait tWB. */
449 ndelay(100);
450
451 while(!this->dev_ready(mtd));
452}
453
454
315/* 455/*
316 * Main initialization routine 456 * Main initialization routine
317 */ 457 */
318int __init au1xxx_nand_init (void) 458static int __init au1xxx_nand_init(void)
319{ 459{
320 struct nand_chip *this; 460 struct nand_chip *this;
321 u16 boot_swapboot = 0; /* default value */ 461 u16 boot_swapboot = 0; /* default value */
322 int retval; 462 int retval;
323 u32 mem_staddr; 463 u32 mem_staddr;
324 u32 nand_phys; 464 u32 nand_phys;
325 465
326 /* Allocate memory for MTD device structure and private data */ 466 /* Allocate memory for MTD device structure and private data */
327 au1550_mtd = kmalloc (sizeof(struct mtd_info) + 467 au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
328 sizeof (struct nand_chip), GFP_KERNEL);
329 if (!au1550_mtd) { 468 if (!au1550_mtd) {
330 printk ("Unable to allocate NAND MTD dev structure.\n"); 469 printk("Unable to allocate NAND MTD dev structure.\n");
331 return -ENOMEM; 470 return -ENOMEM;
332 } 471 }
333 472
334 /* Get pointer to private data */ 473 /* Get pointer to private data */
335 this = (struct nand_chip *) (&au1550_mtd[1]); 474 this = (struct nand_chip *)(&au1550_mtd[1]);
336 475
337 /* Initialize structures */ 476 /* Initialize structures */
338 memset((char *) au1550_mtd, 0, sizeof(struct mtd_info)); 477 memset(au1550_mtd, 0, sizeof(struct mtd_info));
339 memset((char *) this, 0, sizeof(struct nand_chip)); 478 memset(this, 0, sizeof(struct nand_chip));
340 479
341 /* Link the private data with the MTD structure */ 480 /* Link the private data with the MTD structure */
342 au1550_mtd->priv = this; 481 au1550_mtd->priv = this;
482 au1550_mtd->owner = THIS_MODULE;
343 483
344 484
345 /* disable interrupts */ 485 /* MEM_STNDCTL: disable ints, disable nand boot */
346 au_writel(au_readl(MEM_STNDCTL) & ~(1<<8), MEM_STNDCTL); 486 au_writel(0, MEM_STNDCTL);
347
348 /* disable NAND boot */
349 au_writel(au_readl(MEM_STNDCTL) & ~(1<<0), MEM_STNDCTL);
350 487
351#ifdef CONFIG_MIPS_PB1550 488#ifdef CONFIG_MIPS_PB1550
352 /* set gpio206 high */ 489 /* set gpio206 high */
353 au_writel(au_readl(GPIO2_DIR) & ~(1<<6), GPIO2_DIR); 490 au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR);
354 491
355 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7<<1)) | 492 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr->status >> 6) & 0x1);
356 ((bcsr->status >> 6) & 0x1);
357 switch (boot_swapboot) { 493 switch (boot_swapboot) {
358 case 0: 494 case 0:
359 case 2: 495 case 2:
360 case 8: 496 case 8:
361 case 0xC: 497 case 0xC:
362 case 0xD: 498 case 0xD:
363 /* x16 NAND Flash */ 499 /* x16 NAND Flash */
364 nand_width = 0; 500 nand_width = 0;
365 break; 501 break;
366 case 1: 502 case 1:
367 case 9: 503 case 9:
368 case 3: 504 case 3:
369 case 0xE: 505 case 0xE:
370 case 0xF: 506 case 0xF:
371 /* x8 NAND Flash */ 507 /* x8 NAND Flash */
372 nand_width = 1; 508 nand_width = 1;
373 break; 509 break;
374 default: 510 default:
375 printk("Pb1550 NAND: bad boot:swap\n"); 511 printk("Pb1550 NAND: bad boot:swap\n");
376 retval = -EINVAL; 512 retval = -EINVAL;
377 goto outmem; 513 goto outmem;
378 } 514 }
379#endif 515#endif
380 516
@@ -424,21 +560,22 @@ int __init au1xxx_nand_init (void)
424 560
425 /* make controller and MTD agree */ 561 /* make controller and MTD agree */
426 if (NAND_CS == 0) 562 if (NAND_CS == 0)
427 nand_width = au_readl(MEM_STCFG0) & (1<<22); 563 nand_width = au_readl(MEM_STCFG0) & (1 << 22);
428 if (NAND_CS == 1) 564 if (NAND_CS == 1)
429 nand_width = au_readl(MEM_STCFG1) & (1<<22); 565 nand_width = au_readl(MEM_STCFG1) & (1 << 22);
430 if (NAND_CS == 2) 566 if (NAND_CS == 2)
431 nand_width = au_readl(MEM_STCFG2) & (1<<22); 567 nand_width = au_readl(MEM_STCFG2) & (1 << 22);
432 if (NAND_CS == 3) 568 if (NAND_CS == 3)
433 nand_width = au_readl(MEM_STCFG3) & (1<<22); 569 nand_width = au_readl(MEM_STCFG3) & (1 << 22);
434
435 570
436 /* Set address of hardware control function */ 571 /* Set address of hardware control function */
437 this->hwcontrol = au1550_hwcontrol;
438 this->dev_ready = au1550_device_ready; 572 this->dev_ready = au1550_device_ready;
573 this->select_chip = au1550_select_chip;
574 this->cmdfunc = au1550_command;
575
439 /* 30 us command delay time */ 576 /* 30 us command delay time */
440 this->chip_delay = 30; 577 this->chip_delay = 30;
441 this->eccmode = NAND_ECC_SOFT; 578 this->ecc.mode = NAND_ECC_SOFT;
442 579
443 this->options = NAND_NO_AUTOINCR; 580 this->options = NAND_NO_AUTOINCR;
444 581
@@ -446,15 +583,14 @@ int __init au1xxx_nand_init (void)
446 this->options |= NAND_BUSWIDTH_16; 583 this->options |= NAND_BUSWIDTH_16;
447 584
448 this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte; 585 this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte;
449 this->write_byte = (!nand_width) ? au_write_byte16 : au_write_byte; 586 au1550_write_byte = (!nand_width) ? au_write_byte16 : au_write_byte;
450 this->write_word = au_write_word;
451 this->read_word = au_read_word; 587 this->read_word = au_read_word;
452 this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf; 588 this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf;
453 this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf; 589 this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf;
454 this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf; 590 this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf;
455 591
456 /* Scan to find existence of the device */ 592 /* Scan to find existence of the device */
457 if (nand_scan (au1550_mtd, 1)) { 593 if (nand_scan(au1550_mtd, 1)) {
458 retval = -ENXIO; 594 retval = -ENXIO;
459 goto outio; 595 goto outio;
460 } 596 }
@@ -465,10 +601,10 @@ int __init au1xxx_nand_init (void)
465 return 0; 601 return 0;
466 602
467 outio: 603 outio:
468 iounmap ((void *)p_nand); 604 iounmap((void *)p_nand);
469 605
470 outmem: 606 outmem:
471 kfree (au1550_mtd); 607 kfree(au1550_mtd);
472 return retval; 608 return retval;
473} 609}
474 610
@@ -477,22 +613,21 @@ module_init(au1xxx_nand_init);
477/* 613/*
478 * Clean up routine 614 * Clean up routine
479 */ 615 */
480#ifdef MODULE 616static void __exit au1550_cleanup(void)
481static void __exit au1550_cleanup (void)
482{ 617{
483 struct nand_chip *this = (struct nand_chip *) &au1550_mtd[1]; 618 struct nand_chip *this = (struct nand_chip *)&au1550_mtd[1];
484 619
485 /* Release resources, unregister device */ 620 /* Release resources, unregister device */
486 nand_release (au1550_mtd); 621 nand_release(au1550_mtd);
487 622
488 /* Free the MTD device structure */ 623 /* Free the MTD device structure */
489 kfree (au1550_mtd); 624 kfree(au1550_mtd);
490 625
491 /* Unmap */ 626 /* Unmap */
492 iounmap ((void *)p_nand); 627 iounmap((void *)p_nand);
493} 628}
629
494module_exit(au1550_cleanup); 630module_exit(au1550_cleanup);
495#endif
496 631
497MODULE_LICENSE("GPL"); 632MODULE_LICENSE("GPL");
498MODULE_AUTHOR("Embedded Edge, LLC"); 633MODULE_AUTHOR("Embedded Edge, LLC");
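
au1550_command() above emits a third row-address byte only for chips larger than 32MiB. Assuming 512-byte pages (the small-page parts this driver era targets), the arithmetic works out: 32MiB is 65536 pages, which fits in two address bytes, while anything bigger needs a third. A quick user-space check of that rule:

/* Stand-alone check of the "> 32MiB needs one more address cycle" rule,
 * assuming 512-byte pages. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sizes[] = { 32ULL << 20, 64ULL << 20, 128ULL << 20 };

	for (int i = 0; i < 3; i++) {
		uint64_t pages = sizes[i] / 512;	/* one row address per page */
		int row_bits = 0;

		while ((1ULL << row_bits) < pages)
			row_bits++;
		printf("%3llu MiB: %llu pages, %d row bits -> %d address bytes\n",
		       (unsigned long long)(sizes[i] >> 20),
		       (unsigned long long)pages, row_bits,
		       (row_bits + 7) / 8);
	}
	return 0;
}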
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index a3c7fea404d0..fe94ae9ae1f2 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -4,7 +4,7 @@
4 * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de> 4 * Copyright (c) 2002 Thomas Gleixner <tgxl@linutronix.de>
5 * 5 *
6 * Derived from drivers/mtd/spia.c 6 * Derived from drivers/mtd/spia.c
7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) 7 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
8 * 8 *
9 * $Id: autcpu12.c,v 1.23 2005/11/07 11:14:30 gleixner Exp $ 9 * $Id: autcpu12.c,v 1.23 2005/11/07 11:14:30 gleixner Exp $
10 * 10 *
@@ -42,12 +42,7 @@
42 * MTD structure for AUTCPU12 board 42 * MTD structure for AUTCPU12 board
43 */ 43 */
44static struct mtd_info *autcpu12_mtd = NULL; 44static struct mtd_info *autcpu12_mtd = NULL;
45 45static void __iomem *autcpu12_fio_base;
46static int autcpu12_io_base = CS89712_VIRT_BASE;
47static int autcpu12_fio_pbase = AUTCPU12_PHYS_SMC;
48static int autcpu12_fio_ctrl = AUTCPU12_SMC_SELECT_OFFSET;
49static int autcpu12_pedr = AUTCPU12_SMC_PORT_OFFSET;
50static void __iomem * autcpu12_fio_base;
51 46
52/* 47/*
53 * Define partitions for flash devices 48 * Define partitions for flash devices
@@ -94,108 +89,131 @@ static struct mtd_partition partition_info128k[] = {
94#define NUM_PARTITIONS128K 2 89#define NUM_PARTITIONS128K 2
95/* 90/*
96 * hardware specific access to control-lines 91 * hardware specific access to control-lines
97*/ 92 *
98static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd) 93 * ALE bit 4 autcpu12_pedr
94 * CLE bit 5 autcpu12_pedr
95 * NCE bit 0 fio_ctrl
96 *
97 */
98static void autcpu12_hwcontrol(struct mtd_info *mtd, int cmd,
99 unsigned int ctrl)
99{ 100{
101 struct nand_chip *chip = mtd->priv;
100 102
101 switch(cmd){ 103 if (ctrl & NAND_CTRL_CHANGE) {
102 104 void __iomem *addr
103 case NAND_CTL_SETCLE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) |= AUTCPU12_SMC_CLE; break; 105 unsigned char bits;
104 case NAND_CTL_CLRCLE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) &= ~AUTCPU12_SMC_CLE; break;
105 106
106 case NAND_CTL_SETALE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) |= AUTCPU12_SMC_ALE; break; 107 addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
107 case NAND_CTL_CLRALE: (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) &= ~AUTCPU12_SMC_ALE; break; 108 bits = (ctrl & NAND_CLE) << 4;
109 bits |= (ctrl & NAND_ALE) << 2;
110 writeb((readb(addr) & ~0x30) | bits, addr);
108 111
109 case NAND_CTL_SETNCE: (*(volatile unsigned char *) (autcpu12_fio_base + autcpu12_fio_ctrl)) = 0x01; break; 112 addr = autcpu12_fio_base + AUTCPU12_SMC_SELECT_OFFSET;
110 case NAND_CTL_CLRNCE: (*(volatile unsigned char *) (autcpu12_fio_base + autcpu12_fio_ctrl)) = 0x00; break; 113 writeb((readb(addr) & ~0x1) | (ctrl & NAND_NCE), addr);
111 } 114 }
115
116 if (cmd != NAND_CMD_NONE)
117 writeb(cmd, chip->IO_ADDR_W);
112} 118}
113 119
114/* 120/*
115* read device ready pin 121 * read device ready pin
116*/ 122 */
117int autcpu12_device_ready(struct mtd_info *mtd) 123int autcpu12_device_ready(struct mtd_info *mtd)
118{ 124{
125 void __iomem *addr = CS89712_VIRT_BASE + AUTCPU12_SMC_PORT_OFFSET;
119 126
120 return ( (*(volatile unsigned char *) (autcpu12_io_base + autcpu12_pedr)) & AUTCPU12_SMC_RDY) ? 1 : 0; 127 return readb(addr) & AUTCPU12_SMC_RDY;
121
122} 128}
123 129
124/* 130/*
125 * Main initialization routine 131 * Main initialization routine
126 */ 132 */
127int __init autcpu12_init (void) 133static int __init autcpu12_init(void)
128{ 134{
129 struct nand_chip *this; 135 struct nand_chip *this;
130 int err = 0; 136 int err = 0;
131 137
132 /* Allocate memory for MTD device structure and private data */ 138 /* Allocate memory for MTD device structure and private data */
133 autcpu12_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), 139 autcpu12_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
134 GFP_KERNEL); 140 GFP_KERNEL);
135 if (!autcpu12_mtd) { 141 if (!autcpu12_mtd) {
136 printk ("Unable to allocate AUTCPU12 NAND MTD device structure.\n"); 142 printk("Unable to allocate AUTCPU12 NAND MTD device structure.\n");
137 err = -ENOMEM; 143 err = -ENOMEM;
138 goto out; 144 goto out;
139 } 145 }
140 146
141 /* map physical adress */ 147 /* map physical adress */
142 autcpu12_fio_base = ioremap(autcpu12_fio_pbase,SZ_1K); 148 autcpu12_fio_base = ioremap(AUTCPU12_PHYS_SMC, SZ_1K);
143 if(!autcpu12_fio_base){ 149 if (!autcpu12_fio_base) {
144 printk("Ioremap autcpu12 SmartMedia Card failed\n"); 150 printk("Ioremap autcpu12 SmartMedia Card failed\n");
145 err = -EIO; 151 err = -EIO;
146 goto out_mtd; 152 goto out_mtd;
147 } 153 }
148 154
149 /* Get pointer to private data */ 155 /* Get pointer to private data */
150 this = (struct nand_chip *) (&autcpu12_mtd[1]); 156 this = (struct nand_chip *)(&autcpu12_mtd[1]);
151 157
152 /* Initialize structures */ 158 /* Initialize structures */
153 memset((char *) autcpu12_mtd, 0, sizeof(struct mtd_info)); 159 memset(autcpu12_mtd, 0, sizeof(struct mtd_info));
154 memset((char *) this, 0, sizeof(struct nand_chip)); 160 memset(this, 0, sizeof(struct nand_chip));
155 161
156 /* Link the private data with the MTD structure */ 162 /* Link the private data with the MTD structure */
157 autcpu12_mtd->priv = this; 163 autcpu12_mtd->priv = this;
164 autcpu12_mtd->owner = THIS_MODULE;
158 165
159 /* Set address of NAND IO lines */ 166 /* Set address of NAND IO lines */
160 this->IO_ADDR_R = autcpu12_fio_base; 167 this->IO_ADDR_R = autcpu12_fio_base;
161 this->IO_ADDR_W = autcpu12_fio_base; 168 this->IO_ADDR_W = autcpu12_fio_base;
162 this->hwcontrol = autcpu12_hwcontrol; 169 this->cmd_ctrl = autcpu12_hwcontrol;
163 this->dev_ready = autcpu12_device_ready; 170 this->dev_ready = autcpu12_device_ready;
164 /* 20 us command delay time */ 171 /* 20 us command delay time */
165 this->chip_delay = 20; 172 this->chip_delay = 20;
166 this->eccmode = NAND_ECC_SOFT; 173 this->ecc.mode = NAND_ECC_SOFT;
167 174
168 /* Enable the following for a flash based bad block table */ 175 /* Enable the following for a flash based bad block table */
169 /* 176 /*
170 this->options = NAND_USE_FLASH_BBT; 177 this->options = NAND_USE_FLASH_BBT;
171 */ 178 */
172 this->options = NAND_USE_FLASH_BBT; 179 this->options = NAND_USE_FLASH_BBT;
173 180
174 /* Scan to find existance of the device */ 181 /* Scan to find existance of the device */
175 if (nand_scan (autcpu12_mtd, 1)) { 182 if (nand_scan(autcpu12_mtd, 1)) {
176 err = -ENXIO; 183 err = -ENXIO;
177 goto out_ior; 184 goto out_ior;
178 } 185 }
179 186
180 /* Register the partitions */ 187 /* Register the partitions */
181 switch(autcpu12_mtd->size){ 188 switch (autcpu12_mtd->size) {
182 case SZ_16M: add_mtd_partitions(autcpu12_mtd, partition_info16k, NUM_PARTITIONS16K); break; 189 case SZ_16M:
183 case SZ_32M: add_mtd_partitions(autcpu12_mtd, partition_info32k, NUM_PARTITIONS32K); break; 190 add_mtd_partitions(autcpu12_mtd, partition_info16k,
184 case SZ_64M: add_mtd_partitions(autcpu12_mtd, partition_info64k, NUM_PARTITIONS64K); break; 191 NUM_PARTITIONS16K);
185 case SZ_128M: add_mtd_partitions(autcpu12_mtd, partition_info128k, NUM_PARTITIONS128K); break; 192 break;
186 default: { 193 case SZ_32M:
187 printk ("Unsupported SmartMedia device\n"); 194 add_mtd_partitions(autcpu12_mtd, partition_info32k,
195 NUM_PARTITIONS32K);
196 break;
197 case SZ_64M:
198 add_mtd_partitions(autcpu12_mtd, partition_info64k,
199 NUM_PARTITIONS64K);
200 break;
201 case SZ_128M:
202 add_mtd_partitions(autcpu12_mtd, partition_info128k,
203 NUM_PARTITIONS128K);
204 break;
205 default:
206 printk("Unsupported SmartMedia device\n");
188 err = -ENXIO; 207 err = -ENXIO;
189 goto out_ior; 208 goto out_ior;
190 }
191 } 209 }
192 goto out; 210 goto out;
193 211
194out_ior: 212 out_ior:
195 iounmap((void *)autcpu12_fio_base); 213 iounmap(autcpu12_fio_base);
196out_mtd: 214 out_mtd:
197 kfree (autcpu12_mtd); 215 kfree(autcpu12_mtd);
198out: 216 out:
199 return err; 217 return err;
200} 218}
201 219
@@ -204,20 +222,19 @@ module_init(autcpu12_init);
204/* 222/*
205 * Clean up routine 223 * Clean up routine
206 */ 224 */
207#ifdef MODULE 225static void __exit autcpu12_cleanup(void)
208static void __exit autcpu12_cleanup (void)
209{ 226{
210 /* Release resources, unregister device */ 227 /* Release resources, unregister device */
211 nand_release (autcpu12_mtd); 228 nand_release(autcpu12_mtd);
212 229
213 /* unmap physical adress */ 230 /* unmap physical adress */
214 iounmap((void *)autcpu12_fio_base); 231 iounmap(autcpu12_fio_base);
215 232
216 /* Free the MTD device structure */ 233 /* Free the MTD device structure */
217 kfree (autcpu12_mtd); 234 kfree(autcpu12_mtd);
218} 235}
236
219module_exit(autcpu12_cleanup); 237module_exit(autcpu12_cleanup);
220#endif
221 238
222MODULE_LICENSE("GPL"); 239MODULE_LICENSE("GPL");
223MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>"); 240MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
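
The reworked autcpu12_hwcontrol() above packs the generic NAND_CLE/NAND_ALE bits into the positions its comment documents (CLE on bit 5, ALE on bit 4 of autcpu12_pedr). The small user-space check below verifies that shift mapping; the constants are local stand-ins for the <linux/mtd/nand.h> values.

/* Stand-alone check of the CLE/ALE bit packing used by autcpu12_hwcontrol(). */
#include <stdio.h>

#define NAND_CLE 0x02	/* stand-in values for illustration */
#define NAND_ALE 0x04

int main(void)
{
	for (unsigned ctrl = 0; ctrl < 8; ctrl++) {
		unsigned bits = (ctrl & NAND_CLE) << 4;	/* bit 1 -> bit 5 */

		bits |= (ctrl & NAND_ALE) << 2;		/* bit 2 -> bit 4 */
		printf("ctrl=%u -> pedr bits=0x%02x\n", ctrl, bits);
	}
	return 0;
}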
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
new file mode 100644
index 000000000000..e0a1d386e581
--- /dev/null
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -0,0 +1,353 @@
1/*
2 * drivers/mtd/nand/cs553x_nand.c
3 *
4 * (C) 2005, 2006 Red Hat Inc.
5 *
6 * Author: David Woodhouse <dwmw2@infradead.org>
7 * Tom Sylla <tom.sylla@amd.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * Overview:
14 * This is a device driver for the NAND flash controller found on
15 * the AMD CS5535/CS5536 companion chipsets for the Geode processor.
16 *
17 */
18
19#include <linux/slab.h>
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/delay.h>
23#include <linux/pci.h>
24#include <linux/mtd/mtd.h>
25#include <linux/mtd/nand.h>
26#include <linux/mtd/nand_ecc.h>
27#include <linux/mtd/partitions.h>
28
29#include <asm/msr.h>
30#include <asm/io.h>
31
32#define NR_CS553X_CONTROLLERS 4
33
34#define MSR_DIVIL_GLD_CAP 0x51400000 /* DIVIL capabilities */
35#define CAP_CS5535 0x2df000ULL
36#define CAP_CS5536 0x5df500ULL
37
38/* NAND Timing MSRs */
39#define MSR_NANDF_DATA 0x5140001b /* NAND Flash Data Timing MSR */
40#define MSR_NANDF_CTL 0x5140001c /* NAND Flash Control Timing */
41#define MSR_NANDF_RSVD 0x5140001d /* Reserved */
42
43/* NAND BAR MSRs */
44#define MSR_DIVIL_LBAR_FLSH0 0x51400010 /* Flash Chip Select 0 */
45#define MSR_DIVIL_LBAR_FLSH1 0x51400011 /* Flash Chip Select 1 */
46#define MSR_DIVIL_LBAR_FLSH2 0x51400012 /* Flash Chip Select 2 */
47#define MSR_DIVIL_LBAR_FLSH3 0x51400013 /* Flash Chip Select 3 */
48 /* Each made up of... */
49#define FLSH_LBAR_EN (1ULL<<32)
50#define FLSH_NOR_NAND (1ULL<<33) /* 1 for NAND */
51#define FLSH_MEM_IO (1ULL<<34) /* 1 for MMIO */
52 /* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
53 /* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
54
55/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
56#define MSR_DIVIL_BALL_OPTS 0x51400015
57#define PIN_OPT_IDE (1<<0) /* 0 for flash, 1 for IDE */
58
59/* Registers within the NAND flash controller BAR -- memory mapped */
60#define MM_NAND_DATA 0x00 /* 0 to 0x7ff, in fact */
61#define MM_NAND_CTL 0x800 /* Any even address 0x800-0x80e */
62#define MM_NAND_IO 0x801 /* Any odd address 0x801-0x80f */
63#define MM_NAND_STS 0x810
64#define MM_NAND_ECC_LSB 0x811
65#define MM_NAND_ECC_MSB 0x812
66#define MM_NAND_ECC_COL 0x813
67#define MM_NAND_LAC 0x814
68#define MM_NAND_ECC_CTL 0x815
69
70/* Registers within the NAND flash controller BAR -- I/O mapped */
71#define IO_NAND_DATA 0x00 /* 0 to 3, in fact */
72#define IO_NAND_CTL 0x04
73#define IO_NAND_IO 0x05
74#define IO_NAND_STS 0x06
75#define IO_NAND_ECC_CTL 0x08
76#define IO_NAND_ECC_LSB 0x09
77#define IO_NAND_ECC_MSB 0x0a
78#define IO_NAND_ECC_COL 0x0b
79#define IO_NAND_LAC 0x0c
80
81#define CS_NAND_CTL_DIST_EN (1<<4) /* Enable NAND Distract interrupt */
82#define CS_NAND_CTL_RDY_INT_MASK (1<<3) /* Enable RDY/BUSY# interrupt */
83#define CS_NAND_CTL_ALE (1<<2)
84#define CS_NAND_CTL_CLE (1<<1)
85#define CS_NAND_CTL_CE (1<<0) /* Keep low; 1 to reset */
86
87#define CS_NAND_STS_FLASH_RDY (1<<3)
88#define CS_NAND_CTLR_BUSY (1<<2)
89#define CS_NAND_CMD_COMP (1<<1)
90#define CS_NAND_DIST_ST (1<<0)
91
92#define CS_NAND_ECC_PARITY (1<<2)
93#define CS_NAND_ECC_CLRECC (1<<1)
94#define CS_NAND_ECC_ENECC (1<<0)
95
96static void cs553x_read_buf(struct mtd_info *mtd, u_char *buf, int len)
97{
98 struct nand_chip *this = mtd->priv;
99
100 while (unlikely(len > 0x800)) {
101 memcpy_fromio(buf, this->IO_ADDR_R, 0x800);
102 buf += 0x800;
103 len -= 0x800;
104 }
105 memcpy_fromio(buf, this->IO_ADDR_R, len);
106}
107
108static void cs553x_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
109{
110 struct nand_chip *this = mtd->priv;
111
112 while (unlikely(len > 0x800)) {
113 memcpy_toio(this->IO_ADDR_R, buf, 0x800);
114 buf += 0x800;
115 len -= 0x800;
116 }
117 memcpy_toio(this->IO_ADDR_R, buf, len);
118}
119
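The two buffer helpers above split transfers at 0x800 bytes because the memory-mapped data window (MM_NAND_DATA) only spans offsets 0 to 0x7ff. A minimal standalone sketch of the same chunking pattern, with plain memcpy standing in for memcpy_fromio/memcpy_toio:

#include <stddef.h>
#include <string.h>

#define NAND_DATA_WINDOW 0x800	/* size of the mapped data window */

/* Read 'len' bytes through a fixed-size hardware window: anything larger
 * than the window is done in window-sized pieces from the same address. */
static void copy_from_window(unsigned char *dst, const void *window, size_t len)
{
	while (len > NAND_DATA_WINDOW) {
		memcpy(dst, window, NAND_DATA_WINDOW);
		dst += NAND_DATA_WINDOW;
		len -= NAND_DATA_WINDOW;
	}
	memcpy(dst, window, len);
}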
120static unsigned char cs553x_read_byte(struct mtd_info *mtd)
121{
122 struct nand_chip *this = mtd->priv;
123 return readb(this->IO_ADDR_R);
124}
125
126static void cs553x_write_byte(struct mtd_info *mtd, u_char byte)
127{
128 struct nand_chip *this = mtd->priv;
129 int i = 100000;
130
131 while (i && readb(this->IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
132 udelay(1);
133 i--;
134 }
135 writeb(byte, this->IO_ADDR_W + 0x801);
136}
137
138static void cs553x_hwcontrol(struct mtd_info *mtd, int cmd,
139 unsigned int ctrl)
140{
141 struct nand_chip *this = mtd->priv;
142 void __iomem *mmio_base = this->IO_ADDR_R;
143 if (ctrl & NAND_CTRL_CHANGE) {
144 unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
145 writeb(ctl, mmio_base + MM_NAND_CTL);
146 }
147 if (cmd != NAND_CMD_NONE)
148 cs553x_write_byte(mtd, cmd);
149}
150
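In cs553x_hwcontrol() above, the "^ 0x01" flips only the chip-enable bit: the nand core reports chip-selected as 1, while CS_NAND_CTL_CE must be kept low to keep the chip enabled (1 resets it, per the register comments earlier). A small standalone sketch of that mapping; the NAND_NCE/NAND_CLE/NAND_ALE values of 0x01/0x02/0x04 are assumed here, matching the CS_NAND_CTL_* bit layout:

#include <stdint.h>

#define NAND_NCE	 0x01	/* assumed nand-core bit positions */
#define NAND_CLE	 0x02
#define NAND_ALE	 0x04
#define NAND_CTRL_CHANGE 0x80

/* Translate the core's ctrl word into an MM_NAND_CTL value: CLE/ALE map
 * straight through, CE is inverted because it is active low in hardware. */
static uint8_t ctrl_to_nand_ctl(unsigned int ctrl)
{
	return (uint8_t)((ctrl & ~NAND_CTRL_CHANGE) ^ 0x01);
}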
151static int cs553x_device_ready(struct mtd_info *mtd)
152{
153 struct nand_chip *this = mtd->priv;
154 void __iomem *mmio_base = this->IO_ADDR_R;
155 unsigned char foo = readb(mmio_base + MM_NAND_STS);
156
157 return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
158}
159
160static void cs_enable_hwecc(struct mtd_info *mtd, int mode)
161{
162 struct nand_chip *this = mtd->priv;
163 void __iomem *mmio_base = this->IO_ADDR_R;
164
165 writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
166}
167
168static int cs_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
169{
170 uint32_t ecc;
171 struct nand_chip *this = mtd->priv;
172 void __iomem *mmio_base = this->IO_ADDR_R;
173
174 ecc = readl(mmio_base + MM_NAND_STS);
175
176 ecc_code[1] = ecc >> 8;
177 ecc_code[0] = ecc >> 16;
178 ecc_code[2] = ecc >> 24;
179 return 0;
180}
181
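cs_calculate_ecc() above picks the three ECC bytes out of a single 32-bit read starting at MM_NAND_STS (0x810), which also covers ECC_LSB (0x811), ECC_MSB (0x812) and ECC_COL (0x813). A standalone sketch of that extraction, assuming a little-endian 32-bit read so byte 1 of the word is ECC_LSB, byte 2 is ECC_MSB and byte 3 is ECC_COL:

#include <stdint.h>
#include <stdio.h>

/* Split the word read at MM_NAND_STS into the three ECC bytes in the
 * order the driver stores them: MSB, LSB, COL. */
static void extract_ecc(uint32_t word, uint8_t ecc_code[3])
{
	ecc_code[0] = word >> 16;	/* MM_NAND_ECC_MSB (offset 0x812) */
	ecc_code[1] = word >> 8;	/* MM_NAND_ECC_LSB (offset 0x811) */
	ecc_code[2] = word >> 24;	/* MM_NAND_ECC_COL (offset 0x813) */
}

int main(void)
{
	uint8_t ecc[3];

	extract_ecc(0xCCBBAA00, ecc);	/* made-up register contents */
	printf("%02x %02x %02x\n", ecc[0], ecc[1], ecc[2]);
	return 0;
}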
182static struct mtd_info *cs553x_mtd[4];
183
184static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
185{
186 int err = 0;
187 struct nand_chip *this;
188 struct mtd_info *new_mtd;
189
190 printk(KERN_NOTICE "Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n", cs, mmio?"MM":"P", adr);
191
192 if (!mmio) {
193 printk(KERN_NOTICE "PIO mode not yet implemented for CS553X NAND controller\n");
194 return -ENXIO;
195 }
196
197 /* Allocate memory for MTD device structure and private data */
198 new_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
199 if (!new_mtd) {
200 printk(KERN_WARNING "Unable to allocate CS553X NAND MTD device structure.\n");
201 err = -ENOMEM;
202 goto out;
203 }
204
205 /* Get pointer to private data */
206 this = (struct nand_chip *)(&new_mtd[1]);
207
208 /* Initialize structures */
209 memset(new_mtd, 0, sizeof(struct mtd_info));
210 memset(this, 0, sizeof(struct nand_chip));
211
212 /* Link the private data with the MTD structure */
213 new_mtd->priv = this;
214 new_mtd->owner = THIS_MODULE;
215
216 /* map physical address */
217 this->IO_ADDR_R = this->IO_ADDR_W = ioremap(adr, 4096);
218 if (!this->IO_ADDR_R) {
219 printk(KERN_WARNING "ioremap cs553x NAND @0x%08lx failed\n", adr);
220 err = -EIO;
221 goto out_mtd;
222 }
223
224 this->cmd_ctrl = cs553x_hwcontrol;
225 this->dev_ready = cs553x_device_ready;
226 this->read_byte = cs553x_read_byte;
227 this->read_buf = cs553x_read_buf;
228 this->write_buf = cs553x_write_buf;
229
230 this->chip_delay = 0;
231
232 this->ecc.mode = NAND_ECC_HW;
233 this->ecc.size = 256;
234 this->ecc.bytes = 3;
235 this->ecc.hwctl = cs_enable_hwecc;
236 this->ecc.calculate = cs_calculate_ecc;
237 this->ecc.correct = nand_correct_data;
238
239 /* Enable the following for a flash based bad block table */
240 this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
241
242 /* Scan to find existence of the device */
243 if (nand_scan(new_mtd, 1)) {
244 err = -ENXIO;
245 goto out_ior;
246 }
247
248 cs553x_mtd[cs] = new_mtd;
249 goto out;
250
251out_ior:
252 iounmap((void *)this->IO_ADDR_R);
253out_mtd:
254 kfree(new_mtd);
255out:
256 return err;
257}
258
259static int is_geode(void)
260{
261 /* These are the CPUs which will have a CS553[56] companion chip */
262 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
263 boot_cpu_data.x86 == 5 &&
264 boot_cpu_data.x86_model == 10)
265 return 1; /* Geode LX */
266
267 if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
268 boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
269 boot_cpu_data.x86 == 5 &&
270 boot_cpu_data.x86_model == 5)
271 return 1; /* Geode GX (née GX2) */
272
273 return 0;
274}
275
276static int __init cs553x_init(void)
277{
278 int err = -ENXIO;
279 int i;
280 uint64_t val;
281
282 /* If the CPU isn't a Geode GX or LX, abort */
283 if (!is_geode())
284 return -ENXIO;
285
286 /* If it doesn't have the CS553[56], abort */
287 rdmsrl(MSR_DIVIL_GLD_CAP, val);
288 val &= ~0xFFULL;
289 if (val != CAP_CS5535 && val != CAP_CS5536)
290 return -ENXIO;
291
292 /* If it doesn't have the NAND controller enabled, abort */
293 rdmsrl(MSR_DIVIL_BALL_OPTS, val);
294 if (val & 1) {
295 printk(KERN_INFO "CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
296 return -ENXIO;
297 }
298
299 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
300 rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
301
302 if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
303 err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
304 }
305
306 /* Register all devices together here. This means we can easily hack it to
307 do mtdconcat etc. if we want to. */
308 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
309 if (cs553x_mtd[i]) {
310 add_mtd_device(cs553x_mtd[i]);
311
312 /* If any devices registered, return success. Else the last error. */
313 err = 0;
314 }
315 }
316
317 return err;
318}
319
320module_init(cs553x_init);
321
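The probe above gates on two MSRs before touching any controller: the DIVIL capability MSR, whose low byte is masked off before comparing against the CS5535/CS5536 signatures, and the ball-options MSR, whose bit 0 selects IDE rather than flash on the shared pins. A standalone sketch of those two checks; treating the masked-off low byte as a revision field is an assumption consistent with the mask used above:

#include <stdint.h>

#define CAP_CS5535	0x2df000ULL
#define CAP_CS5536	0x5df500ULL
#define PIN_OPT_IDE	(1 << 0)

static int cs553x_present(uint64_t divil_gld_cap)
{
	divil_gld_cap &= ~0xFFULL;	/* drop the low (revision) byte */
	return divil_gld_cap == CAP_CS5535 || divil_gld_cap == CAP_CS5536;
}

static int flash_pins_enabled(uint64_t divil_ball_opts)
{
	/* 0 = flash, 1 = IDE on the shared pins */
	return !(divil_ball_opts & PIN_OPT_IDE);
}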
322static void __exit cs553x_cleanup(void)
323{
324 int i;
325
326 for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
327 struct mtd_info *mtd = cs553x_mtd[i];
328 struct nand_chip *this;
329 void __iomem *mmio_base;
330
331 if (!mtd)
332 break;
333
334 this = cs553x_mtd[i]->priv;
335 mmio_base = this->IO_ADDR_R;
336
337 /* Release resources, unregister device */
338 nand_release(cs553x_mtd[i]);
339 cs553x_mtd[i] = NULL;
340
341 /* unmap physical address */
342 iounmap(mmio_base);
343
344 /* Free the MTD device structure */
345 kfree(mtd);
346 }
347}
348
349module_exit(cs553x_cleanup);
350
351MODULE_LICENSE("GPL");
352MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
353MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index ec5e45e4e4ef..6107f532855b 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -58,10 +58,10 @@ static unsigned long __initdata doc_locations[] = {
58 0xe4000000, 58 0xe4000000,
59#elif defined(CONFIG_MOMENCO_OCELOT) 59#elif defined(CONFIG_MOMENCO_OCELOT)
60 0x2f000000, 60 0x2f000000,
61 0xff000000, 61 0xff000000,
62#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C) 62#elif defined(CONFIG_MOMENCO_OCELOT_G) || defined (CONFIG_MOMENCO_OCELOT_C)
63 0xff000000, 63 0xff000000,
64##else 64#else
65#warning Unknown architecture for DiskOnChip. No default probe locations defined 65#warning Unknown architecture for DiskOnChip. No default probe locations defined
66#endif 66#endif
67 0xffffffff }; 67 0xffffffff };
@@ -73,7 +73,7 @@ struct doc_priv {
73 unsigned long physadr; 73 unsigned long physadr;
74 u_char ChipID; 74 u_char ChipID;
75 u_char CDSNControl; 75 u_char CDSNControl;
76 int chips_per_floor; /* The number of chips detected on each floor */ 76 int chips_per_floor; /* The number of chips detected on each floor */
77 int curfloor; 77 int curfloor;
78 int curchip; 78 int curchip;
79 int mh0_page; 79 int mh0_page;
@@ -84,6 +84,7 @@ struct doc_priv {
84/* This is the syndrome computed by the HW ecc generator upon reading an empty 84/* This is the syndrome computed by the HW ecc generator upon reading an empty
85 page, one with all 0xff for data and stored ecc code. */ 85 page, one with all 0xff for data and stored ecc code. */
86static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a }; 86static u_char empty_read_syndrome[6] = { 0x26, 0xff, 0x6d, 0x47, 0x73, 0x7a };
87
87/* This is the ecc value computed by the HW ecc generator upon writing an empty 88/* This is the ecc value computed by the HW ecc generator upon writing an empty
88 page, one with all 0xff for data. */ 89 page, one with all 0xff for data. */
89static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 }; 90static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
@@ -94,28 +95,29 @@ static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
94#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil) 95#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
95#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k) 96#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
96 97
97static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd); 98static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
99 unsigned int bitmask);
98static void doc200x_select_chip(struct mtd_info *mtd, int chip); 100static void doc200x_select_chip(struct mtd_info *mtd, int chip);
99 101
100static int debug=0; 102static int debug = 0;
101module_param(debug, int, 0); 103module_param(debug, int, 0);
102 104
103static int try_dword=1; 105static int try_dword = 1;
104module_param(try_dword, int, 0); 106module_param(try_dword, int, 0);
105 107
106static int no_ecc_failures=0; 108static int no_ecc_failures = 0;
107module_param(no_ecc_failures, int, 0); 109module_param(no_ecc_failures, int, 0);
108 110
109static int no_autopart=0; 111static int no_autopart = 0;
110module_param(no_autopart, int, 0); 112module_param(no_autopart, int, 0);
111 113
112static int show_firmware_partition=0; 114static int show_firmware_partition = 0;
113module_param(show_firmware_partition, int, 0); 115module_param(show_firmware_partition, int, 0);
114 116
115#ifdef MTD_NAND_DISKONCHIP_BBTWRITE 117#ifdef MTD_NAND_DISKONCHIP_BBTWRITE
116static int inftl_bbt_write=1; 118static int inftl_bbt_write = 1;
117#else 119#else
118static int inftl_bbt_write=0; 120static int inftl_bbt_write = 0;
119#endif 121#endif
120module_param(inftl_bbt_write, int, 0); 122module_param(inftl_bbt_write, int, 0);
121 123
@@ -123,7 +125,6 @@ static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDR
123module_param(doc_config_location, ulong, 0); 125module_param(doc_config_location, ulong, 0);
124MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip"); 126MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
125 127
126
127/* Sector size for HW ECC */ 128/* Sector size for HW ECC */
128#define SECTOR_SIZE 512 129#define SECTOR_SIZE 512
129/* The sector bytes are packed into NB_DATA 10 bit words */ 130/* The sector bytes are packed into NB_DATA 10 bit words */
@@ -147,7 +148,7 @@ static struct rs_control *rs_decoder;
147 * some comments, improved a minor bit and converted it to make use 148 * some comments, improved a minor bit and converted it to make use
148 * of the generic Reed-Solomon library. tglx 149 * of the generic Reed-Solomon library. tglx
149 */ 150 */
150static int doc_ecc_decode (struct rs_control *rs, uint8_t *data, uint8_t *ecc) 151static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
151{ 152{
152 int i, j, nerr, errpos[8]; 153 int i, j, nerr, errpos[8];
153 uint8_t parity; 154 uint8_t parity;
@@ -168,18 +169,18 @@ static int doc_ecc_decode (struct rs_control *rs, uint8_t *data, uint8_t *ecc)
168 * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0] 169 * s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
169 * where x = alpha^(FCR + i) 170 * where x = alpha^(FCR + i)
170 */ 171 */
171 for(j = 1; j < NROOTS; j++) { 172 for (j = 1; j < NROOTS; j++) {
172 if(ds[j] == 0) 173 if (ds[j] == 0)
173 continue; 174 continue;
174 tmp = rs->index_of[ds[j]]; 175 tmp = rs->index_of[ds[j]];
175 for(i = 0; i < NROOTS; i++) 176 for (i = 0; i < NROOTS; i++)
176 s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)]; 177 s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
177 } 178 }
178 179
179 /* Calc s[i] = s[i] / alpha^(v + i) */ 180 /* Calc s[i] = s[i] / alpha^(v + i) */
180 for (i = 0; i < NROOTS; i++) { 181 for (i = 0; i < NROOTS; i++) {
181 if (syn[i]) 182 if (syn[i])
182 syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i)); 183 syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
183 } 184 }
184 /* Call the decoder library */ 185 /* Call the decoder library */
185 nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval); 186 nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
@@ -193,7 +194,7 @@ static int doc_ecc_decode (struct rs_control *rs, uint8_t *data, uint8_t *ecc)
193 * but they are given by the design of the de/encoder circuit 194 * but they are given by the design of the de/encoder circuit
194 * in the DoC ASIC's. 195 * in the DoC ASIC's.
195 */ 196 */
196 for(i = 0;i < nerr; i++) { 197 for (i = 0; i < nerr; i++) {
197 int index, bitpos, pos = 1015 - errpos[i]; 198 int index, bitpos, pos = 1015 - errpos[i];
198 uint8_t val; 199 uint8_t val;
199 if (pos >= NB_DATA && pos < 1019) 200 if (pos >= NB_DATA && pos < 1019)
@@ -205,8 +206,7 @@ static int doc_ecc_decode (struct rs_control *rs, uint8_t *data, uint8_t *ecc)
205 can be modified since pos is even */ 206 can be modified since pos is even */
206 index = (pos >> 3) ^ 1; 207 index = (pos >> 3) ^ 1;
207 bitpos = pos & 7; 208 bitpos = pos & 7;
208 if ((index >= 0 && index < SECTOR_SIZE) || 209 if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
209 index == (SECTOR_SIZE + 1)) {
210 val = (uint8_t) (errval[i] >> (2 + bitpos)); 210 val = (uint8_t) (errval[i] >> (2 + bitpos));
211 parity ^= val; 211 parity ^= val;
212 if (index < SECTOR_SIZE) 212 if (index < SECTOR_SIZE)
@@ -216,9 +216,8 @@ static int doc_ecc_decode (struct rs_control *rs, uint8_t *data, uint8_t *ecc)
216 bitpos = (bitpos + 10) & 7; 216 bitpos = (bitpos + 10) & 7;
217 if (bitpos == 0) 217 if (bitpos == 0)
218 bitpos = 8; 218 bitpos = 8;
219 if ((index >= 0 && index < SECTOR_SIZE) || 219 if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
220 index == (SECTOR_SIZE + 1)) { 220 val = (uint8_t) (errval[i] << (8 - bitpos));
221 val = (uint8_t)(errval[i] << (8 - bitpos));
222 parity ^= val; 221 parity ^= val;
223 if (index < SECTOR_SIZE) 222 if (index < SECTOR_SIZE)
224 data[index] ^= val; 223 data[index] ^= val;
@@ -250,10 +249,11 @@ static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
250/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ 249/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
251static int _DoC_WaitReady(struct doc_priv *doc) 250static int _DoC_WaitReady(struct doc_priv *doc)
252{ 251{
253 void __iomem *docptr = doc->virtadr; 252 void __iomem *docptr = doc->virtadr;
254 unsigned long timeo = jiffies + (HZ * 10); 253 unsigned long timeo = jiffies + (HZ * 10);
255 254
256 if(debug) printk("_DoC_WaitReady...\n"); 255 if (debug)
256 printk("_DoC_WaitReady...\n");
257 /* Out-of-line routine to wait for chip response */ 257 /* Out-of-line routine to wait for chip response */
258 if (DoC_is_MillenniumPlus(doc)) { 258 if (DoC_is_MillenniumPlus(doc)) {
259 while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) { 259 while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
@@ -280,7 +280,7 @@ static int _DoC_WaitReady(struct doc_priv *doc)
280 280
281static inline int DoC_WaitReady(struct doc_priv *doc) 281static inline int DoC_WaitReady(struct doc_priv *doc)
282{ 282{
283 void __iomem *docptr = doc->virtadr; 283 void __iomem *docptr = doc->virtadr;
284 int ret = 0; 284 int ret = 0;
285 285
286 if (DoC_is_MillenniumPlus(doc)) { 286 if (DoC_is_MillenniumPlus(doc)) {
@@ -298,7 +298,8 @@ static inline int DoC_WaitReady(struct doc_priv *doc)
298 DoC_Delay(doc, 2); 298 DoC_Delay(doc, 2);
299 } 299 }
300 300
301 if(debug) printk("DoC_WaitReady OK\n"); 301 if (debug)
302 printk("DoC_WaitReady OK\n");
302 return ret; 303 return ret;
303} 304}
304 305
@@ -306,9 +307,10 @@ static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
306{ 307{
307 struct nand_chip *this = mtd->priv; 308 struct nand_chip *this = mtd->priv;
308 struct doc_priv *doc = this->priv; 309 struct doc_priv *doc = this->priv;
309 void __iomem *docptr = doc->virtadr; 310 void __iomem *docptr = doc->virtadr;
310 311
311 if(debug)printk("write_byte %02x\n", datum); 312 if (debug)
313 printk("write_byte %02x\n", datum);
312 WriteDOC(datum, docptr, CDSNSlowIO); 314 WriteDOC(datum, docptr, CDSNSlowIO);
313 WriteDOC(datum, docptr, 2k_CDSN_IO); 315 WriteDOC(datum, docptr, 2k_CDSN_IO);
314} 316}
@@ -317,77 +319,78 @@ static u_char doc2000_read_byte(struct mtd_info *mtd)
317{ 319{
318 struct nand_chip *this = mtd->priv; 320 struct nand_chip *this = mtd->priv;
319 struct doc_priv *doc = this->priv; 321 struct doc_priv *doc = this->priv;
320 void __iomem *docptr = doc->virtadr; 322 void __iomem *docptr = doc->virtadr;
321 u_char ret; 323 u_char ret;
322 324
323 ReadDOC(docptr, CDSNSlowIO); 325 ReadDOC(docptr, CDSNSlowIO);
324 DoC_Delay(doc, 2); 326 DoC_Delay(doc, 2);
325 ret = ReadDOC(docptr, 2k_CDSN_IO); 327 ret = ReadDOC(docptr, 2k_CDSN_IO);
326 if (debug) printk("read_byte returns %02x\n", ret); 328 if (debug)
329 printk("read_byte returns %02x\n", ret);
327 return ret; 330 return ret;
328} 331}
329 332
330static void doc2000_writebuf(struct mtd_info *mtd, 333static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
331 const u_char *buf, int len)
332{ 334{
333 struct nand_chip *this = mtd->priv; 335 struct nand_chip *this = mtd->priv;
334 struct doc_priv *doc = this->priv; 336 struct doc_priv *doc = this->priv;
335 void __iomem *docptr = doc->virtadr; 337 void __iomem *docptr = doc->virtadr;
336 int i; 338 int i;
337 if (debug)printk("writebuf of %d bytes: ", len); 339 if (debug)
338 for (i=0; i < len; i++) { 340 printk("writebuf of %d bytes: ", len);
341 for (i = 0; i < len; i++) {
339 WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i); 342 WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
340 if (debug && i < 16) 343 if (debug && i < 16)
341 printk("%02x ", buf[i]); 344 printk("%02x ", buf[i]);
342 } 345 }
343 if (debug) printk("\n"); 346 if (debug)
347 printk("\n");
344} 348}
345 349
346static void doc2000_readbuf(struct mtd_info *mtd, 350static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
347 u_char *buf, int len)
348{ 351{
349 struct nand_chip *this = mtd->priv; 352 struct nand_chip *this = mtd->priv;
350 struct doc_priv *doc = this->priv; 353 struct doc_priv *doc = this->priv;
351 void __iomem *docptr = doc->virtadr; 354 void __iomem *docptr = doc->virtadr;
352 int i; 355 int i;
353 356
354 if (debug)printk("readbuf of %d bytes: ", len); 357 if (debug)
358 printk("readbuf of %d bytes: ", len);
355 359
356 for (i=0; i < len; i++) { 360 for (i = 0; i < len; i++) {
357 buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i); 361 buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
358 } 362 }
359} 363}
360 364
361static void doc2000_readbuf_dword(struct mtd_info *mtd, 365static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
362 u_char *buf, int len)
363{ 366{
364 struct nand_chip *this = mtd->priv; 367 struct nand_chip *this = mtd->priv;
365 struct doc_priv *doc = this->priv; 368 struct doc_priv *doc = this->priv;
366 void __iomem *docptr = doc->virtadr; 369 void __iomem *docptr = doc->virtadr;
367 int i; 370 int i;
368 371
369 if (debug) printk("readbuf_dword of %d bytes: ", len); 372 if (debug)
373 printk("readbuf_dword of %d bytes: ", len);
370 374
371 if (unlikely((((unsigned long)buf)|len) & 3)) { 375 if (unlikely((((unsigned long)buf) | len) & 3)) {
372 for (i=0; i < len; i++) { 376 for (i = 0; i < len; i++) {
373 *(uint8_t *)(&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i); 377 *(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
374 } 378 }
375 } else { 379 } else {
376 for (i=0; i < len; i+=4) { 380 for (i = 0; i < len; i += 4) {
377 *(uint32_t*)(&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i); 381 *(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
378 } 382 }
379 } 383 }
380} 384}
381 385
382static int doc2000_verifybuf(struct mtd_info *mtd, 386static int doc2000_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
383 const u_char *buf, int len)
384{ 387{
385 struct nand_chip *this = mtd->priv; 388 struct nand_chip *this = mtd->priv;
386 struct doc_priv *doc = this->priv; 389 struct doc_priv *doc = this->priv;
387 void __iomem *docptr = doc->virtadr; 390 void __iomem *docptr = doc->virtadr;
388 int i; 391 int i;
389 392
390 for (i=0; i < len; i++) 393 for (i = 0; i < len; i++)
391 if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO)) 394 if (buf[i] != ReadDOC(docptr, 2k_CDSN_IO))
392 return -EFAULT; 395 return -EFAULT;
393 return 0; 396 return 0;
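The dword variant above only issues 32-bit reads when both the destination pointer and the length are 4-byte aligned; OR-ing the two values lets a single "& 3" test cover both conditions. A standalone sketch of that test:

#include <stdint.h>
#include <stdio.h>

static int can_read_dwords(const void *buf, int len)
{
	/* Any misalignment in either the pointer or the length shows up
	 * in the low two bits of the OR-ed value. */
	return ((((uintptr_t)buf) | (unsigned int)len) & 3) == 0;
}

int main(void)
{
	uint32_t b[2];

	printf("%d %d\n", can_read_dwords(b, 8),
	       can_read_dwords((char *)b + 1, 8));
	return 0;
}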
@@ -400,12 +403,10 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
400 uint16_t ret; 403 uint16_t ret;
401 404
402 doc200x_select_chip(mtd, nr); 405 doc200x_select_chip(mtd, nr);
403 doc200x_hwcontrol(mtd, NAND_CTL_SETCLE); 406 doc200x_hwcontrol(mtd, NAND_CMD_READID,
404 this->write_byte(mtd, NAND_CMD_READID); 407 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
405 doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE); 408 doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
406 doc200x_hwcontrol(mtd, NAND_CTL_SETALE); 409 doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
407 this->write_byte(mtd, 0);
408 doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
409 410
410 /* We can't use dev_ready here, but at least we wait for the 411 /* We can't use dev_ready here, but at least we wait for the
411 * command to complete 412 * command to complete
@@ -423,12 +424,11 @@ static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
423 } ident; 424 } ident;
424 void __iomem *docptr = doc->virtadr; 425 void __iomem *docptr = doc->virtadr;
425 426
426 doc200x_hwcontrol(mtd, NAND_CTL_SETCLE); 427 doc200x_hwcontrol(mtd, NAND_CMD_READID,
427 doc2000_write_byte(mtd, NAND_CMD_READID); 428 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
428 doc200x_hwcontrol(mtd, NAND_CTL_CLRCLE); 429 doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
429 doc200x_hwcontrol(mtd, NAND_CTL_SETALE); 430 doc200x_hwcontrol(mtd, NAND_CMD_NONE,
430 doc2000_write_byte(mtd, 0); 431 NAND_NCE | NAND_CTRL_CHANGE);
431 doc200x_hwcontrol(mtd, NAND_CTL_CLRALE);
432 432
433 udelay(50); 433 udelay(50);
434 434
@@ -464,7 +464,7 @@ static void __init doc2000_count_chips(struct mtd_info *mtd)
464 printk(KERN_DEBUG "Detected %d chips per floor.\n", i); 464 printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
465} 465}
466 466
467static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this, int state) 467static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
468{ 468{
469 struct doc_priv *doc = this->priv; 469 struct doc_priv *doc = this->priv;
470 470
@@ -482,7 +482,7 @@ static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
482{ 482{
483 struct nand_chip *this = mtd->priv; 483 struct nand_chip *this = mtd->priv;
484 struct doc_priv *doc = this->priv; 484 struct doc_priv *doc = this->priv;
485 void __iomem *docptr = doc->virtadr; 485 void __iomem *docptr = doc->virtadr;
486 486
487 WriteDOC(datum, docptr, CDSNSlowIO); 487 WriteDOC(datum, docptr, CDSNSlowIO);
488 WriteDOC(datum, docptr, Mil_CDSN_IO); 488 WriteDOC(datum, docptr, Mil_CDSN_IO);
@@ -493,7 +493,7 @@ static u_char doc2001_read_byte(struct mtd_info *mtd)
493{ 493{
494 struct nand_chip *this = mtd->priv; 494 struct nand_chip *this = mtd->priv;
495 struct doc_priv *doc = this->priv; 495 struct doc_priv *doc = this->priv;
496 void __iomem *docptr = doc->virtadr; 496 void __iomem *docptr = doc->virtadr;
497 497
498 //ReadDOC(docptr, CDSNSlowIO); 498 //ReadDOC(docptr, CDSNSlowIO);
499 /* 11.4.5 -- delay twice to allow extended length cycle */ 499 /* 11.4.5 -- delay twice to allow extended length cycle */
@@ -503,50 +503,47 @@ static u_char doc2001_read_byte(struct mtd_info *mtd)
503 return ReadDOC(docptr, LastDataRead); 503 return ReadDOC(docptr, LastDataRead);
504} 504}
505 505
506static void doc2001_writebuf(struct mtd_info *mtd, 506static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
507 const u_char *buf, int len)
508{ 507{
509 struct nand_chip *this = mtd->priv; 508 struct nand_chip *this = mtd->priv;
510 struct doc_priv *doc = this->priv; 509 struct doc_priv *doc = this->priv;
511 void __iomem *docptr = doc->virtadr; 510 void __iomem *docptr = doc->virtadr;
512 int i; 511 int i;
513 512
514 for (i=0; i < len; i++) 513 for (i = 0; i < len; i++)
515 WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); 514 WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
516 /* Terminate write pipeline */ 515 /* Terminate write pipeline */
517 WriteDOC(0x00, docptr, WritePipeTerm); 516 WriteDOC(0x00, docptr, WritePipeTerm);
518} 517}
519 518
520static void doc2001_readbuf(struct mtd_info *mtd, 519static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
521 u_char *buf, int len)
522{ 520{
523 struct nand_chip *this = mtd->priv; 521 struct nand_chip *this = mtd->priv;
524 struct doc_priv *doc = this->priv; 522 struct doc_priv *doc = this->priv;
525 void __iomem *docptr = doc->virtadr; 523 void __iomem *docptr = doc->virtadr;
526 int i; 524 int i;
527 525
528 /* Start read pipeline */ 526 /* Start read pipeline */
529 ReadDOC(docptr, ReadPipeInit); 527 ReadDOC(docptr, ReadPipeInit);
530 528
531 for (i=0; i < len-1; i++) 529 for (i = 0; i < len - 1; i++)
532 buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff)); 530 buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
533 531
534 /* Terminate read pipeline */ 532 /* Terminate read pipeline */
535 buf[i] = ReadDOC(docptr, LastDataRead); 533 buf[i] = ReadDOC(docptr, LastDataRead);
536} 534}
537 535
538static int doc2001_verifybuf(struct mtd_info *mtd, 536static int doc2001_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
539 const u_char *buf, int len)
540{ 537{
541 struct nand_chip *this = mtd->priv; 538 struct nand_chip *this = mtd->priv;
542 struct doc_priv *doc = this->priv; 539 struct doc_priv *doc = this->priv;
543 void __iomem *docptr = doc->virtadr; 540 void __iomem *docptr = doc->virtadr;
544 int i; 541 int i;
545 542
546 /* Start read pipeline */ 543 /* Start read pipeline */
547 ReadDOC(docptr, ReadPipeInit); 544 ReadDOC(docptr, ReadPipeInit);
548 545
549 for (i=0; i < len-1; i++) 546 for (i = 0; i < len - 1; i++)
550 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { 547 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
551 ReadDOC(docptr, LastDataRead); 548 ReadDOC(docptr, LastDataRead);
552 return i; 549 return i;
@@ -560,87 +557,90 @@ static u_char doc2001plus_read_byte(struct mtd_info *mtd)
560{ 557{
561 struct nand_chip *this = mtd->priv; 558 struct nand_chip *this = mtd->priv;
562 struct doc_priv *doc = this->priv; 559 struct doc_priv *doc = this->priv;
563 void __iomem *docptr = doc->virtadr; 560 void __iomem *docptr = doc->virtadr;
564 u_char ret; 561 u_char ret;
565 562
566 ReadDOC(docptr, Mplus_ReadPipeInit); 563 ReadDOC(docptr, Mplus_ReadPipeInit);
567 ReadDOC(docptr, Mplus_ReadPipeInit); 564 ReadDOC(docptr, Mplus_ReadPipeInit);
568 ret = ReadDOC(docptr, Mplus_LastDataRead); 565 ret = ReadDOC(docptr, Mplus_LastDataRead);
569 if (debug) printk("read_byte returns %02x\n", ret); 566 if (debug)
567 printk("read_byte returns %02x\n", ret);
570 return ret; 568 return ret;
571} 569}
572 570
573static void doc2001plus_writebuf(struct mtd_info *mtd, 571static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
574 const u_char *buf, int len)
575{ 572{
576 struct nand_chip *this = mtd->priv; 573 struct nand_chip *this = mtd->priv;
577 struct doc_priv *doc = this->priv; 574 struct doc_priv *doc = this->priv;
578 void __iomem *docptr = doc->virtadr; 575 void __iomem *docptr = doc->virtadr;
579 int i; 576 int i;
580 577
581 if (debug)printk("writebuf of %d bytes: ", len); 578 if (debug)
582 for (i=0; i < len; i++) { 579 printk("writebuf of %d bytes: ", len);
580 for (i = 0; i < len; i++) {
583 WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i); 581 WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
584 if (debug && i < 16) 582 if (debug && i < 16)
585 printk("%02x ", buf[i]); 583 printk("%02x ", buf[i]);
586 } 584 }
587 if (debug) printk("\n"); 585 if (debug)
586 printk("\n");
588} 587}
589 588
590static void doc2001plus_readbuf(struct mtd_info *mtd, 589static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
591 u_char *buf, int len)
592{ 590{
593 struct nand_chip *this = mtd->priv; 591 struct nand_chip *this = mtd->priv;
594 struct doc_priv *doc = this->priv; 592 struct doc_priv *doc = this->priv;
595 void __iomem *docptr = doc->virtadr; 593 void __iomem *docptr = doc->virtadr;
596 int i; 594 int i;
597 595
598 if (debug)printk("readbuf of %d bytes: ", len); 596 if (debug)
597 printk("readbuf of %d bytes: ", len);
599 598
600 /* Start read pipeline */ 599 /* Start read pipeline */
601 ReadDOC(docptr, Mplus_ReadPipeInit); 600 ReadDOC(docptr, Mplus_ReadPipeInit);
602 ReadDOC(docptr, Mplus_ReadPipeInit); 601 ReadDOC(docptr, Mplus_ReadPipeInit);
603 602
604 for (i=0; i < len-2; i++) { 603 for (i = 0; i < len - 2; i++) {
605 buf[i] = ReadDOC(docptr, Mil_CDSN_IO); 604 buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
606 if (debug && i < 16) 605 if (debug && i < 16)
607 printk("%02x ", buf[i]); 606 printk("%02x ", buf[i]);
608 } 607 }
609 608
610 /* Terminate read pipeline */ 609 /* Terminate read pipeline */
611 buf[len-2] = ReadDOC(docptr, Mplus_LastDataRead); 610 buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
612 if (debug && i < 16) 611 if (debug && i < 16)
613 printk("%02x ", buf[len-2]); 612 printk("%02x ", buf[len - 2]);
614 buf[len-1] = ReadDOC(docptr, Mplus_LastDataRead); 613 buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
615 if (debug && i < 16) 614 if (debug && i < 16)
616 printk("%02x ", buf[len-1]); 615 printk("%02x ", buf[len - 1]);
617 if (debug) printk("\n"); 616 if (debug)
617 printk("\n");
618} 618}
619 619
620static int doc2001plus_verifybuf(struct mtd_info *mtd, 620static int doc2001plus_verifybuf(struct mtd_info *mtd, const u_char *buf, int len)
621 const u_char *buf, int len)
622{ 621{
623 struct nand_chip *this = mtd->priv; 622 struct nand_chip *this = mtd->priv;
624 struct doc_priv *doc = this->priv; 623 struct doc_priv *doc = this->priv;
625 void __iomem *docptr = doc->virtadr; 624 void __iomem *docptr = doc->virtadr;
626 int i; 625 int i;
627 626
628 if (debug)printk("verifybuf of %d bytes: ", len); 627 if (debug)
628 printk("verifybuf of %d bytes: ", len);
629 629
630 /* Start read pipeline */ 630 /* Start read pipeline */
631 ReadDOC(docptr, Mplus_ReadPipeInit); 631 ReadDOC(docptr, Mplus_ReadPipeInit);
632 ReadDOC(docptr, Mplus_ReadPipeInit); 632 ReadDOC(docptr, Mplus_ReadPipeInit);
633 633
634 for (i=0; i < len-2; i++) 634 for (i = 0; i < len - 2; i++)
635 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) { 635 if (buf[i] != ReadDOC(docptr, Mil_CDSN_IO)) {
636 ReadDOC(docptr, Mplus_LastDataRead); 636 ReadDOC(docptr, Mplus_LastDataRead);
637 ReadDOC(docptr, Mplus_LastDataRead); 637 ReadDOC(docptr, Mplus_LastDataRead);
638 return i; 638 return i;
639 } 639 }
640 if (buf[len-2] != ReadDOC(docptr, Mplus_LastDataRead)) 640 if (buf[len - 2] != ReadDOC(docptr, Mplus_LastDataRead))
641 return len-2; 641 return len - 2;
642 if (buf[len-1] != ReadDOC(docptr, Mplus_LastDataRead)) 642 if (buf[len - 1] != ReadDOC(docptr, Mplus_LastDataRead))
643 return len-1; 643 return len - 1;
644 return 0; 644 return 0;
645} 645}
646 646
@@ -648,10 +648,11 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
648{ 648{
649 struct nand_chip *this = mtd->priv; 649 struct nand_chip *this = mtd->priv;
650 struct doc_priv *doc = this->priv; 650 struct doc_priv *doc = this->priv;
651 void __iomem *docptr = doc->virtadr; 651 void __iomem *docptr = doc->virtadr;
652 int floor = 0; 652 int floor = 0;
653 653
654 if(debug)printk("select chip (%d)\n", chip); 654 if (debug)
655 printk("select chip (%d)\n", chip);
655 656
656 if (chip == -1) { 657 if (chip == -1) {
657 /* Disable flash internally */ 658 /* Disable flash internally */
@@ -660,7 +661,7 @@ static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
660 } 661 }
661 662
662 floor = chip / doc->chips_per_floor; 663 floor = chip / doc->chips_per_floor;
663 chip -= (floor * doc->chips_per_floor); 664 chip -= (floor * doc->chips_per_floor);
664 665
665 /* Assert ChipEnable and deassert WriteProtect */ 666 /* Assert ChipEnable and deassert WriteProtect */
666 WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect); 667 WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
@@ -674,72 +675,61 @@ static void doc200x_select_chip(struct mtd_info *mtd, int chip)
674{ 675{
675 struct nand_chip *this = mtd->priv; 676 struct nand_chip *this = mtd->priv;
676 struct doc_priv *doc = this->priv; 677 struct doc_priv *doc = this->priv;
677 void __iomem *docptr = doc->virtadr; 678 void __iomem *docptr = doc->virtadr;
678 int floor = 0; 679 int floor = 0;
679 680
680 if(debug)printk("select chip (%d)\n", chip); 681 if (debug)
682 printk("select chip (%d)\n", chip);
681 683
682 if (chip == -1) 684 if (chip == -1)
683 return; 685 return;
684 686
685 floor = chip / doc->chips_per_floor; 687 floor = chip / doc->chips_per_floor;
686 chip -= (floor * doc->chips_per_floor); 688 chip -= (floor * doc->chips_per_floor);
687 689
688 /* 11.4.4 -- deassert CE before changing chip */ 690 /* 11.4.4 -- deassert CE before changing chip */
689 doc200x_hwcontrol(mtd, NAND_CTL_CLRNCE); 691 doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
690 692
691 WriteDOC(floor, docptr, FloorSelect); 693 WriteDOC(floor, docptr, FloorSelect);
692 WriteDOC(chip, docptr, CDSNDeviceSelect); 694 WriteDOC(chip, docptr, CDSNDeviceSelect);
693 695
694 doc200x_hwcontrol(mtd, NAND_CTL_SETNCE); 696 doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
695 697
696 doc->curchip = chip; 698 doc->curchip = chip;
697 doc->curfloor = floor; 699 doc->curfloor = floor;
698} 700}
699 701
700static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd) 702#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
703
704static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
705 unsigned int ctrl)
701{ 706{
702 struct nand_chip *this = mtd->priv; 707 struct nand_chip *this = mtd->priv;
703 struct doc_priv *doc = this->priv; 708 struct doc_priv *doc = this->priv;
704 void __iomem *docptr = doc->virtadr; 709 void __iomem *docptr = doc->virtadr;
705 710
706 switch(cmd) { 711 if (ctrl & NAND_CTRL_CHANGE) {
707 case NAND_CTL_SETNCE: 712 doc->CDSNControl &= ~CDSN_CTRL_MSK;
708 doc->CDSNControl |= CDSN_CTRL_CE; 713 doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
709 break; 714 if (debug)
710 case NAND_CTL_CLRNCE: 715 printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
711 doc->CDSNControl &= ~CDSN_CTRL_CE; 716 WriteDOC(doc->CDSNControl, docptr, CDSNControl);
712 break; 717 /* 11.4.3 -- 4 NOPs after CSDNControl write */
713 case NAND_CTL_SETCLE: 718 DoC_Delay(doc, 4);
714 doc->CDSNControl |= CDSN_CTRL_CLE; 719 }
715 break; 720 if (cmd != NAND_CMD_NONE) {
716 case NAND_CTL_CLRCLE: 721 if (DoC_is_2000(doc))
717 doc->CDSNControl &= ~CDSN_CTRL_CLE; 722 doc2000_write_byte(mtd, cmd);
718 break; 723 else
719 case NAND_CTL_SETALE: 724 doc2001_write_byte(mtd, cmd);
720 doc->CDSNControl |= CDSN_CTRL_ALE;
721 break;
722 case NAND_CTL_CLRALE:
723 doc->CDSNControl &= ~CDSN_CTRL_ALE;
724 break;
725 case NAND_CTL_SETWP:
726 doc->CDSNControl |= CDSN_CTRL_WP;
727 break;
728 case NAND_CTL_CLRWP:
729 doc->CDSNControl &= ~CDSN_CTRL_WP;
730 break;
731 } 725 }
732 if (debug)printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
733 WriteDOC(doc->CDSNControl, docptr, CDSNControl);
734 /* 11.4.3 -- 4 NOPs after CSDNControl write */
735 DoC_Delay(doc, 4);
736} 726}
737 727
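The hunk above replaces the old NAND_CTL_SET*/CLR* switch with the single cmd_ctrl-style callback: the latched CE/CLE/ALE state is updated only when NAND_CTRL_CHANGE is set, and any command or address byte is written afterwards. A standalone model of that contract; the NAND_* values below are assumptions matching this kernel's nand.h, and the READID sequence mirrors the doc200x_ident_chip() change earlier in this patch:

#include <stdio.h>

#define NAND_CMD_NONE	 (-1)
#define NAND_CMD_READID	 0x90
#define NAND_NCE	 0x01
#define NAND_CLE	 0x02
#define NAND_ALE	 0x04
#define NAND_CTRL_CHANGE 0x80
#define NAND_CTRL_CLE	 (NAND_NCE | NAND_CLE)
#define NAND_CTRL_ALE	 (NAND_NCE | NAND_ALE)

static unsigned char lines;	/* latched CE/CLE/ALE state */

static void cmd_ctrl(int cmd, unsigned int ctrl)
{
	if (ctrl & NAND_CTRL_CHANGE) {
		lines = ctrl & (NAND_NCE | NAND_CLE | NAND_ALE);
		printf("control lines -> %02x\n", lines);
	}
	if (cmd != NAND_CMD_NONE)
		printf("write byte 0x%02x\n", (unsigned char)cmd);
}

int main(void)
{
	/* READID: command byte under CLE, one address byte under ALE, idle */
	cmd_ctrl(NAND_CMD_READID, NAND_CTRL_CLE | NAND_CTRL_CHANGE);
	cmd_ctrl(0x00, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
	cmd_ctrl(NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
	return 0;
}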
738static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int column, int page_addr) 728static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
739{ 729{
740 struct nand_chip *this = mtd->priv; 730 struct nand_chip *this = mtd->priv;
741 struct doc_priv *doc = this->priv; 731 struct doc_priv *doc = this->priv;
742 void __iomem *docptr = doc->virtadr; 732 void __iomem *docptr = doc->virtadr;
743 733
744 /* 734 /*
745 * Must terminate write pipeline before sending any commands 735 * Must terminate write pipeline before sending any commands
@@ -756,9 +746,9 @@ static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int col
756 if (command == NAND_CMD_SEQIN) { 746 if (command == NAND_CMD_SEQIN) {
757 int readcmd; 747 int readcmd;
758 748
759 if (column >= mtd->oobblock) { 749 if (column >= mtd->writesize) {
760 /* OOB area */ 750 /* OOB area */
761 column -= mtd->oobblock; 751 column -= mtd->writesize;
762 readcmd = NAND_CMD_READOOB; 752 readcmd = NAND_CMD_READOOB;
763 } else if (column < 256) { 753 } else if (column < 256) {
764 /* First 256 bytes --> READ0 */ 754 /* First 256 bytes --> READ0 */
@@ -782,25 +772,26 @@ static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int col
782 WriteDOC(column, docptr, Mplus_FlashAddress); 772 WriteDOC(column, docptr, Mplus_FlashAddress);
783 } 773 }
784 if (page_addr != -1) { 774 if (page_addr != -1) {
785 WriteDOC((unsigned char) (page_addr & 0xff), docptr, Mplus_FlashAddress); 775 WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
786 WriteDOC((unsigned char) ((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); 776 WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
787 /* One more address cycle for higher density devices */ 777 /* One more address cycle for higher density devices */
788 if (this->chipsize & 0x0c000000) { 778 if (this->chipsize & 0x0c000000) {
789 WriteDOC((unsigned char) ((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); 779 WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
790 printk("high density\n"); 780 printk("high density\n");
791 } 781 }
792 } 782 }
793 WriteDOC(0, docptr, Mplus_WritePipeTerm); 783 WriteDOC(0, docptr, Mplus_WritePipeTerm);
794 WriteDOC(0, docptr, Mplus_WritePipeTerm); 784 WriteDOC(0, docptr, Mplus_WritePipeTerm);
795 /* deassert ALE */ 785 /* deassert ALE */
796 if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 || command == NAND_CMD_READOOB || command == NAND_CMD_READID) 786 if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
787 command == NAND_CMD_READOOB || command == NAND_CMD_READID)
797 WriteDOC(0, docptr, Mplus_FlashControl); 788 WriteDOC(0, docptr, Mplus_FlashControl);
798 } 789 }
799 790
800 /* 791 /*
801 * program and erase have their own busy handlers 792 * program and erase have their own busy handlers
802 * status and sequential in needs no delay 793 * status and sequential in needs no delay
803 */ 794 */
804 switch (command) { 795 switch (command) {
805 796
806 case NAND_CMD_PAGEPROG: 797 case NAND_CMD_PAGEPROG:
@@ -817,55 +808,57 @@ static void doc2001plus_command (struct mtd_info *mtd, unsigned command, int col
817 WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd); 808 WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
818 WriteDOC(0, docptr, Mplus_WritePipeTerm); 809 WriteDOC(0, docptr, Mplus_WritePipeTerm);
819 WriteDOC(0, docptr, Mplus_WritePipeTerm); 810 WriteDOC(0, docptr, Mplus_WritePipeTerm);
820 while ( !(this->read_byte(mtd) & 0x40)); 811 while (!(this->read_byte(mtd) & 0x40)) ;
821 return; 812 return;
822 813
823 /* This applies to read commands */ 814 /* This applies to read commands */
824 default: 815 default:
825 /* 816 /*
826 * If we don't have access to the busy pin, we apply the given 817 * If we don't have access to the busy pin, we apply the given
827 * command delay 818 * command delay
828 */ 819 */
829 if (!this->dev_ready) { 820 if (!this->dev_ready) {
830 udelay (this->chip_delay); 821 udelay(this->chip_delay);
831 return; 822 return;
832 } 823 }
833 } 824 }
834 825
835 /* Apply this short delay always to ensure that we do wait tWB in 826 /* Apply this short delay always to ensure that we do wait tWB in
836 * any case on any machine. */ 827 * any case on any machine. */
837 ndelay (100); 828 ndelay(100);
838 /* wait until command is processed */ 829 /* wait until command is processed */
839 while (!this->dev_ready(mtd)); 830 while (!this->dev_ready(mtd)) ;
840} 831}
841 832
842static int doc200x_dev_ready(struct mtd_info *mtd) 833static int doc200x_dev_ready(struct mtd_info *mtd)
843{ 834{
844 struct nand_chip *this = mtd->priv; 835 struct nand_chip *this = mtd->priv;
845 struct doc_priv *doc = this->priv; 836 struct doc_priv *doc = this->priv;
846 void __iomem *docptr = doc->virtadr; 837 void __iomem *docptr = doc->virtadr;
847 838
848 if (DoC_is_MillenniumPlus(doc)) { 839 if (DoC_is_MillenniumPlus(doc)) {
849 /* 11.4.2 -- must NOP four times before checking FR/B# */ 840 /* 11.4.2 -- must NOP four times before checking FR/B# */
850 DoC_Delay(doc, 4); 841 DoC_Delay(doc, 4);
851 if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) { 842 if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
852 if(debug) 843 if (debug)
853 printk("not ready\n"); 844 printk("not ready\n");
854 return 0; 845 return 0;
855 } 846 }
856 if (debug)printk("was ready\n"); 847 if (debug)
848 printk("was ready\n");
857 return 1; 849 return 1;
858 } else { 850 } else {
859 /* 11.4.2 -- must NOP four times before checking FR/B# */ 851 /* 11.4.2 -- must NOP four times before checking FR/B# */
860 DoC_Delay(doc, 4); 852 DoC_Delay(doc, 4);
861 if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { 853 if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
862 if(debug) 854 if (debug)
863 printk("not ready\n"); 855 printk("not ready\n");
864 return 0; 856 return 0;
865 } 857 }
866 /* 11.4.2 -- Must NOP twice if it's ready */ 858 /* 11.4.2 -- Must NOP twice if it's ready */
867 DoC_Delay(doc, 2); 859 DoC_Delay(doc, 2);
868 if (debug)printk("was ready\n"); 860 if (debug)
861 printk("was ready\n");
869 return 1; 862 return 1;
870 } 863 }
871} 864}
@@ -881,10 +874,10 @@ static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
881{ 874{
882 struct nand_chip *this = mtd->priv; 875 struct nand_chip *this = mtd->priv;
883 struct doc_priv *doc = this->priv; 876 struct doc_priv *doc = this->priv;
884 void __iomem *docptr = doc->virtadr; 877 void __iomem *docptr = doc->virtadr;
885 878
886 /* Prime the ECC engine */ 879 /* Prime the ECC engine */
887 switch(mode) { 880 switch (mode) {
888 case NAND_ECC_READ: 881 case NAND_ECC_READ:
889 WriteDOC(DOC_ECC_RESET, docptr, ECCConf); 882 WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
890 WriteDOC(DOC_ECC_EN, docptr, ECCConf); 883 WriteDOC(DOC_ECC_EN, docptr, ECCConf);
@@ -900,10 +893,10 @@ static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
900{ 893{
901 struct nand_chip *this = mtd->priv; 894 struct nand_chip *this = mtd->priv;
902 struct doc_priv *doc = this->priv; 895 struct doc_priv *doc = this->priv;
903 void __iomem *docptr = doc->virtadr; 896 void __iomem *docptr = doc->virtadr;
904 897
905 /* Prime the ECC engine */ 898 /* Prime the ECC engine */
906 switch(mode) { 899 switch (mode) {
907 case NAND_ECC_READ: 900 case NAND_ECC_READ:
908 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); 901 WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
909 WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf); 902 WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
@@ -916,12 +909,11 @@ static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
916} 909}
917 910
918/* This code is only called on write */ 911/* This code is only called on write */
919static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, 912static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
920 unsigned char *ecc_code)
921{ 913{
922 struct nand_chip *this = mtd->priv; 914 struct nand_chip *this = mtd->priv;
923 struct doc_priv *doc = this->priv; 915 struct doc_priv *doc = this->priv;
924 void __iomem *docptr = doc->virtadr; 916 void __iomem *docptr = doc->virtadr;
925 int i; 917 int i;
926 int emptymatch = 1; 918 int emptymatch = 1;
927 919
@@ -961,7 +953,8 @@ static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
961 often. It could be optimized away by examining the data in 953 often. It could be optimized away by examining the data in
962 the writebuf routine, and remembering the result. */ 954 the writebuf routine, and remembering the result. */
963 for (i = 0; i < 512; i++) { 955 for (i = 0; i < 512; i++) {
964 if (dat[i] == 0xff) continue; 956 if (dat[i] == 0xff)
957 continue;
965 emptymatch = 0; 958 emptymatch = 0;
966 break; 959 break;
967 } 960 }
@@ -969,17 +962,20 @@ static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
969 /* If emptymatch still =1, we do have an all-0xff data buffer. 962 /* If emptymatch still =1, we do have an all-0xff data buffer.
970 Return all-0xff ecc value instead of the computed one, so 963 Return all-0xff ecc value instead of the computed one, so
971 it'll look just like a freshly-erased page. */ 964 it'll look just like a freshly-erased page. */
972 if (emptymatch) memset(ecc_code, 0xff, 6); 965 if (emptymatch)
966 memset(ecc_code, 0xff, 6);
973#endif 967#endif
974 return 0; 968 return 0;
975} 969}
976 970
977static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) 971static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
972 u_char *read_ecc, u_char *isnull)
978{ 973{
979 int i, ret = 0; 974 int i, ret = 0;
980 struct nand_chip *this = mtd->priv; 975 struct nand_chip *this = mtd->priv;
981 struct doc_priv *doc = this->priv; 976 struct doc_priv *doc = this->priv;
982 void __iomem *docptr = doc->virtadr; 977 void __iomem *docptr = doc->virtadr;
978 uint8_t calc_ecc[6];
983 volatile u_char dummy; 979 volatile u_char dummy;
984 int emptymatch = 1; 980 int emptymatch = 1;
985 981
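The "emptymatch" logic in both the calculate and correct paths boils down to checking whether a buffer still reads back as all 0xff, i.e. a freshly erased page whose ECC has to be special-cased. A trivial standalone helper expressing that test (the name is made up):

#include <stddef.h>

static int buf_is_all_ff(const unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (buf[i] != 0xff)
			return 0;	/* real data, handle ECC normally */
	return 1;			/* looks freshly erased */
}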
@@ -1012,18 +1008,20 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_
1012 all-0xff data and stored ecc block. Check the stored ecc. */ 1008 all-0xff data and stored ecc block. Check the stored ecc. */
1013 if (emptymatch) { 1009 if (emptymatch) {
1014 for (i = 0; i < 6; i++) { 1010 for (i = 0; i < 6; i++) {
1015 if (read_ecc[i] == 0xff) continue; 1011 if (read_ecc[i] == 0xff)
1012 continue;
1016 emptymatch = 0; 1013 emptymatch = 0;
1017 break; 1014 break;
1018 } 1015 }
1019 } 1016 }
1020 /* If emptymatch still =1, check the data block. */ 1017 /* If emptymatch still =1, check the data block. */
1021 if (emptymatch) { 1018 if (emptymatch) {
1022 /* Note: this somewhat expensive test should not be triggered 1019 /* Note: this somewhat expensive test should not be triggered
1023 often. It could be optimized away by examining the data in 1020 often. It could be optimized away by examining the data in
1024 the readbuf routine, and remembering the result. */ 1021 the readbuf routine, and remembering the result. */
1025 for (i = 0; i < 512; i++) { 1022 for (i = 0; i < 512; i++) {
1026 if (dat[i] == 0xff) continue; 1023 if (dat[i] == 0xff)
1024 continue;
1027 emptymatch = 0; 1025 emptymatch = 0;
1028 break; 1026 break;
1029 } 1027 }
@@ -1032,7 +1030,8 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_
1032 erased block, in which case the ECC will not come out right. 1030 erased block, in which case the ECC will not come out right.
1033 We'll suppress the error and tell the caller everything's 1031 We'll suppress the error and tell the caller everything's
1034 OK. Because it is. */ 1032 OK. Because it is. */
1035 if (!emptymatch) ret = doc_ecc_decode (rs_decoder, dat, calc_ecc); 1033 if (!emptymatch)
1034 ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
1036 if (ret > 0) 1035 if (ret > 0)
1037 printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret); 1036 printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
1038 } 1037 }
@@ -1059,11 +1058,10 @@ static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_
1059 * safer. The only problem with it is that any code that parses oobfree must 1058 * safer. The only problem with it is that any code that parses oobfree must
1060 * be able to handle out-of-order segments. 1059 * be able to handle out-of-order segments.
1061 */ 1060 */
1062static struct nand_oobinfo doc200x_oobinfo = { 1061static struct nand_ecclayout doc200x_oobinfo = {
1063 .useecc = MTD_NANDECC_AUTOPLACE, 1062 .eccbytes = 6,
1064 .eccbytes = 6, 1063 .eccpos = {0, 1, 2, 3, 4, 5},
1065 .eccpos = {0, 1, 2, 3, 4, 5}, 1064 .oobfree = {{8, 8}, {6, 2}}
1066 .oobfree = { {8, 8}, {6, 2} }
1067}; 1065};
1068 1066
1069/* Find the (I)NFTL Media Header, and optionally also the mirror media header. 1067/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
@@ -1072,8 +1070,7 @@ static struct nand_oobinfo doc200x_oobinfo = {
1072 either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media 1070 either "ANAND" or "BNAND". If findmirror=1, also look for the mirror media
1073 header. The page #s of the found media headers are placed in mh0_page and 1071 header. The page #s of the found media headers are placed in mh0_page and
1074 mh1_page in the DOC private structure. */ 1072 mh1_page in the DOC private structure. */
1075static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, 1073static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
1076 const char *id, int findmirror)
1077{ 1074{
1078 struct nand_chip *this = mtd->priv; 1075 struct nand_chip *this = mtd->priv;
1079 struct doc_priv *doc = this->priv; 1076 struct doc_priv *doc = this->priv;
@@ -1082,17 +1079,19 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf,
1082 size_t retlen; 1079 size_t retlen;
1083 1080
1084 for (offs = 0; offs < mtd->size; offs += mtd->erasesize) { 1081 for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
1085 ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf); 1082 ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
1086 if (retlen != mtd->oobblock) continue; 1083 if (retlen != mtd->writesize)
1084 continue;
1087 if (ret) { 1085 if (ret) {
1088 printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", 1086 printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
1089 offs);
1090 } 1087 }
1091 if (memcmp(buf, id, 6)) continue; 1088 if (memcmp(buf, id, 6))
1089 continue;
1092 printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs); 1090 printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
1093 if (doc->mh0_page == -1) { 1091 if (doc->mh0_page == -1) {
1094 doc->mh0_page = offs >> this->page_shift; 1092 doc->mh0_page = offs >> this->page_shift;
1095 if (!findmirror) return 1; 1093 if (!findmirror)
1094 return 1;
1096 continue; 1095 continue;
1097 } 1096 }
1098 doc->mh1_page = offs >> this->page_shift; 1097 doc->mh1_page = offs >> this->page_shift;
@@ -1105,8 +1104,8 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf,
1105 /* Only one mediaheader was found. We want buf to contain a 1104 /* Only one mediaheader was found. We want buf to contain a
1106 mediaheader on return, so we'll have to re-read the one we found. */ 1105 mediaheader on return, so we'll have to re-read the one we found. */
1107 offs = doc->mh0_page << this->page_shift; 1106 offs = doc->mh0_page << this->page_shift;
1108 ret = mtd->read(mtd, offs, mtd->oobblock, &retlen, buf); 1107 ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
1109 if (retlen != mtd->oobblock) { 1108 if (retlen != mtd->writesize) {
1110 /* Insanity. Give up. */ 1109 /* Insanity. Give up. */
1111 printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n"); 1110 printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
1112 return 0; 1111 return 0;
@@ -1114,8 +1113,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf,
1114 return 1; 1113 return 1;
1115} 1114}
1116 1115
1117static inline int __init nftl_partscan(struct mtd_info *mtd, 1116static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
1118 struct mtd_partition *parts)
1119{ 1117{
1120 struct nand_chip *this = mtd->priv; 1118 struct nand_chip *this = mtd->priv;
1121 struct doc_priv *doc = this->priv; 1119 struct doc_priv *doc = this->priv;
@@ -1127,13 +1125,14 @@ static inline int __init nftl_partscan(struct mtd_info *mtd,
1127 unsigned blocks, maxblocks; 1125 unsigned blocks, maxblocks;
1128 int offs, numheaders; 1126 int offs, numheaders;
1129 1127
1130 buf = kmalloc(mtd->oobblock, GFP_KERNEL); 1128 buf = kmalloc(mtd->writesize, GFP_KERNEL);
1131 if (!buf) { 1129 if (!buf) {
1132 printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n"); 1130 printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
1133 return 0; 1131 return 0;
1134 } 1132 }
1135 if (!(numheaders=find_media_headers(mtd, buf, "ANAND", 1))) goto out; 1133 if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
1136 mh = (struct NFTLMediaHeader *) buf; 1134 goto out;
1135 mh = (struct NFTLMediaHeader *)buf;
1137 1136
1138 mh->NumEraseUnits = le16_to_cpu(mh->NumEraseUnits); 1137 mh->NumEraseUnits = le16_to_cpu(mh->NumEraseUnits);
1139 mh->FirstPhysicalEUN = le16_to_cpu(mh->FirstPhysicalEUN); 1138 mh->FirstPhysicalEUN = le16_to_cpu(mh->FirstPhysicalEUN);
@@ -1155,8 +1154,8 @@ static inline int __init nftl_partscan(struct mtd_info *mtd,
1155 /* Auto-determine UnitSizeFactor. The constraints are: 1154 /* Auto-determine UnitSizeFactor. The constraints are:
1156 - There can be at most 32768 virtual blocks. 1155 - There can be at most 32768 virtual blocks.
1157 - There can be at most (virtual block size - page size) 1156 - There can be at most (virtual block size - page size)
1158 virtual blocks (because MediaHeader+BBT must fit in 1). 1157 virtual blocks (because MediaHeader+BBT must fit in 1).
1159 */ 1158 */
1160 mh->UnitSizeFactor = 0xff; 1159 mh->UnitSizeFactor = 0xff;
1161 while (blocks > maxblocks) { 1160 while (blocks > maxblocks) {
1162 blocks >>= 1; 1161 blocks >>= 1;
@@ -1211,14 +1210,13 @@ static inline int __init nftl_partscan(struct mtd_info *mtd,
1211 } 1210 }
1212 1211
1213 ret = numparts; 1212 ret = numparts;
1214out: 1213 out:
1215 kfree(buf); 1214 kfree(buf);
1216 return ret; 1215 return ret;
1217} 1216}
1218 1217
1219/* This is a stripped-down copy of the code in inftlmount.c */ 1218/* This is a stripped-down copy of the code in inftlmount.c */
1220static inline int __init inftl_partscan(struct mtd_info *mtd, 1219static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
1221 struct mtd_partition *parts)
1222{ 1220{
1223 struct nand_chip *this = mtd->priv; 1221 struct nand_chip *this = mtd->priv;
1224 struct doc_priv *doc = this->priv; 1222 struct doc_priv *doc = this->priv;
@@ -1235,15 +1233,16 @@ static inline int __init inftl_partscan(struct mtd_info *mtd,
1235 if (inftl_bbt_write) 1233 if (inftl_bbt_write)
1236 end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift); 1234 end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
1237 1235
1238 buf = kmalloc(mtd->oobblock, GFP_KERNEL); 1236 buf = kmalloc(mtd->writesize, GFP_KERNEL);
1239 if (!buf) { 1237 if (!buf) {
1240 printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n"); 1238 printk(KERN_ERR "DiskOnChip mediaheader kmalloc failed!\n");
1241 return 0; 1239 return 0;
1242 } 1240 }
1243 1241
1244 if (!find_media_headers(mtd, buf, "BNAND", 0)) goto out; 1242 if (!find_media_headers(mtd, buf, "BNAND", 0))
1243 goto out;
1245 doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift); 1244 doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
1246 mh = (struct INFTLMediaHeader *) buf; 1245 mh = (struct INFTLMediaHeader *)buf;
1247 1246
1248 mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks); 1247 mh->NoOfBootImageBlocks = le32_to_cpu(mh->NoOfBootImageBlocks);
1249 mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions); 1248 mh->NoOfBinaryPartitions = le32_to_cpu(mh->NoOfBinaryPartitions);
@@ -1319,8 +1318,10 @@ static inline int __init inftl_partscan(struct mtd_info *mtd,
1319 parts[numparts].offset = ip->firstUnit << vshift; 1318 parts[numparts].offset = ip->firstUnit << vshift;
1320 parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift; 1319 parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
1321 numparts++; 1320 numparts++;
1322 if (ip->lastUnit > lastvunit) lastvunit = ip->lastUnit; 1321 if (ip->lastUnit > lastvunit)
1323 if (ip->flags & INFTL_LAST) break; 1322 lastvunit = ip->lastUnit;
1323 if (ip->flags & INFTL_LAST)
1324 break;
1324 } 1325 }
1325 lastvunit++; 1326 lastvunit++;
1326 if ((lastvunit << vshift) < end) { 1327 if ((lastvunit << vshift) < end) {
@@ -1330,7 +1331,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd,
1330 numparts++; 1331 numparts++;
1331 } 1332 }
1332 ret = numparts; 1333 ret = numparts;
1333out: 1334 out:
1334 kfree(buf); 1335 kfree(buf);
1335 return ret; 1336 return ret;
1336} 1337}
@@ -1342,11 +1343,12 @@ static int __init nftl_scan_bbt(struct mtd_info *mtd)
1342 struct doc_priv *doc = this->priv; 1343 struct doc_priv *doc = this->priv;
1343 struct mtd_partition parts[2]; 1344 struct mtd_partition parts[2];
1344 1345
1345 memset((char *) parts, 0, sizeof(parts)); 1346 memset((char *)parts, 0, sizeof(parts));
1346 /* On NFTL, we have to find the media headers before we can read the 1347 /* On NFTL, we have to find the media headers before we can read the
1347 BBTs, since they're stored in the media header eraseblocks. */ 1348 BBTs, since they're stored in the media header eraseblocks. */
1348 numparts = nftl_partscan(mtd, parts); 1349 numparts = nftl_partscan(mtd, parts);
1349 if (!numparts) return -EIO; 1350 if (!numparts)
1351 return -EIO;
1350 this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT | 1352 this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
1351 NAND_BBT_SAVECONTENT | NAND_BBT_WRITE | 1353 NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
1352 NAND_BBT_VERSION; 1354 NAND_BBT_VERSION;
@@ -1393,8 +1395,7 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
1393 this->bbt_td->pages[0] = 2; 1395 this->bbt_td->pages[0] = 2;
1394 this->bbt_md = NULL; 1396 this->bbt_md = NULL;
1395 } else { 1397 } else {
1396 this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | 1398 this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
1397 NAND_BBT_VERSION;
1398 if (inftl_bbt_write) 1399 if (inftl_bbt_write)
1399 this->bbt_td->options |= NAND_BBT_WRITE; 1400 this->bbt_td->options |= NAND_BBT_WRITE;
1400 this->bbt_td->offs = 8; 1401 this->bbt_td->offs = 8;
@@ -1404,8 +1405,7 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
1404 this->bbt_td->reserved_block_code = 0x01; 1405 this->bbt_td->reserved_block_code = 0x01;
1405 this->bbt_td->pattern = "MSYS_BBT"; 1406 this->bbt_td->pattern = "MSYS_BBT";
1406 1407
1407 this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | 1408 this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
1408 NAND_BBT_VERSION;
1409 if (inftl_bbt_write) 1409 if (inftl_bbt_write)
1410 this->bbt_md->options |= NAND_BBT_WRITE; 1410 this->bbt_md->options |= NAND_BBT_WRITE;
1411 this->bbt_md->offs = 8; 1411 this->bbt_md->offs = 8;
@@ -1420,12 +1420,13 @@ static int __init inftl_scan_bbt(struct mtd_info *mtd)
1420 At least as nand_bbt.c is currently written. */ 1420 At least as nand_bbt.c is currently written. */
1421 if ((ret = nand_scan_bbt(mtd, NULL))) 1421 if ((ret = nand_scan_bbt(mtd, NULL)))
1422 return ret; 1422 return ret;
1423 memset((char *) parts, 0, sizeof(parts)); 1423 memset((char *)parts, 0, sizeof(parts));
1424 numparts = inftl_partscan(mtd, parts); 1424 numparts = inftl_partscan(mtd, parts);
1425 /* At least for now, require the INFTL Media Header. We could probably 1425 /* At least for now, require the INFTL Media Header. We could probably
1426 do without it for non-INFTL use, since all it gives us is 1426 do without it for non-INFTL use, since all it gives us is
1427 autopartitioning, but I want to give it more thought. */ 1427 autopartitioning, but I want to give it more thought. */
1428 if (!numparts) return -EIO; 1428 if (!numparts)
1429 return -EIO;
1429 add_mtd_device(mtd); 1430 add_mtd_device(mtd);
1430#ifdef CONFIG_MTD_PARTITIONS 1431#ifdef CONFIG_MTD_PARTITIONS
1431 if (!no_autopart) 1432 if (!no_autopart)
@@ -1439,7 +1440,6 @@ static inline int __init doc2000_init(struct mtd_info *mtd)
1439 struct nand_chip *this = mtd->priv; 1440 struct nand_chip *this = mtd->priv;
1440 struct doc_priv *doc = this->priv; 1441 struct doc_priv *doc = this->priv;
1441 1442
1442 this->write_byte = doc2000_write_byte;
1443 this->read_byte = doc2000_read_byte; 1443 this->read_byte = doc2000_read_byte;
1444 this->write_buf = doc2000_writebuf; 1444 this->write_buf = doc2000_writebuf;
1445 this->read_buf = doc2000_readbuf; 1445 this->read_buf = doc2000_readbuf;
@@ -1457,7 +1457,6 @@ static inline int __init doc2001_init(struct mtd_info *mtd)
1457 struct nand_chip *this = mtd->priv; 1457 struct nand_chip *this = mtd->priv;
1458 struct doc_priv *doc = this->priv; 1458 struct doc_priv *doc = this->priv;
1459 1459
1460 this->write_byte = doc2001_write_byte;
1461 this->read_byte = doc2001_read_byte; 1460 this->read_byte = doc2001_read_byte;
1462 this->write_buf = doc2001_writebuf; 1461 this->write_buf = doc2001_writebuf;
1463 this->read_buf = doc2001_readbuf; 1462 this->read_buf = doc2001_readbuf;
@@ -1489,16 +1488,15 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
1489 struct nand_chip *this = mtd->priv; 1488 struct nand_chip *this = mtd->priv;
1490 struct doc_priv *doc = this->priv; 1489 struct doc_priv *doc = this->priv;
1491 1490
1492 this->write_byte = NULL;
1493 this->read_byte = doc2001plus_read_byte; 1491 this->read_byte = doc2001plus_read_byte;
1494 this->write_buf = doc2001plus_writebuf; 1492 this->write_buf = doc2001plus_writebuf;
1495 this->read_buf = doc2001plus_readbuf; 1493 this->read_buf = doc2001plus_readbuf;
1496 this->verify_buf = doc2001plus_verifybuf; 1494 this->verify_buf = doc2001plus_verifybuf;
1497 this->scan_bbt = inftl_scan_bbt; 1495 this->scan_bbt = inftl_scan_bbt;
1498 this->hwcontrol = NULL; 1496 this->cmd_ctrl = NULL;
1499 this->select_chip = doc2001plus_select_chip; 1497 this->select_chip = doc2001plus_select_chip;
1500 this->cmdfunc = doc2001plus_command; 1498 this->cmdfunc = doc2001plus_command;
1501 this->enable_hwecc = doc2001plus_enable_hwecc; 1499 this->ecc.hwctl = doc2001plus_enable_hwecc;
1502 1500
1503 doc->chips_per_floor = 1; 1501 doc->chips_per_floor = 1;
1504 mtd->name = "DiskOnChip Millennium Plus"; 1502 mtd->name = "DiskOnChip Millennium Plus";
@@ -1535,20 +1533,16 @@ static int __init doc_probe(unsigned long physadr)
1535 save_control = ReadDOC(virtadr, DOCControl); 1533 save_control = ReadDOC(virtadr, DOCControl);
1536 1534
1537 /* Reset the DiskOnChip ASIC */ 1535 /* Reset the DiskOnChip ASIC */
1538 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, 1536 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
1539 virtadr, DOCControl); 1537 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
1540 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET,
1541 virtadr, DOCControl);
1542 1538
1543 /* Enable the DiskOnChip ASIC */ 1539 /* Enable the DiskOnChip ASIC */
1544 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, 1540 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
1545 virtadr, DOCControl); 1541 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
1546 WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL,
1547 virtadr, DOCControl);
1548 1542
1549 ChipID = ReadDOC(virtadr, ChipID); 1543 ChipID = ReadDOC(virtadr, ChipID);
1550 1544
1551 switch(ChipID) { 1545 switch (ChipID) {
1552 case DOC_ChipID_Doc2k: 1546 case DOC_ChipID_Doc2k:
1553 reg = DoC_2k_ECCStatus; 1547 reg = DoC_2k_ECCStatus;
1554 break; 1548 break;
@@ -1564,15 +1558,13 @@ static int __init doc_probe(unsigned long physadr)
1564 ReadDOC(virtadr, Mplus_Power); 1558 ReadDOC(virtadr, Mplus_Power);
1565 1559
1566 /* Reset the Millennium Plus ASIC */ 1560 /* Reset the Millennium Plus ASIC */
1567 tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | 1561 tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
1568 DOC_MODE_BDECT;
1569 WriteDOC(tmp, virtadr, Mplus_DOCControl); 1562 WriteDOC(tmp, virtadr, Mplus_DOCControl);
1570 WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm); 1563 WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
1571 1564
1572 mdelay(1); 1565 mdelay(1);
1573 /* Enable the Millennium Plus ASIC */ 1566 /* Enable the Millennium Plus ASIC */
1574 tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | 1567 tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
1575 DOC_MODE_BDECT;
1576 WriteDOC(tmp, virtadr, Mplus_DOCControl); 1568 WriteDOC(tmp, virtadr, Mplus_DOCControl);
1577 WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm); 1569 WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
1578 mdelay(1); 1570 mdelay(1);
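
The reset and enable pairs above follow one pattern: the mode word is written to Mplus_DOCControl and its complement to Mplus_CtrlConfirm. A hedged sketch with a hypothetical helper (not part of the patch):

	static void doc_mplus_mode(void __iomem *virtadr, unsigned long mode)
	{
		unsigned long tmp;

		tmp = mode | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
		WriteDOC(tmp, virtadr, Mplus_DOCControl);	/* set the mode */
		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);	/* confirm with the complement */
		mdelay(1);					/* let the ASIC settle */
	}

With that, the probe sequence above amounts to doc_mplus_mode(virtadr, DOC_MODE_RESET) followed by doc_mplus_mode(virtadr, DOC_MODE_NORMAL).
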
@@ -1596,7 +1588,7 @@ static int __init doc_probe(unsigned long physadr)
1596 goto notfound; 1588 goto notfound;
1597 } 1589 }
1598 /* Check the TOGGLE bit in the ECC register */ 1590 /* Check the TOGGLE bit in the ECC register */
1599 tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; 1591 tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
1600 tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; 1592 tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
1601 tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT; 1593 tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
1602 if ((tmp == tmpb) || (tmp != tmpc)) { 1594 if ((tmp == tmpb) || (tmp != tmpc)) {
@@ -1626,11 +1618,11 @@ static int __init doc_probe(unsigned long physadr)
1626 if (ChipID == DOC_ChipID_DocMilPlus16) { 1618 if (ChipID == DOC_ChipID_DocMilPlus16) {
1627 WriteDOC(~newval, virtadr, Mplus_AliasResolution); 1619 WriteDOC(~newval, virtadr, Mplus_AliasResolution);
1628 oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution); 1620 oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
1629 WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it 1621 WriteDOC(newval, virtadr, Mplus_AliasResolution); // restore it
1630 } else { 1622 } else {
1631 WriteDOC(~newval, virtadr, AliasResolution); 1623 WriteDOC(~newval, virtadr, AliasResolution);
1632 oldval = ReadDOC(doc->virtadr, AliasResolution); 1624 oldval = ReadDOC(doc->virtadr, AliasResolution);
1633 WriteDOC(newval, virtadr, AliasResolution); // restore it 1625 WriteDOC(newval, virtadr, AliasResolution); // restore it
1634 } 1626 }
1635 newval = ~newval; 1627 newval = ~newval;
1636 if (oldval == newval) { 1628 if (oldval == newval) {
@@ -1642,10 +1634,8 @@ static int __init doc_probe(unsigned long physadr)
1642 printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr); 1634 printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
1643 1635
1644 len = sizeof(struct mtd_info) + 1636 len = sizeof(struct mtd_info) +
1645 sizeof(struct nand_chip) + 1637 sizeof(struct nand_chip) + sizeof(struct doc_priv) + (2 * sizeof(struct nand_bbt_descr));
1646 sizeof(struct doc_priv) + 1638 mtd = kmalloc(len, GFP_KERNEL);
1647 (2 * sizeof(struct nand_bbt_descr));
1648 mtd = kmalloc(len, GFP_KERNEL);
1649 if (!mtd) { 1639 if (!mtd) {
1650 printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len); 1640 printk(KERN_ERR "DiskOnChip kmalloc (%d bytes) failed!\n", len);
1651 ret = -ENOMEM; 1641 ret = -ENOMEM;
@@ -1663,17 +1653,19 @@ static int __init doc_probe(unsigned long physadr)
1663 1653
1664 nand->priv = doc; 1654 nand->priv = doc;
1665 nand->select_chip = doc200x_select_chip; 1655 nand->select_chip = doc200x_select_chip;
1666 nand->hwcontrol = doc200x_hwcontrol; 1656 nand->cmd_ctrl = doc200x_hwcontrol;
1667 nand->dev_ready = doc200x_dev_ready; 1657 nand->dev_ready = doc200x_dev_ready;
1668 nand->waitfunc = doc200x_wait; 1658 nand->waitfunc = doc200x_wait;
1669 nand->block_bad = doc200x_block_bad; 1659 nand->block_bad = doc200x_block_bad;
1670 nand->enable_hwecc = doc200x_enable_hwecc; 1660 nand->ecc.hwctl = doc200x_enable_hwecc;
1671 nand->calculate_ecc = doc200x_calculate_ecc; 1661 nand->ecc.calculate = doc200x_calculate_ecc;
1672 nand->correct_data = doc200x_correct_data; 1662 nand->ecc.correct = doc200x_correct_data;
1673 1663
1674 nand->autooob = &doc200x_oobinfo; 1664 nand->ecc.layout = &doc200x_oobinfo;
1675 nand->eccmode = NAND_ECC_HW6_512; 1665 nand->ecc.mode = NAND_ECC_HW_SYNDROME;
1676 nand->options = NAND_USE_FLASH_BBT | NAND_HWECC_SYNDROME; 1666 nand->ecc.size = 512;
1667 nand->ecc.bytes = 6;
1668 nand->options = NAND_USE_FLASH_BBT;
1677 1669
1678 doc->physadr = physadr; 1670 doc->physadr = physadr;
1679 doc->virtadr = virtadr; 1671 doc->virtadr = virtadr;
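
This hunk is the heart of the interface change: the flat eccmode/enable_hwecc/calculate_ecc/correct_data members move into the chip's ecc control block. A condensed sketch of the same assignments, with my_* standing in for the driver callbacks:

	nand->ecc.mode      = NAND_ECC_HW_SYNDROME;	/* hardware ECC, syndrome based */
	nand->ecc.size      = 512;			/* data bytes covered per ECC step */
	nand->ecc.bytes     = 6;			/* ECC bytes produced per step */
	nand->ecc.layout    = &doc200x_oobinfo;		/* where those bytes live in OOB */
	nand->ecc.hwctl     = my_enable_hwecc;		/* was nand->enable_hwecc */
	nand->ecc.calculate = my_calculate_ecc;		/* was nand->calculate_ecc */
	nand->ecc.correct   = my_correct_data;		/* was nand->correct_data */

nand_scan() then works out the number of ECC steps per page from these size values, so the driver no longer encodes it in the mode name.
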
@@ -1707,18 +1699,18 @@ static int __init doc_probe(unsigned long physadr)
1707 doclist = mtd; 1699 doclist = mtd;
1708 return 0; 1700 return 0;
1709 1701
1710notfound: 1702 notfound:
1711 /* Put back the contents of the DOCControl register, in case it's not 1703 /* Put back the contents of the DOCControl register, in case it's not
1712 actually a DiskOnChip. */ 1704 actually a DiskOnChip. */
1713 WriteDOC(save_control, virtadr, DOCControl); 1705 WriteDOC(save_control, virtadr, DOCControl);
1714fail: 1706 fail:
1715 iounmap(virtadr); 1707 iounmap(virtadr);
1716 return ret; 1708 return ret;
1717} 1709}
1718 1710
1719static void release_nanddoc(void) 1711static void release_nanddoc(void)
1720{ 1712{
1721 struct mtd_info *mtd, *nextmtd; 1713 struct mtd_info *mtd, *nextmtd;
1722 struct nand_chip *nand; 1714 struct nand_chip *nand;
1723 struct doc_priv *doc; 1715 struct doc_priv *doc;
1724 1716
@@ -1747,8 +1739,8 @@ static int __init init_nanddoc(void)
1747 * generator polinomial degree = 4 1739 * generator polinomial degree = 4
1748 */ 1740 */
1749 rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS); 1741 rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
1750 if (!rs_decoder) { 1742 if (!rs_decoder) {
1751 printk (KERN_ERR "DiskOnChip: Could not create a RS decoder\n"); 1743 printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
1752 return -ENOMEM; 1744 return -ENOMEM;
1753 } 1745 }
1754 1746
@@ -1758,7 +1750,7 @@ static int __init init_nanddoc(void)
1758 if (ret < 0) 1750 if (ret < 0)
1759 goto outerr; 1751 goto outerr;
1760 } else { 1752 } else {
1761 for (i=0; (doc_locations[i] != 0xffffffff); i++) { 1753 for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
1762 doc_probe(doc_locations[i]); 1754 doc_probe(doc_locations[i]);
1763 } 1755 }
1764 } 1756 }
@@ -1770,7 +1762,7 @@ static int __init init_nanddoc(void)
1770 goto outerr; 1762 goto outerr;
1771 } 1763 }
1772 return 0; 1764 return 0;
1773outerr: 1765 outerr:
1774 free_rs(rs_decoder); 1766 free_rs(rs_decoder);
1775 return ret; 1767 return ret;
1776} 1768}
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
index 9b1fd2f387fa..516c0e5e564c 100644
--- a/drivers/mtd/nand/edb7312.c
+++ b/drivers/mtd/nand/edb7312.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/mtd/nand/edb7312.c 2 * drivers/mtd/nand/edb7312.c
3 * 3 *
4 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) 4 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
5 * 5 *
6 * Derived from drivers/mtd/nand/autcpu12.c 6 * Derived from drivers/mtd/nand/autcpu12.c
7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 7 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
@@ -25,7 +25,7 @@
25#include <linux/mtd/nand.h> 25#include <linux/mtd/nand.h>
26#include <linux/mtd/partitions.h> 26#include <linux/mtd/partitions.h>
27#include <asm/io.h> 27#include <asm/io.h>
28#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */ 28#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */
29#include <asm/sizes.h> 29#include <asm/sizes.h>
30#include <asm/hardware/clps7111.h> 30#include <asm/hardware/clps7111.h>
31 31
@@ -54,51 +54,45 @@ static struct mtd_info *ep7312_mtd = NULL;
54 */ 54 */
55 55
56static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE; 56static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
57static void __iomem * ep7312_pxdr = (void __iomem *) EP7312_PXDR; 57static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
58static void __iomem * ep7312_pxddr = (void __iomem *) EP7312_PXDDR; 58static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
59 59
60#ifdef CONFIG_MTD_PARTITIONS 60#ifdef CONFIG_MTD_PARTITIONS
61/* 61/*
62 * Define static partitions for flash device 62 * Define static partitions for flash device
63 */ 63 */
64static struct mtd_partition partition_info[] = { 64static struct mtd_partition partition_info[] = {
65 { .name = "EP7312 Nand Flash", 65 {.name = "EP7312 Nand Flash",
66 .offset = 0, 66 .offset = 0,
67 .size = 8*1024*1024 } 67 .size = 8 * 1024 * 1024}
68}; 68};
69
69#define NUM_PARTITIONS 1 70#define NUM_PARTITIONS 1
70 71
71#endif 72#endif
72 73
73
74/* 74/*
75 * hardware specific access to control-lines 75 * hardware specific access to control-lines
76 *
77 * NAND_NCE: bit 0 -> bit 7
78 * NAND_CLE: bit 1 -> bit 4
79 * NAND_ALE: bit 2 -> bit 5
76 */ 80 */
77static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd) 81static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
78{ 82{
79 switch(cmd) { 83 struct nand_chip *chip = mtd->priv;
80 84
81 case NAND_CTL_SETCLE: 85 if (ctrl & NAND_CTRL_CHANGE) {
82 clps_writeb(clps_readb(ep7312_pxdr) | 0x10, ep7312_pxdr); 86 unsigned char bits;
83 break; 87
84 case NAND_CTL_CLRCLE: 88 bits = (ctrl & (NAND_CLE | NAND_ALE)) << 3;
85 clps_writeb(clps_readb(ep7312_pxdr) & ~0x10, ep7312_pxdr); 89 bits = (ctrl & NAND_NCE) << 7;
86 break; 90
87 91 clps_writeb((clps_readb(ep7312_pxdr) & 0xB0) | 0x10,
88 case NAND_CTL_SETALE: 92 ep7312_pxdr);
89 clps_writeb(clps_readb(ep7312_pxdr) | 0x20, ep7312_pxdr);
90 break;
91 case NAND_CTL_CLRALE:
92 clps_writeb(clps_readb(ep7312_pxdr) & ~0x20, ep7312_pxdr);
93 break;
94
95 case NAND_CTL_SETNCE:
96 clps_writeb((clps_readb(ep7312_pxdr) | 0x80) & ~0x40, ep7312_pxdr);
97 break;
98 case NAND_CTL_CLRNCE:
99 clps_writeb((clps_readb(ep7312_pxdr) | 0x80) | 0x40, ep7312_pxdr);
100 break;
101 } 93 }
94 if (cmd != NAND_CMD_NONE)
95 writeb(cmd, chip->IO_ADDR_W);
102} 96}
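
The old hwcontrol() callback received one NAND_CTL_SET*/CLR* opcode per call; its replacement cmd_ctrl() receives the byte to write plus the desired state of the control lines, with NAND_CTRL_CHANGE set only when those lines change. A generic sketch of that contract (set_gpio_* are hypothetical board helpers, not EDB7312 code):

	static void board_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
	{
		struct nand_chip *chip = mtd->priv;

		if (ctrl & NAND_CTRL_CHANGE) {
			/* ctrl carries NAND_NCE, NAND_CLE and NAND_ALE as level bits */
			set_gpio_nce(!(ctrl & NAND_NCE));	/* nCE is active low */
			set_gpio_cle(!!(ctrl & NAND_CLE));
			set_gpio_ale(!!(ctrl & NAND_ALE));
		}
		if (cmd != NAND_CMD_NONE)
			writeb(cmd, chip->IO_ADDR_W);	/* command or address byte */
	}
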
103 97
104/* 98/*
@@ -108,6 +102,7 @@ static int ep7312_device_ready(struct mtd_info *mtd)
108{ 102{
109 return 1; 103 return 1;
110} 104}
105
111#ifdef CONFIG_MTD_PARTITIONS 106#ifdef CONFIG_MTD_PARTITIONS
112const char *part_probes[] = { "cmdlinepart", NULL }; 107const char *part_probes[] = { "cmdlinepart", NULL };
113#endif 108#endif
@@ -115,18 +110,16 @@ const char *part_probes[] = { "cmdlinepart", NULL };
115/* 110/*
116 * Main initialization routine 111 * Main initialization routine
117 */ 112 */
118static int __init ep7312_init (void) 113static int __init ep7312_init(void)
119{ 114{
120 struct nand_chip *this; 115 struct nand_chip *this;
121 const char *part_type = 0; 116 const char *part_type = 0;
122 int mtd_parts_nb = 0; 117 int mtd_parts_nb = 0;
123 struct mtd_partition *mtd_parts = 0; 118 struct mtd_partition *mtd_parts = 0;
124 void __iomem * ep7312_fio_base; 119 void __iomem *ep7312_fio_base;
125 120
126 /* Allocate memory for MTD device structure and private data */ 121 /* Allocate memory for MTD device structure and private data */
127 ep7312_mtd = kmalloc(sizeof(struct mtd_info) + 122 ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
128 sizeof(struct nand_chip),
129 GFP_KERNEL);
130 if (!ep7312_mtd) { 123 if (!ep7312_mtd) {
131 printk("Unable to allocate EDB7312 NAND MTD device structure.\n"); 124 printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
132 return -ENOMEM; 125 return -ENOMEM;
@@ -134,21 +127,22 @@ static int __init ep7312_init (void)
134 127
135 /* map physical adress */ 128 /* map physical adress */
136 ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K); 129 ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
137 if(!ep7312_fio_base) { 130 if (!ep7312_fio_base) {
138 printk("ioremap EDB7312 NAND flash failed\n"); 131 printk("ioremap EDB7312 NAND flash failed\n");
139 kfree(ep7312_mtd); 132 kfree(ep7312_mtd);
140 return -EIO; 133 return -EIO;
141 } 134 }
142 135
143 /* Get pointer to private data */ 136 /* Get pointer to private data */
144 this = (struct nand_chip *) (&ep7312_mtd[1]); 137 this = (struct nand_chip *)(&ep7312_mtd[1]);
145 138
146 /* Initialize structures */ 139 /* Initialize structures */
147 memset((char *) ep7312_mtd, 0, sizeof(struct mtd_info)); 140 memset(ep7312_mtd, 0, sizeof(struct mtd_info));
148 memset((char *) this, 0, sizeof(struct nand_chip)); 141 memset(this, 0, sizeof(struct nand_chip));
149 142
150 /* Link the private data with the MTD structure */ 143 /* Link the private data with the MTD structure */
151 ep7312_mtd->priv = this; 144 ep7312_mtd->priv = this;
145 ep7312_mtd->owner = THIS_MODULE;
152 146
153 /* 147 /*
154 * Set GPIO Port B control register so that the pins are configured 148 * Set GPIO Port B control register so that the pins are configured
@@ -159,22 +153,20 @@ static int __init ep7312_init (void)
159 /* insert callbacks */ 153 /* insert callbacks */
160 this->IO_ADDR_R = ep7312_fio_base; 154 this->IO_ADDR_R = ep7312_fio_base;
161 this->IO_ADDR_W = ep7312_fio_base; 155 this->IO_ADDR_W = ep7312_fio_base;
162 this->hwcontrol = ep7312_hwcontrol; 156 this->cmd_ctrl = ep7312_hwcontrol;
163 this->dev_ready = ep7312_device_ready; 157 this->dev_ready = ep7312_device_ready;
164 /* 15 us command delay time */ 158 /* 15 us command delay time */
165 this->chip_delay = 15; 159 this->chip_delay = 15;
166 160
167 /* Scan to find existence of the device */ 161 /* Scan to find existence of the device */
168 if (nand_scan (ep7312_mtd, 1)) { 162 if (nand_scan(ep7312_mtd, 1)) {
169 iounmap((void *)ep7312_fio_base); 163 iounmap((void *)ep7312_fio_base);
170 kfree (ep7312_mtd); 164 kfree(ep7312_mtd);
171 return -ENXIO; 165 return -ENXIO;
172 } 166 }
173
174#ifdef CONFIG_MTD_PARTITIONS 167#ifdef CONFIG_MTD_PARTITIONS
175 ep7312_mtd->name = "edb7312-nand"; 168 ep7312_mtd->name = "edb7312-nand";
176 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, 169 mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
177 &mtd_parts, 0);
178 if (mtd_parts_nb > 0) 170 if (mtd_parts_nb > 0)
179 part_type = "command line"; 171 part_type = "command line";
180 else 172 else
@@ -193,24 +185,23 @@ static int __init ep7312_init (void)
193 /* Return happy */ 185 /* Return happy */
194 return 0; 186 return 0;
195} 187}
188
196module_init(ep7312_init); 189module_init(ep7312_init);
197 190
198/* 191/*
199 * Clean up routine 192 * Clean up routine
200 */ 193 */
201static void __exit ep7312_cleanup (void) 194static void __exit ep7312_cleanup(void)
202{ 195{
203 struct nand_chip *this = (struct nand_chip *) &ep7312_mtd[1]; 196 struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1];
204 197
205 /* Release resources, unregister device */ 198 /* Release resources, unregister device */
206 nand_release (ep7312_mtd); 199 nand_release(ep7312_mtd);

207
208 /* Free internal data buffer */
209 kfree (this->data_buf);
210 200
211 /* Free the MTD device structure */ 201 /* Free the MTD device structure */
212 kfree (ep7312_mtd); 202 kfree(ep7312_mtd);
213} 203}
204
214module_exit(ep7312_cleanup); 205module_exit(ep7312_cleanup);
215 206
216MODULE_LICENSE("GPL"); 207MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index f68f7a99a630..2d585d2d090c 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com) 4 * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
5 * 5 *
6 * Derived from drivers/mtd/nand/edb7312.c 6 * Derived from drivers/mtd/nand/edb7312.c
7 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) 7 * Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
8 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) 8 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
9 * 9 *
10 * $Id: h1910.c,v 1.6 2005/11/07 11:14:30 gleixner Exp $ 10 * $Id: h1910.c,v 1.6 2005/11/07 11:14:30 gleixner Exp $
@@ -26,7 +26,7 @@
26#include <linux/mtd/nand.h> 26#include <linux/mtd/nand.h>
27#include <linux/mtd/partitions.h> 27#include <linux/mtd/partitions.h>
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */ 29#include <asm/arch/hardware.h> /* for CLPS7111_VIRT_BASE */
30#include <asm/sizes.h> 30#include <asm/sizes.h>
31#include <asm/arch/h1900-gpio.h> 31#include <asm/arch/h1900-gpio.h>
32#include <asm/arch/ipaq.h> 32#include <asm/arch/ipaq.h>
@@ -45,47 +45,29 @@ static struct mtd_info *h1910_nand_mtd = NULL;
45 * Define static partitions for flash device 45 * Define static partitions for flash device
46 */ 46 */
47static struct mtd_partition partition_info[] = { 47static struct mtd_partition partition_info[] = {
48 { name: "h1910 NAND Flash", 48 {name:"h1910 NAND Flash",
49 offset: 0, 49 offset:0,
50 size: 16*1024*1024 } 50 size:16 * 1024 * 1024}
51}; 51};
52
52#define NUM_PARTITIONS 1 53#define NUM_PARTITIONS 1
53 54
54#endif 55#endif
55 56
56
57/* 57/*
58 * hardware specific access to control-lines 58 * hardware specific access to control-lines
59 *
60 * NAND_NCE: bit 0 - don't care
61 * NAND_CLE: bit 1 - address bit 2
62 * NAND_ALE: bit 2 - address bit 3
59 */ 63 */
60static void h1910_hwcontrol(struct mtd_info *mtd, int cmd) 64static void h1910_hwcontrol(struct mtd_info *mtd, int cmd,
65 unsigned int ctrl)
61{ 66{
62 struct nand_chip* this = (struct nand_chip *) (mtd->priv); 67 struct nand_chip *chip = mtd->priv;
63 68
64 switch(cmd) { 69 if (cmd != NAND_CMD_NONE)
65 70 writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1));
66 case NAND_CTL_SETCLE:
67 this->IO_ADDR_R |= (1 << 2);
68 this->IO_ADDR_W |= (1 << 2);
69 break;
70 case NAND_CTL_CLRCLE:
71 this->IO_ADDR_R &= ~(1 << 2);
72 this->IO_ADDR_W &= ~(1 << 2);
73 break;
74
75 case NAND_CTL_SETALE:
76 this->IO_ADDR_R |= (1 << 3);
77 this->IO_ADDR_W |= (1 << 3);
78 break;
79 case NAND_CTL_CLRALE:
80 this->IO_ADDR_R &= ~(1 << 3);
81 this->IO_ADDR_W &= ~(1 << 3);
82 break;
83
84 case NAND_CTL_SETNCE:
85 break;
86 case NAND_CTL_CLRNCE:
87 break;
88 }
89} 71}
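
Here CLE and ALE are wired to address lines, so no latch write is needed at all: the comment above gives NAND_CLE as bit 1 and NAND_ALE as bit 2 of ctrl, and ((ctrl & 0x6) << 1) lands them on address bits 2 and 3, the same bits the removed SETCLE/SETALE cases used to set. A small illustrative sketch of that decoding, with the pointer arithmetic made explicit:

	unsigned long addr = (unsigned long)chip->IO_ADDR_W;

	addr |= (ctrl & NAND_CLE) << 1;		/* 0x02 -> address bit 2 */
	addr |= (ctrl & NAND_ALE) << 1;		/* 0x04 -> address bit 3 */
	if (cmd != NAND_CMD_NONE)
		writeb(cmd, (void __iomem *)addr);
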
90 72
91/* 73/*
@@ -101,7 +83,7 @@ static int h1910_device_ready(struct mtd_info *mtd)
101/* 83/*
102 * Main initialization routine 84 * Main initialization routine
103 */ 85 */
104static int __init h1910_init (void) 86static int __init h1910_init(void)
105{ 87{
106 struct nand_chip *this; 88 struct nand_chip *this;
107 const char *part_type = 0; 89 const char *part_type = 0;
@@ -119,24 +101,23 @@ static int __init h1910_init (void)
119 } 101 }
120 102
121 /* Allocate memory for MTD device structure and private data */ 103 /* Allocate memory for MTD device structure and private data */
122 h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + 104 h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
123 sizeof(struct nand_chip),
124 GFP_KERNEL);
125 if (!h1910_nand_mtd) { 105 if (!h1910_nand_mtd) {
126 printk("Unable to allocate h1910 NAND MTD device structure.\n"); 106 printk("Unable to allocate h1910 NAND MTD device structure.\n");
127 iounmap ((void *) nandaddr); 107 iounmap((void *)nandaddr);
128 return -ENOMEM; 108 return -ENOMEM;
129 } 109 }
130 110
131 /* Get pointer to private data */ 111 /* Get pointer to private data */
132 this = (struct nand_chip *) (&h1910_nand_mtd[1]); 112 this = (struct nand_chip *)(&h1910_nand_mtd[1]);
133 113
134 /* Initialize structures */ 114 /* Initialize structures */
135 memset((char *) h1910_nand_mtd, 0, sizeof(struct mtd_info)); 115 memset(h1910_nand_mtd, 0, sizeof(struct mtd_info));
136 memset((char *) this, 0, sizeof(struct nand_chip)); 116 memset(this, 0, sizeof(struct nand_chip));
137 117
138 /* Link the private data with the MTD structure */ 118 /* Link the private data with the MTD structure */
139 h1910_nand_mtd->priv = this; 119 h1910_nand_mtd->priv = this;
120 h1910_nand_mtd->owner = THIS_MODULE;
140 121
141 /* 122 /*
142 * Enable VPEN 123 * Enable VPEN
@@ -146,31 +127,28 @@ static int __init h1910_init (void)
146 /* insert callbacks */ 127 /* insert callbacks */
147 this->IO_ADDR_R = nandaddr; 128 this->IO_ADDR_R = nandaddr;
148 this->IO_ADDR_W = nandaddr; 129 this->IO_ADDR_W = nandaddr;
149 this->hwcontrol = h1910_hwcontrol; 130 this->cmd_ctrl = h1910_hwcontrol;
150 this->dev_ready = NULL; /* unknown whether that was correct or not so we will just do it like this */ 131 this->dev_ready = NULL; /* unknown whether that was correct or not so we will just do it like this */
151 /* 15 us command delay time */ 132 /* 15 us command delay time */
152 this->chip_delay = 50; 133 this->chip_delay = 50;
153 this->eccmode = NAND_ECC_SOFT; 134 this->ecc.mode = NAND_ECC_SOFT;
154 this->options = NAND_NO_AUTOINCR; 135 this->options = NAND_NO_AUTOINCR;
155 136
156 /* Scan to find existence of the device */ 137 /* Scan to find existence of the device */
157 if (nand_scan (h1910_nand_mtd, 1)) { 138 if (nand_scan(h1910_nand_mtd, 1)) {
158 printk(KERN_NOTICE "No NAND device - returning -ENXIO\n"); 139 printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
159 kfree (h1910_nand_mtd); 140 kfree(h1910_nand_mtd);
160 iounmap ((void *) nandaddr); 141 iounmap((void *)nandaddr);
161 return -ENXIO; 142 return -ENXIO;
162 } 143 }
163
164#ifdef CONFIG_MTD_CMDLINE_PARTS 144#ifdef CONFIG_MTD_CMDLINE_PARTS
165 mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, 145 mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
166 "h1910-nand");
167 if (mtd_parts_nb > 0) 146 if (mtd_parts_nb > 0)
168 part_type = "command line"; 147 part_type = "command line";
169 else 148 else
170 mtd_parts_nb = 0; 149 mtd_parts_nb = 0;
171#endif 150#endif
172 if (mtd_parts_nb == 0) 151 if (mtd_parts_nb == 0) {
173 {
174 mtd_parts = partition_info; 152 mtd_parts = partition_info;
175 mtd_parts_nb = NUM_PARTITIONS; 153 mtd_parts_nb = NUM_PARTITIONS;
176 part_type = "static"; 154 part_type = "static";
@@ -183,24 +161,26 @@ static int __init h1910_init (void)
183 /* Return happy */ 161 /* Return happy */
184 return 0; 162 return 0;
185} 163}
164
186module_init(h1910_init); 165module_init(h1910_init);
187 166
188/* 167/*
189 * Clean up routine 168 * Clean up routine
190 */ 169 */
191static void __exit h1910_cleanup (void) 170static void __exit h1910_cleanup(void)
192{ 171{
193 struct nand_chip *this = (struct nand_chip *) &h1910_nand_mtd[1]; 172 struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];
194 173
195 /* Release resources, unregister device */ 174 /* Release resources, unregister device */
196 nand_release (h1910_nand_mtd); 175 nand_release(h1910_nand_mtd);
197 176
198 /* Release io resource */ 177 /* Release io resource */
199 iounmap ((void *) this->IO_ADDR_W); 178 iounmap((void *)this->IO_ADDR_W);
200 179
201 /* Free the MTD device structure */ 180 /* Free the MTD device structure */
202 kfree (h1910_nand_mtd); 181 kfree(h1910_nand_mtd);
203} 182}
183
204module_exit(h1910_cleanup); 184module_exit(h1910_cleanup);
205 185
206MODULE_LICENSE("GPL"); 186MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 95e96fa1fceb..27083ed0a017 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -10,67 +10,31 @@
10 * http://www.linux-mtd.infradead.org/tech/nand.html 10 * http://www.linux-mtd.infradead.org/tech/nand.html
11 * 11 *
12 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) 12 * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
13 * 2002 Thomas Gleixner (tglx@linutronix.de) 13 * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
14 * 14 *
15 * 02-08-2004 tglx: support for strange chips, which cannot auto increment 15 * Credits:
16 * pages on read / read_oob
17 *
18 * 03-17-2004 tglx: Check ready before auto increment check. Simon Bayes
19 * pointed this out, as he marked an auto increment capable chip
20 * as NOAUTOINCR in the board driver.
21 * Make reads over block boundaries work too
22 *
23 * 04-14-2004 tglx: first working version for 2k page size chips
24 *
25 * 05-19-2004 tglx: Basic support for Renesas AG-AND chips
26 *
27 * 09-24-2004 tglx: add support for hardware controllers (e.g. ECC) shared
28 * among multiple independend devices. Suggestions and initial patch
29 * from Ben Dooks <ben-mtd@fluff.org>
30 *
31 * 12-05-2004 dmarlin: add workaround for Renesas AG-AND chips "disturb" issue.
32 * Basically, any block not rewritten may lose data when surrounding blocks
33 * are rewritten many times. JFFS2 ensures this doesn't happen for blocks
34 * it uses, but the Bad Block Table(s) may not be rewritten. To ensure they
35 * do not lose data, force them to be rewritten when some of the surrounding
36 * blocks are erased. Rather than tracking a specific nearby block (which
37 * could itself go bad), use a page address 'mask' to select several blocks
38 * in the same area, and rewrite the BBT when any of them are erased.
39 *
40 * 01-03-2005 dmarlin: added support for the device recovery command sequence for Renesas
41 * AG-AND chips. If there was a sudden loss of power during an erase operation,
42 * a "device recovery" operation must be performed when power is restored
43 * to ensure correct operation.
44 *
45 * 01-20-2005 dmarlin: added support for optional hardware specific callback routine to
46 * perform extra error status checks on erase and write failures. This required
47 * adding a wrapper function for nand_read_ecc.
48 *
49 * 08-20-2005 vwool: suspend/resume added
50 *
51 * Credits:
52 * David Woodhouse for adding multichip support 16 * David Woodhouse for adding multichip support
53 * 17 *
54 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the 18 * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
55 * rework for 2K page size chips 19 * rework for 2K page size chips
56 * 20 *
57 * TODO: 21 * TODO:
58 * Enable cached programming for 2k page size chips 22 * Enable cached programming for 2k page size chips
59 * Check, if mtd->ecctype should be set to MTD_ECC_HW 23 * Check, if mtd->ecctype should be set to MTD_ECC_HW
60 * if we have HW ecc support. 24 * if we have HW ecc support.
61 * The AG-AND chips have nice features for speed improvement, 25 * The AG-AND chips have nice features for speed improvement,
62 * which are not supported yet. Read / program 4 pages in one go. 26 * which are not supported yet. Read / program 4 pages in one go.
63 * 27 *
64 * $Id: nand_base.c,v 1.150 2005/09/15 13:58:48 vwool Exp $
65 *
66 * This program is free software; you can redistribute it and/or modify 28 * This program is free software; you can redistribute it and/or modify
67 * it under the terms of the GNU General Public License version 2 as 29 * it under the terms of the GNU General Public License version 2 as
68 * published by the Free Software Foundation. 30 * published by the Free Software Foundation.
69 * 31 *
70 */ 32 */
71 33
34#include <linux/module.h>
72#include <linux/delay.h> 35#include <linux/delay.h>
73#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/err.h>
74#include <linux/sched.h> 38#include <linux/sched.h>
75#include <linux/slab.h> 39#include <linux/slab.h>
76#include <linux/types.h> 40#include <linux/types.h>
@@ -88,75 +52,46 @@
88#endif 52#endif
89 53
90/* Define default oob placement schemes for large and small page devices */ 54/* Define default oob placement schemes for large and small page devices */
91static struct nand_oobinfo nand_oob_8 = { 55static struct nand_ecclayout nand_oob_8 = {
92 .useecc = MTD_NANDECC_AUTOPLACE,
93 .eccbytes = 3, 56 .eccbytes = 3,
94 .eccpos = {0, 1, 2}, 57 .eccpos = {0, 1, 2},
95 .oobfree = { {3, 2}, {6, 2} } 58 .oobfree = {
59 {.offset = 3,
60 .length = 2},
61 {.offset = 6,
62 .length = 2}}
96}; 63};
97 64
98static struct nand_oobinfo nand_oob_16 = { 65static struct nand_ecclayout nand_oob_16 = {
99 .useecc = MTD_NANDECC_AUTOPLACE,
100 .eccbytes = 6, 66 .eccbytes = 6,
101 .eccpos = {0, 1, 2, 3, 6, 7}, 67 .eccpos = {0, 1, 2, 3, 6, 7},
102 .oobfree = { {8, 8} } 68 .oobfree = {
69 {.offset = 8,
70 . length = 8}}
103}; 71};
104 72
105static struct nand_oobinfo nand_oob_64 = { 73static struct nand_ecclayout nand_oob_64 = {
106 .useecc = MTD_NANDECC_AUTOPLACE,
107 .eccbytes = 24, 74 .eccbytes = 24,
108 .eccpos = { 75 .eccpos = {
109 40, 41, 42, 43, 44, 45, 46, 47, 76 40, 41, 42, 43, 44, 45, 46, 47,
110 48, 49, 50, 51, 52, 53, 54, 55, 77 48, 49, 50, 51, 52, 53, 54, 55,
111 56, 57, 58, 59, 60, 61, 62, 63}, 78 56, 57, 58, 59, 60, 61, 62, 63},
112 .oobfree = { {2, 38} } 79 .oobfree = {
80 {.offset = 2,
81 .length = 38}}
113}; 82};
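
The old nand_oobinfo tables above become struct nand_ecclayout: the useecc flag disappears and each free range is a named {offset, length} pair. A hedged example of a board-specific layout for a 16-byte OOB area (values are hypothetical, not from this patch):

	static struct nand_ecclayout board_oob_16 = {
		.eccbytes = 6,
		.eccpos = {10, 11, 12, 13, 14, 15},
		.oobfree = {
			{ .offset = 2, .length = 8 }	/* leave the first two bytes alone */
		}
	};

	/* hooked up in the board driver before nand_scan() is called */
	chip->ecc.layout = &board_oob_16;
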
114 83
115/* This is used for padding purposes in nand_write_oob */ 84static int nand_get_device(struct nand_chip *chip, struct mtd_info *mtd,
116static u_char ffchars[] = { 85 int new_state);
117 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 86
118 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 87static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
119 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 88 struct mtd_oob_ops *ops);
120 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
121 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
122 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
123 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
124 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
125};
126 89
127/* 90/*
128 * NAND low-level MTD interface functions 91 * For devices which display every fart in the system on a seperate LED. Is
92 * compiled away when LED support is disabled.
129 */ 93 */
130static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len); 94DEFINE_LED_TRIGGER(nand_led_trigger);
131static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len);
132static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len);
133
134static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
135static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
136 size_t * retlen, u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
137static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf);
138static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf);
139static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len,
140 size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel);
141static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char *buf);
142static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs,
143 unsigned long count, loff_t to, size_t * retlen);
144static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs,
145 unsigned long count, loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
146static int nand_erase (struct mtd_info *mtd, struct erase_info *instr);
147static void nand_sync (struct mtd_info *mtd);
148
149/* Some internal functions */
150static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, u_char *oob_buf,
151 struct nand_oobinfo *oobsel, int mode);
152#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
153static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages,
154 u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode);
155#else
156#define nand_verify_pages(...) (0)
157#endif
158
159static int nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int new_state);
160 95
161/** 96/**
162 * nand_release_device - [GENERIC] release chip 97 * nand_release_device - [GENERIC] release chip
@@ -164,27 +99,19 @@ static int nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int ne
164 * 99 *
165 * Deselect, release chip lock and wake up anyone waiting on the device 100 * Deselect, release chip lock and wake up anyone waiting on the device
166 */ 101 */
167static void nand_release_device (struct mtd_info *mtd) 102static void nand_release_device(struct mtd_info *mtd)
168{ 103{
169 struct nand_chip *this = mtd->priv; 104 struct nand_chip *chip = mtd->priv;
170 105
171 /* De-select the NAND device */ 106 /* De-select the NAND device */
172 this->select_chip(mtd, -1); 107 chip->select_chip(mtd, -1);
173 108
174 if (this->controller) { 109 /* Release the controller and the chip */
175 /* Release the controller and the chip */ 110 spin_lock(&chip->controller->lock);
176 spin_lock(&this->controller->lock); 111 chip->controller->active = NULL;
177 this->controller->active = NULL; 112 chip->state = FL_READY;
178 this->state = FL_READY; 113 wake_up(&chip->controller->wq);
179 wake_up(&this->controller->wq); 114 spin_unlock(&chip->controller->lock);
180 spin_unlock(&this->controller->lock);
181 } else {
182 /* Release the chip */
183 spin_lock(&this->chip_lock);
184 this->state = FL_READY;
185 wake_up(&this->wq);
186 spin_unlock(&this->chip_lock);
187 }
188} 115}
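
With the per-chip lock fallback gone, every chip is expected to point at a struct nand_hw_control; the core presumably falls back to an embedded per-chip instance when a board driver leaves it NULL, and several chips behind one controller can share a single lock and wait queue. A sketch of that sharing (board_controller and both chip pointers are hypothetical):

	static struct nand_hw_control board_controller;

	/* in the board probe, before registering either chip */
	spin_lock_init(&board_controller.lock);
	init_waitqueue_head(&board_controller.wq);
	chip0->controller = &board_controller;
	chip1->controller = &board_controller;
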
189 116
190/** 117/**
@@ -193,23 +120,10 @@ static void nand_release_device (struct mtd_info *mtd)
193 * 120 *
194 * Default read function for 8bit buswith 121 * Default read function for 8bit buswith
195 */ 122 */
196static u_char nand_read_byte(struct mtd_info *mtd) 123static uint8_t nand_read_byte(struct mtd_info *mtd)
197{
198 struct nand_chip *this = mtd->priv;
199 return readb(this->IO_ADDR_R);
200}
201
202/**
203 * nand_write_byte - [DEFAULT] write one byte to the chip
204 * @mtd: MTD device structure
205 * @byte: pointer to data byte to write
206 *
207 * Default write function for 8it buswith
208 */
209static void nand_write_byte(struct mtd_info *mtd, u_char byte)
210{ 124{
211 struct nand_chip *this = mtd->priv; 125 struct nand_chip *chip = mtd->priv;
212 writeb(byte, this->IO_ADDR_W); 126 return readb(chip->IO_ADDR_R);
213} 127}
214 128
215/** 129/**
@@ -219,24 +133,10 @@ static void nand_write_byte(struct mtd_info *mtd, u_char byte)
219 * Default read function for 16bit buswith with 133 * Default read function for 16bit buswith with
220 * endianess conversion 134 * endianess conversion
221 */ 135 */
222static u_char nand_read_byte16(struct mtd_info *mtd) 136static uint8_t nand_read_byte16(struct mtd_info *mtd)
223{ 137{
224 struct nand_chip *this = mtd->priv; 138 struct nand_chip *chip = mtd->priv;
225 return (u_char) cpu_to_le16(readw(this->IO_ADDR_R)); 139 return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
226}
227
228/**
229 * nand_write_byte16 - [DEFAULT] write one byte endianess aware to the chip
230 * @mtd: MTD device structure
231 * @byte: pointer to data byte to write
232 *
233 * Default write function for 16bit buswith with
234 * endianess conversion
235 */
236static void nand_write_byte16(struct mtd_info *mtd, u_char byte)
237{
238 struct nand_chip *this = mtd->priv;
239 writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
240} 140}
241 141
242/** 142/**
@@ -248,22 +148,8 @@ static void nand_write_byte16(struct mtd_info *mtd, u_char byte)
248 */ 148 */
249static u16 nand_read_word(struct mtd_info *mtd) 149static u16 nand_read_word(struct mtd_info *mtd)
250{ 150{
251 struct nand_chip *this = mtd->priv; 151 struct nand_chip *chip = mtd->priv;
252 return readw(this->IO_ADDR_R); 152 return readw(chip->IO_ADDR_R);
253}
254
255/**
256 * nand_write_word - [DEFAULT] write one word to the chip
257 * @mtd: MTD device structure
258 * @word: data word to write
259 *
260 * Default write function for 16bit buswith without
261 * endianess conversion
262 */
263static void nand_write_word(struct mtd_info *mtd, u16 word)
264{
265 struct nand_chip *this = mtd->priv;
266 writew(word, this->IO_ADDR_W);
267} 153}
268 154
269/** 155/**
@@ -273,15 +159,15 @@ static void nand_write_word(struct mtd_info *mtd, u16 word)
273 * 159 *
274 * Default select function for 1 chip devices. 160 * Default select function for 1 chip devices.
275 */ 161 */
276static void nand_select_chip(struct mtd_info *mtd, int chip) 162static void nand_select_chip(struct mtd_info *mtd, int chipnr)
277{ 163{
278 struct nand_chip *this = mtd->priv; 164 struct nand_chip *chip = mtd->priv;
279 switch(chip) { 165
166 switch (chipnr) {
280 case -1: 167 case -1:
281 this->hwcontrol(mtd, NAND_CTL_CLRNCE); 168 chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
282 break; 169 break;
283 case 0: 170 case 0:
284 this->hwcontrol(mtd, NAND_CTL_SETNCE);
285 break; 171 break;
286 172
287 default: 173 default:
@@ -297,13 +183,13 @@ static void nand_select_chip(struct mtd_info *mtd, int chip)
297 * 183 *
298 * Default write function for 8bit buswith 184 * Default write function for 8bit buswith
299 */ 185 */
300static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 186static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
301{ 187{
302 int i; 188 int i;
303 struct nand_chip *this = mtd->priv; 189 struct nand_chip *chip = mtd->priv;
304 190
305 for (i=0; i<len; i++) 191 for (i = 0; i < len; i++)
306 writeb(buf[i], this->IO_ADDR_W); 192 writeb(buf[i], chip->IO_ADDR_W);
307} 193}
308 194
309/** 195/**
@@ -314,13 +200,13 @@ static void nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
314 * 200 *
315 * Default read function for 8bit buswith 201 * Default read function for 8bit buswith
316 */ 202 */
317static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) 203static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
318{ 204{
319 int i; 205 int i;
320 struct nand_chip *this = mtd->priv; 206 struct nand_chip *chip = mtd->priv;
321 207
322 for (i=0; i<len; i++) 208 for (i = 0; i < len; i++)
323 buf[i] = readb(this->IO_ADDR_R); 209 buf[i] = readb(chip->IO_ADDR_R);
324} 210}
325 211
326/** 212/**
@@ -331,15 +217,14 @@ static void nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
331 * 217 *
332 * Default verify function for 8bit buswith 218 * Default verify function for 8bit buswith
333 */ 219 */
334static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) 220static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
335{ 221{
336 int i; 222 int i;
337 struct nand_chip *this = mtd->priv; 223 struct nand_chip *chip = mtd->priv;
338 224
339 for (i=0; i<len; i++) 225 for (i = 0; i < len; i++)
340 if (buf[i] != readb(this->IO_ADDR_R)) 226 if (buf[i] != readb(chip->IO_ADDR_R))
341 return -EFAULT; 227 return -EFAULT;
342
343 return 0; 228 return 0;
344} 229}
345 230
@@ -351,15 +236,15 @@ static int nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
351 * 236 *
352 * Default write function for 16bit buswith 237 * Default write function for 16bit buswith
353 */ 238 */
354static void nand_write_buf16(struct mtd_info *mtd, const u_char *buf, int len) 239static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
355{ 240{
356 int i; 241 int i;
357 struct nand_chip *this = mtd->priv; 242 struct nand_chip *chip = mtd->priv;
358 u16 *p = (u16 *) buf; 243 u16 *p = (u16 *) buf;
359 len >>= 1; 244 len >>= 1;
360 245
361 for (i=0; i<len; i++) 246 for (i = 0; i < len; i++)
362 writew(p[i], this->IO_ADDR_W); 247 writew(p[i], chip->IO_ADDR_W);
363 248
364} 249}
365 250
@@ -371,15 +256,15 @@ static void nand_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
371 * 256 *
372 * Default read function for 16bit buswith 257 * Default read function for 16bit buswith
373 */ 258 */
374static void nand_read_buf16(struct mtd_info *mtd, u_char *buf, int len) 259static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
375{ 260{
376 int i; 261 int i;
377 struct nand_chip *this = mtd->priv; 262 struct nand_chip *chip = mtd->priv;
378 u16 *p = (u16 *) buf; 263 u16 *p = (u16 *) buf;
379 len >>= 1; 264 len >>= 1;
380 265
381 for (i=0; i<len; i++) 266 for (i = 0; i < len; i++)
382 p[i] = readw(this->IO_ADDR_R); 267 p[i] = readw(chip->IO_ADDR_R);
383} 268}
384 269
385/** 270/**
@@ -390,15 +275,15 @@ static void nand_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
390 * 275 *
391 * Default verify function for 16bit buswith 276 * Default verify function for 16bit buswith
392 */ 277 */
393static int nand_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len) 278static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
394{ 279{
395 int i; 280 int i;
396 struct nand_chip *this = mtd->priv; 281 struct nand_chip *chip = mtd->priv;
397 u16 *p = (u16 *) buf; 282 u16 *p = (u16 *) buf;
398 len >>= 1; 283 len >>= 1;
399 284
400 for (i=0; i<len; i++) 285 for (i = 0; i < len; i++)
401 if (p[i] != readw(this->IO_ADDR_R)) 286 if (p[i] != readw(chip->IO_ADDR_R))
402 return -EFAULT; 287 return -EFAULT;
403 288
404 return 0; 289 return 0;
@@ -415,38 +300,37 @@ static int nand_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
415static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) 300static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
416{ 301{
417 int page, chipnr, res = 0; 302 int page, chipnr, res = 0;
418 struct nand_chip *this = mtd->priv; 303 struct nand_chip *chip = mtd->priv;
419 u16 bad; 304 u16 bad;
420 305
421 if (getchip) { 306 if (getchip) {
422 page = (int)(ofs >> this->page_shift); 307 page = (int)(ofs >> chip->page_shift);
423 chipnr = (int)(ofs >> this->chip_shift); 308 chipnr = (int)(ofs >> chip->chip_shift);
424 309
425 /* Grab the lock and see if the device is available */ 310 nand_get_device(chip, mtd, FL_READING);
426 nand_get_device (this, mtd, FL_READING);
427 311
428 /* Select the NAND device */ 312 /* Select the NAND device */
429 this->select_chip(mtd, chipnr); 313 chip->select_chip(mtd, chipnr);
430 } else 314 } else
431 page = (int) ofs; 315 page = (int)ofs;
432 316
433 if (this->options & NAND_BUSWIDTH_16) { 317 if (chip->options & NAND_BUSWIDTH_16) {
434 this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos & 0xFE, page & this->pagemask); 318 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE,
435 bad = cpu_to_le16(this->read_word(mtd)); 319 page & chip->pagemask);
436 if (this->badblockpos & 0x1) 320 bad = cpu_to_le16(chip->read_word(mtd));
321 if (chip->badblockpos & 0x1)
437 bad >>= 8; 322 bad >>= 8;
438 if ((bad & 0xFF) != 0xff) 323 if ((bad & 0xFF) != 0xff)
439 res = 1; 324 res = 1;
440 } else { 325 } else {
441 this->cmdfunc (mtd, NAND_CMD_READOOB, this->badblockpos, page & this->pagemask); 326 chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
442 if (this->read_byte(mtd) != 0xff) 327 page & chip->pagemask);
328 if (chip->read_byte(mtd) != 0xff)
443 res = 1; 329 res = 1;
444 } 330 }
445 331
446 if (getchip) { 332 if (getchip)
447 /* Deselect and wake up anyone waiting on the device */
448 nand_release_device(mtd); 333 nand_release_device(mtd);
449 }
450 334
451 return res; 335 return res;
452} 336}
@@ -461,23 +345,33 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip)
461*/ 345*/
462static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) 346static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
463{ 347{
464 struct nand_chip *this = mtd->priv; 348 struct nand_chip *chip = mtd->priv;
465 u_char buf[2] = {0, 0}; 349 uint8_t buf[2] = { 0, 0 };
466 size_t retlen; 350 int block, ret;
467 int block;
468 351
469 /* Get block number */ 352 /* Get block number */
470 block = ((int) ofs) >> this->bbt_erase_shift; 353 block = ((int)ofs) >> chip->bbt_erase_shift;
471 if (this->bbt) 354 if (chip->bbt)
472 this->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 355 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
473 356
474 /* Do we have a flash based bad block table ? */ 357 /* Do we have a flash based bad block table ? */
475 if (this->options & NAND_USE_FLASH_BBT) 358 if (chip->options & NAND_USE_FLASH_BBT)
476 return nand_update_bbt (mtd, ofs); 359 ret = nand_update_bbt(mtd, ofs);
360 else {
361 /* We write two bytes, so we dont have to mess with 16 bit
362 * access
363 */
364 ofs += mtd->oobsize;
365 chip->ops.len = 2;
366 chip->ops.datbuf = NULL;
367 chip->ops.oobbuf = buf;
368 chip->ops.ooboffs = chip->badblockpos & ~0x01;
477 369
478 /* We write two bytes, so we dont have to mess with 16 bit access */ 370 ret = nand_do_write_oob(mtd, ofs, &chip->ops);
479 ofs += mtd->oobsize + (this->badblockpos & ~0x01); 371 }
480 return nand_write_oob (mtd, ofs , 2, &retlen, buf); 372 if (!ret)
373 mtd->ecc_stats.badblocks++;
374 return ret;
481} 375}
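
When there is no flash-based BBT, the marker is now written through the common OOB path instead of the public nand_write_oob(). A restated sketch of the operation set up above, using only the mtd_oob_ops fields this hunk touches:

	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };

	ops.len     = 2;			 /* two bytes: no 16-bit special case needed */
	ops.datbuf  = NULL;			 /* OOB-only write, no page data */
	ops.oobbuf  = buf;
	ops.ooboffs = chip->badblockpos & ~0x01; /* even start covers both marker bytes */
	ret = nand_do_write_oob(mtd, ofs + mtd->oobsize, &ops); /* offset adjusted as in the hunk above */
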
482 376
483/** 377/**
@@ -487,12 +381,12 @@ static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
487 * 381 *
488 * The function expects, that the device is already selected 382 * The function expects, that the device is already selected
489 */ 383 */
490static int nand_check_wp (struct mtd_info *mtd) 384static int nand_check_wp(struct mtd_info *mtd)
491{ 385{
492 struct nand_chip *this = mtd->priv; 386 struct nand_chip *chip = mtd->priv;
493 /* Check the WP bit */ 387 /* Check the WP bit */
494 this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1); 388 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
495 return (this->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1; 389 return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
496} 390}
497 391
498/** 392/**
@@ -505,32 +399,31 @@ static int nand_check_wp (struct mtd_info *mtd)
505 * Check, if the block is bad. Either by reading the bad block table or 399 * Check, if the block is bad. Either by reading the bad block table or
506 * calling of the scan function. 400 * calling of the scan function.
507 */ 401 */
508static int nand_block_checkbad (struct mtd_info *mtd, loff_t ofs, int getchip, int allowbbt) 402static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
403 int allowbbt)
509{ 404{
510 struct nand_chip *this = mtd->priv; 405 struct nand_chip *chip = mtd->priv;
511 406
512 if (!this->bbt) 407 if (!chip->bbt)
513 return this->block_bad(mtd, ofs, getchip); 408 return chip->block_bad(mtd, ofs, getchip);
514 409
515 /* Return info from the table */ 410 /* Return info from the table */
516 return nand_isbad_bbt (mtd, ofs, allowbbt); 411 return nand_isbad_bbt(mtd, ofs, allowbbt);
517} 412}
518 413
519DEFINE_LED_TRIGGER(nand_led_trigger);
520
521/* 414/*
522 * Wait for the ready pin, after a command 415 * Wait for the ready pin, after a command
523 * The timeout is catched later. 416 * The timeout is catched later.
524 */ 417 */
525static void nand_wait_ready(struct mtd_info *mtd) 418static void nand_wait_ready(struct mtd_info *mtd)
526{ 419{
527 struct nand_chip *this = mtd->priv; 420 struct nand_chip *chip = mtd->priv;
528 unsigned long timeo = jiffies + 2; 421 unsigned long timeo = jiffies + 2;
529 422
530 led_trigger_event(nand_led_trigger, LED_FULL); 423 led_trigger_event(nand_led_trigger, LED_FULL);
531 /* wait until command is processed or timeout occures */ 424 /* wait until command is processed or timeout occures */
532 do { 425 do {
533 if (this->dev_ready(mtd)) 426 if (chip->dev_ready(mtd))
534 break; 427 break;
535 touch_softlockup_watchdog(); 428 touch_softlockup_watchdog();
536 } while (time_before(jiffies, timeo)); 429 } while (time_before(jiffies, timeo));
@@ -547,21 +440,21 @@ static void nand_wait_ready(struct mtd_info *mtd)
547 * Send command to NAND device. This function is used for small page 440 * Send command to NAND device. This function is used for small page
548 * devices (256/512 Bytes per page) 441 * devices (256/512 Bytes per page)
549 */ 442 */
550static void nand_command (struct mtd_info *mtd, unsigned command, int column, int page_addr) 443static void nand_command(struct mtd_info *mtd, unsigned int command,
444 int column, int page_addr)
551{ 445{
552 register struct nand_chip *this = mtd->priv; 446 register struct nand_chip *chip = mtd->priv;
447 int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
553 448
554 /* Begin command latch cycle */
555 this->hwcontrol(mtd, NAND_CTL_SETCLE);
556 /* 449 /*
557 * Write out the command to the device. 450 * Write out the command to the device.
558 */ 451 */
559 if (command == NAND_CMD_SEQIN) { 452 if (command == NAND_CMD_SEQIN) {
560 int readcmd; 453 int readcmd;
561 454
562 if (column >= mtd->oobblock) { 455 if (column >= mtd->writesize) {
563 /* OOB area */ 456 /* OOB area */
564 column -= mtd->oobblock; 457 column -= mtd->writesize;
565 readcmd = NAND_CMD_READOOB; 458 readcmd = NAND_CMD_READOOB;
566 } else if (column < 256) { 459 } else if (column < 256) {
567 /* First 256 bytes --> READ0 */ 460 /* First 256 bytes --> READ0 */
@@ -570,38 +463,37 @@ static void nand_command (struct mtd_info *mtd, unsigned command, int column, in
570 column -= 256; 463 column -= 256;
571 readcmd = NAND_CMD_READ1; 464 readcmd = NAND_CMD_READ1;
572 } 465 }
573 this->write_byte(mtd, readcmd); 466 chip->cmd_ctrl(mtd, readcmd, ctrl);
467 ctrl &= ~NAND_CTRL_CHANGE;
574 } 468 }
575 this->write_byte(mtd, command); 469 chip->cmd_ctrl(mtd, command, ctrl);
576
577 /* Set ALE and clear CLE to start address cycle */
578 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
579 470
580 if (column != -1 || page_addr != -1) { 471 /*
581 this->hwcontrol(mtd, NAND_CTL_SETALE); 472 * Address cycle, when necessary
582 473 */
583 /* Serially input address */ 474 ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
584 if (column != -1) { 475 /* Serially input address */
585 /* Adjust columns for 16 bit buswidth */ 476 if (column != -1) {
586 if (this->options & NAND_BUSWIDTH_16) 477 /* Adjust columns for 16 bit buswidth */
587 column >>= 1; 478 if (chip->options & NAND_BUSWIDTH_16)
588 this->write_byte(mtd, column); 479 column >>= 1;
589 } 480 chip->cmd_ctrl(mtd, column, ctrl);
590 if (page_addr != -1) { 481 ctrl &= ~NAND_CTRL_CHANGE;
591 this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); 482 }
592 this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); 483 if (page_addr != -1) {
593 /* One more address cycle for devices > 32MiB */ 484 chip->cmd_ctrl(mtd, page_addr, ctrl);
594 if (this->chipsize > (32 << 20)) 485 ctrl &= ~NAND_CTRL_CHANGE;
595 this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0x0f)); 486 chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
596 } 487 /* One more address cycle for devices > 32MiB */
597 /* Latch in address */ 488 if (chip->chipsize > (32 << 20))
598 this->hwcontrol(mtd, NAND_CTL_CLRALE); 489 chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
599 } 490 }
491 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
600 492
601 /* 493 /*
602 * program and erase have their own busy handlers 494 * program and erase have their own busy handlers
603 * status and sequential in needs no delay 495 * status and sequential in needs no delay
604 */ 496 */
605 switch (command) { 497 switch (command) {
606 498
607 case NAND_CMD_PAGEPROG: 499 case NAND_CMD_PAGEPROG:
@@ -612,29 +504,30 @@ static void nand_command (struct mtd_info *mtd, unsigned command, int column, in
612 return; 504 return;
613 505
614 case NAND_CMD_RESET: 506 case NAND_CMD_RESET:
615 if (this->dev_ready) 507 if (chip->dev_ready)
616 break; 508 break;
617 udelay(this->chip_delay); 509 udelay(chip->chip_delay);
618 this->hwcontrol(mtd, NAND_CTL_SETCLE); 510 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
619 this->write_byte(mtd, NAND_CMD_STATUS); 511 NAND_CTRL_CLE | NAND_CTRL_CHANGE);
620 this->hwcontrol(mtd, NAND_CTL_CLRCLE); 512 chip->cmd_ctrl(mtd,
621 while ( !(this->read_byte(mtd) & NAND_STATUS_READY)); 513 NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
514 while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ;
622 return; 515 return;
623 516
624 /* This applies to read commands */ 517 /* This applies to read commands */
625 default: 518 default:
626 /* 519 /*
627 * If we don't have access to the busy pin, we apply the given 520 * If we don't have access to the busy pin, we apply the given
628 * command delay 521 * command delay
629 */ 522 */
630 if (!this->dev_ready) { 523 if (!chip->dev_ready) {
631 udelay (this->chip_delay); 524 udelay(chip->chip_delay);
632 return; 525 return;
633 } 526 }
634 } 527 }
635 /* Apply this short delay always to ensure that we do wait tWB in 528 /* Apply this short delay always to ensure that we do wait tWB in
636 * any case on any machine. */ 529 * any case on any machine. */
637 ndelay (100); 530 ndelay(100);
638 531
639 nand_wait_ready(mtd); 532 nand_wait_ready(mtd);
640} 533}
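Editor's note: the hunk above replaces the old hwcontrol(SETCLE/CLRCLE/SETALE/CLRALE) sequencing with a single cmd_ctrl() callback that receives the byte to latch plus NAND_NCE/NAND_CLE/NAND_ALE/NAND_CTRL_CHANGE flags. A hypothetical board driver implementation might look like the sketch below; the address offsets and the helper name are invented, real controllers differ.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/io.h>

/* Hypothetical wiring: writes to IO_ADDR_W + 0x02 assert CLE, writes to
 * IO_ADDR_W + 0x04 assert ALE, plain writes go to the data latch. */
#define BOARD_CLE_OFFSET	0x02
#define BOARD_ALE_OFFSET	0x04

static void board_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (dat == NAND_CMD_NONE)
		return;		/* nothing to latch, only a control change */

	if (ctrl & NAND_CLE)
		writeb(dat, chip->IO_ADDR_W + BOARD_CLE_OFFSET);
	else if (ctrl & NAND_ALE)
		writeb(dat, chip->IO_ADDR_W + BOARD_ALE_OFFSET);
	else
		writeb(dat, chip->IO_ADDR_W);
}

The board driver would then point chip->cmd_ctrl at this helper instead of filling in the old chip->hwcontrol hook.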
@@ -646,50 +539,49 @@ static void nand_command (struct mtd_info *mtd, unsigned command, int column, in
646 * @column: the column address for this command, -1 if none 539 * @column: the column address for this command, -1 if none
647 * @page_addr: the page address for this command, -1 if none 540 * @page_addr: the page address for this command, -1 if none
648 * 541 *
649 * Send command to NAND device. This is the version for the new large page devices 542 * Send command to NAND device. This is the version for the new large page
 650  * We dont have the seperate regions as we have in the small page devices.     543  * devices. We don't have the separate regions as we have in the small page
651 * We must emulate NAND_CMD_READOOB to keep the code compatible. 544 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
652 * 545 *
653 */ 546 */
654static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column, int page_addr) 547static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
548 int column, int page_addr)
655{ 549{
656 register struct nand_chip *this = mtd->priv; 550 register struct nand_chip *chip = mtd->priv;
657 551
658 /* Emulate NAND_CMD_READOOB */ 552 /* Emulate NAND_CMD_READOOB */
659 if (command == NAND_CMD_READOOB) { 553 if (command == NAND_CMD_READOOB) {
660 column += mtd->oobblock; 554 column += mtd->writesize;
661 command = NAND_CMD_READ0; 555 command = NAND_CMD_READ0;
662 } 556 }
663 557
664 558 /* Command latch cycle */
665 /* Begin command latch cycle */ 559 chip->cmd_ctrl(mtd, command & 0xff,
666 this->hwcontrol(mtd, NAND_CTL_SETCLE); 560 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
667 /* Write out the command to the device. */
668 this->write_byte(mtd, (command & 0xff));
669 /* End command latch cycle */
670 this->hwcontrol(mtd, NAND_CTL_CLRCLE);
671 561
672 if (column != -1 || page_addr != -1) { 562 if (column != -1 || page_addr != -1) {
673 this->hwcontrol(mtd, NAND_CTL_SETALE); 563 int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
674 564
675 /* Serially input address */ 565 /* Serially input address */
676 if (column != -1) { 566 if (column != -1) {
677 /* Adjust columns for 16 bit buswidth */ 567 /* Adjust columns for 16 bit buswidth */
678 if (this->options & NAND_BUSWIDTH_16) 568 if (chip->options & NAND_BUSWIDTH_16)
679 column >>= 1; 569 column >>= 1;
680 this->write_byte(mtd, column & 0xff); 570 chip->cmd_ctrl(mtd, column, ctrl);
681 this->write_byte(mtd, column >> 8); 571 ctrl &= ~NAND_CTRL_CHANGE;
572 chip->cmd_ctrl(mtd, column >> 8, ctrl);
682 } 573 }
683 if (page_addr != -1) { 574 if (page_addr != -1) {
684 this->write_byte(mtd, (unsigned char) (page_addr & 0xff)); 575 chip->cmd_ctrl(mtd, page_addr, ctrl);
685 this->write_byte(mtd, (unsigned char) ((page_addr >> 8) & 0xff)); 576 chip->cmd_ctrl(mtd, page_addr >> 8,
577 NAND_NCE | NAND_ALE);
686 /* One more address cycle for devices > 128MiB */ 578 /* One more address cycle for devices > 128MiB */
687 if (this->chipsize > (128 << 20)) 579 if (chip->chipsize > (128 << 20))
688 this->write_byte(mtd, (unsigned char) ((page_addr >> 16) & 0xff)); 580 chip->cmd_ctrl(mtd, page_addr >> 16,
581 NAND_NCE | NAND_ALE);
689 } 582 }
690 /* Latch in address */
691 this->hwcontrol(mtd, NAND_CTL_CLRALE);
692 } 583 }
584 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
693 585
694 /* 586 /*
695 * program and erase have their own busy handlers 587 * program and erase have their own busy handlers
@@ -702,55 +594,62 @@ static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column,
702 case NAND_CMD_ERASE1: 594 case NAND_CMD_ERASE1:
703 case NAND_CMD_ERASE2: 595 case NAND_CMD_ERASE2:
704 case NAND_CMD_SEQIN: 596 case NAND_CMD_SEQIN:
597 case NAND_CMD_RNDIN:
705 case NAND_CMD_STATUS: 598 case NAND_CMD_STATUS:
706 case NAND_CMD_DEPLETE1: 599 case NAND_CMD_DEPLETE1:
707 return; 600 return;
708 601
709 /* 602 /*
710 * read error status commands require only a short delay 603 * read error status commands require only a short delay
711 */ 604 */
712 case NAND_CMD_STATUS_ERROR: 605 case NAND_CMD_STATUS_ERROR:
713 case NAND_CMD_STATUS_ERROR0: 606 case NAND_CMD_STATUS_ERROR0:
714 case NAND_CMD_STATUS_ERROR1: 607 case NAND_CMD_STATUS_ERROR1:
715 case NAND_CMD_STATUS_ERROR2: 608 case NAND_CMD_STATUS_ERROR2:
716 case NAND_CMD_STATUS_ERROR3: 609 case NAND_CMD_STATUS_ERROR3:
717 udelay(this->chip_delay); 610 udelay(chip->chip_delay);
718 return; 611 return;
719 612
720 case NAND_CMD_RESET: 613 case NAND_CMD_RESET:
721 if (this->dev_ready) 614 if (chip->dev_ready)
722 break; 615 break;
723 udelay(this->chip_delay); 616 udelay(chip->chip_delay);
724 this->hwcontrol(mtd, NAND_CTL_SETCLE); 617 chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
725 this->write_byte(mtd, NAND_CMD_STATUS); 618 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
726 this->hwcontrol(mtd, NAND_CTL_CLRCLE); 619 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
727 while ( !(this->read_byte(mtd) & NAND_STATUS_READY)); 620 NAND_NCE | NAND_CTRL_CHANGE);
621 while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) ;
622 return;
623
624 case NAND_CMD_RNDOUT:
625 /* No ready / busy check necessary */
626 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
627 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
628 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
629 NAND_NCE | NAND_CTRL_CHANGE);
728 return; 630 return;
729 631
730 case NAND_CMD_READ0: 632 case NAND_CMD_READ0:
731 /* Begin command latch cycle */ 633 chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
732 this->hwcontrol(mtd, NAND_CTL_SETCLE); 634 NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
733 /* Write out the start read command */ 635 chip->cmd_ctrl(mtd, NAND_CMD_NONE,
734 this->write_byte(mtd, NAND_CMD_READSTART); 636 NAND_NCE | NAND_CTRL_CHANGE);
735 /* End command latch cycle */ 637
736 this->hwcontrol(mtd, NAND_CTL_CLRCLE); 638 /* This applies to read commands */
737 /* Fall through into ready check */
738
739 /* This applies to read commands */
740 default: 639 default:
741 /* 640 /*
742 * If we don't have access to the busy pin, we apply the given 641 * If we don't have access to the busy pin, we apply the given
743 * command delay 642 * command delay
744 */ 643 */
745 if (!this->dev_ready) { 644 if (!chip->dev_ready) {
746 udelay (this->chip_delay); 645 udelay(chip->chip_delay);
747 return; 646 return;
748 } 647 }
749 } 648 }
750 649
751 /* Apply this short delay always to ensure that we do wait tWB in 650 /* Apply this short delay always to ensure that we do wait tWB in
752 * any case on any machine. */ 651 * any case on any machine. */
753 ndelay (100); 652 ndelay(100);
754 653
755 nand_wait_ready(mtd); 654 nand_wait_ready(mtd);
756} 655}
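Editor's note: for large page parts the address phase above always issues two column bytes followed by two row bytes, plus a third row byte once the chip is larger than 128MiB. A standalone model of that cycle sequence (geometry chosen only for illustration; the 16-bit buswidth column shift is omitted):

#include <stdint.h>
#include <stdio.h>

/* Returns the address bytes nand_command_lp() would clock out. */
static int lp_address_cycles(int column, int page_addr,
			     uint64_t chipsize, uint8_t *out)
{
	int n = 0;

	if (column != -1) {
		out[n++] = column & 0xff;
		out[n++] = (column >> 8) & 0xff;
	}
	if (page_addr != -1) {
		out[n++] = page_addr & 0xff;
		out[n++] = (page_addr >> 8) & 0xff;
		if (chipsize > (128ULL << 20))	/* > 128MiB: 3rd row byte */
			out[n++] = (page_addr >> 16) & 0xff;
	}
	return n;
}

int main(void)
{
	uint8_t cyc[5];
	int n = lp_address_cycles(0x10, 0x21456, 256ULL << 20, cyc);

	for (int i = 0; i < n; i++)
		printf("cycle %d: 0x%02x\n", i, cyc[i]);
	return 0;
}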
@@ -763,34 +662,28 @@ static void nand_command_lp (struct mtd_info *mtd, unsigned command, int column,
763 * 662 *
764 * Get the device and lock it for exclusive access 663 * Get the device and lock it for exclusive access
765 */ 664 */
766static int nand_get_device (struct nand_chip *this, struct mtd_info *mtd, int new_state) 665static int
666nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state)
767{ 667{
768 struct nand_chip *active; 668 spinlock_t *lock = &chip->controller->lock;
769 spinlock_t *lock; 669 wait_queue_head_t *wq = &chip->controller->wq;
770 wait_queue_head_t *wq; 670 DECLARE_WAITQUEUE(wait, current);
771 DECLARE_WAITQUEUE (wait, current); 671 retry:
772
773 lock = (this->controller) ? &this->controller->lock : &this->chip_lock;
774 wq = (this->controller) ? &this->controller->wq : &this->wq;
775retry:
776 active = this;
777 spin_lock(lock); 672 spin_lock(lock);
778 673
 779         /* Hardware controller shared among independent devices */    674         /* Hardware controller shared among independent devices */
 780         if (this->controller) {                                        675         /* Hardware controller shared among independent devices */
781 if (this->controller->active) 676 if (!chip->controller->active)
782 active = this->controller->active; 677 chip->controller->active = chip;
783 else 678
784 this->controller->active = this; 679 if (chip->controller->active == chip && chip->state == FL_READY) {
785 } 680 chip->state = new_state;
786 if (active == this && this->state == FL_READY) {
787 this->state = new_state;
788 spin_unlock(lock); 681 spin_unlock(lock);
789 return 0; 682 return 0;
790 } 683 }
791 if (new_state == FL_PM_SUSPENDED) { 684 if (new_state == FL_PM_SUSPENDED) {
792 spin_unlock(lock); 685 spin_unlock(lock);
793 return (this->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN; 686 return (chip->state == FL_PM_SUSPENDED) ? 0 : -EAGAIN;
794 } 687 }
795 set_current_state(TASK_UNINTERRUPTIBLE); 688 set_current_state(TASK_UNINTERRUPTIBLE);
796 add_wait_queue(wq, &wait); 689 add_wait_queue(wq, &wait);
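Editor's note: after this rework nand_get_device() always goes through the controller's lock and wait queue; a chip proceeds only when it owns the (possibly shared) controller and is in FL_READY. As a rough userspace analogy of the take/release protocol, using pthreads in place of the kernel spinlock and wait queue (structure names and the FL_* subset are assumptions for the sketch):

#include <pthread.h>
#include <stddef.h>

enum { FL_READY, FL_READING, FL_WRITING };

struct nand_dev;

struct controller {
	pthread_mutex_t lock;
	pthread_cond_t wq;
	struct nand_dev *active;	/* chip currently owning the bus */
};

struct nand_dev {
	struct controller *ctrl;
	int state;
};

/* Block until this chip owns its controller and is FL_READY, then
 * switch it to new_state (compare with nand_get_device() above). */
static void dev_get(struct nand_dev *chip, int new_state)
{
	struct controller *c = chip->ctrl;

	pthread_mutex_lock(&c->lock);
	if (!c->active)
		c->active = chip;
	while (c->active != chip || chip->state != FL_READY) {
		pthread_cond_wait(&c->wq, &c->lock);
		if (!c->active)
			c->active = chip;
	}
	chip->state = new_state;
	pthread_mutex_unlock(&c->lock);
}

static void dev_release(struct nand_dev *chip)
{
	struct controller *c = chip->ctrl;

	pthread_mutex_lock(&c->lock);
	c->active = NULL;
	chip->state = FL_READY;
	pthread_cond_broadcast(&c->wq);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct controller c = { .lock = PTHREAD_MUTEX_INITIALIZER,
				.wq = PTHREAD_COND_INITIALIZER,
				.active = NULL };
	struct nand_dev chip = { .ctrl = &c, .state = FL_READY };

	dev_get(&chip, FL_READING);
	dev_release(&chip);
	return 0;
}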
@@ -804,540 +697,339 @@ retry:
804 * nand_wait - [DEFAULT] wait until the command is done 697 * nand_wait - [DEFAULT] wait until the command is done
805 * @mtd: MTD device structure 698 * @mtd: MTD device structure
806 * @this: NAND chip structure 699 * @this: NAND chip structure
807 * @state: state to select the max. timeout value
808 * 700 *
809 * Wait for command done. This applies to erase and program only 701 * Wait for command done. This applies to erase and program only
810 * Erase can take up to 400ms and program up to 20ms according to 702 * Erase can take up to 400ms and program up to 20ms according to
811 * general NAND and SmartMedia specs 703 * general NAND and SmartMedia specs
812 * 704 *
813*/ 705*/
814static int nand_wait(struct mtd_info *mtd, struct nand_chip *this, int state) 706static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
815{ 707{
816 708
817 unsigned long timeo = jiffies; 709 unsigned long timeo = jiffies;
818 int status; 710 int status, state = chip->state;
819 711
820 if (state == FL_ERASING) 712 if (state == FL_ERASING)
821 timeo += (HZ * 400) / 1000; 713 timeo += (HZ * 400) / 1000;
822 else 714 else
823 timeo += (HZ * 20) / 1000; 715 timeo += (HZ * 20) / 1000;
824 716
825 led_trigger_event(nand_led_trigger, LED_FULL); 717 led_trigger_event(nand_led_trigger, LED_FULL);
826 718
827 /* Apply this short delay always to ensure that we do wait tWB in 719 /* Apply this short delay always to ensure that we do wait tWB in
828 * any case on any machine. */ 720 * any case on any machine. */
829 ndelay (100); 721 ndelay(100);
830 722
831 if ((state == FL_ERASING) && (this->options & NAND_IS_AND)) 723 if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
832 this->cmdfunc (mtd, NAND_CMD_STATUS_MULTI, -1, -1); 724 chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1);
833 else 725 else
834 this->cmdfunc (mtd, NAND_CMD_STATUS, -1, -1); 726 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
835 727
836 while (time_before(jiffies, timeo)) { 728 while (time_before(jiffies, timeo)) {
837 /* Check, if we were interrupted */ 729 if (chip->dev_ready) {
838 if (this->state != state) 730 if (chip->dev_ready(mtd))
839 return 0;
840
841 if (this->dev_ready) {
842 if (this->dev_ready(mtd))
843 break; 731 break;
844 } else { 732 } else {
845 if (this->read_byte(mtd) & NAND_STATUS_READY) 733 if (chip->read_byte(mtd) & NAND_STATUS_READY)
846 break; 734 break;
847 } 735 }
848 cond_resched(); 736 cond_resched();
849 } 737 }
850 led_trigger_event(nand_led_trigger, LED_OFF); 738 led_trigger_event(nand_led_trigger, LED_OFF);
851 739
852 status = (int) this->read_byte(mtd); 740 status = (int)chip->read_byte(mtd);
853 return status; 741 return status;
854} 742}
855 743
856/** 744/**
857 * nand_write_page - [GENERIC] write one page 745 * nand_read_page_raw - [Intern] read raw page data without ecc
858 * @mtd: MTD device structure 746 * @mtd: mtd info structure
859 * @this: NAND chip structure 747 * @chip: nand chip info structure
860 * @page: startpage inside the chip, must be called with (page & this->pagemask) 748 * @buf: buffer to store read data
861 * @oob_buf: out of band data buffer
862 * @oobsel: out of band selecttion structre
863 * @cached: 1 = enable cached programming if supported by chip
864 *
865 * Nand_page_program function is used for write and writev !
866 * This function will always program a full page of data
867 * If you call it with a non page aligned buffer, you're lost :)
868 *
869 * Cached programming is not supported yet.
870 */ 749 */
871static int nand_write_page (struct mtd_info *mtd, struct nand_chip *this, int page, 750static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
872 u_char *oob_buf, struct nand_oobinfo *oobsel, int cached) 751 uint8_t *buf)
873{ 752{
874 int i, status; 753 chip->read_buf(mtd, buf, mtd->writesize);
875 u_char ecc_code[32]; 754 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
876 int eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE; 755 return 0;
877 int *oob_config = oobsel->eccpos; 756}
878 int datidx = 0, eccidx = 0, eccsteps = this->eccsteps;
879 int eccbytes = 0;
880
881 /* FIXME: Enable cached programming */
882 cached = 0;
883 757
884 /* Send command to begin auto page programming */ 758/**
885 this->cmdfunc (mtd, NAND_CMD_SEQIN, 0x00, page); 759 * nand_read_page_swecc - {REPLACABLE] software ecc based page read function
760 * @mtd: mtd info structure
761 * @chip: nand chip info structure
762 * @buf: buffer to store read data
763 */
764static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
765 uint8_t *buf)
766{
767 int i, eccsize = chip->ecc.size;
768 int eccbytes = chip->ecc.bytes;
769 int eccsteps = chip->ecc.steps;
770 uint8_t *p = buf;
771 uint8_t *ecc_calc = chip->buffers.ecccalc;
772 uint8_t *ecc_code = chip->buffers.ecccode;
773 int *eccpos = chip->ecc.layout->eccpos;
886 774
887 /* Write out complete page of data, take care of eccmode */ 775 nand_read_page_raw(mtd, chip, buf);
888 switch (eccmode) {
889 /* No ecc, write all */
890 case NAND_ECC_NONE:
891 printk (KERN_WARNING "Writing data without ECC to NAND-FLASH is not recommended\n");
892 this->write_buf(mtd, this->data_poi, mtd->oobblock);
893 break;
894 776
895 /* Software ecc 3/256, write all */ 777 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
896 case NAND_ECC_SOFT: 778 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
897 for (; eccsteps; eccsteps--) {
898 this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code);
899 for (i = 0; i < 3; i++, eccidx++)
900 oob_buf[oob_config[eccidx]] = ecc_code[i];
901 datidx += this->eccsize;
902 }
903 this->write_buf(mtd, this->data_poi, mtd->oobblock);
904 break;
905 default:
906 eccbytes = this->eccbytes;
907 for (; eccsteps; eccsteps--) {
908 /* enable hardware ecc logic for write */
909 this->enable_hwecc(mtd, NAND_ECC_WRITE);
910 this->write_buf(mtd, &this->data_poi[datidx], this->eccsize);
911 this->calculate_ecc(mtd, &this->data_poi[datidx], ecc_code);
912 for (i = 0; i < eccbytes; i++, eccidx++)
913 oob_buf[oob_config[eccidx]] = ecc_code[i];
914 /* If the hardware ecc provides syndromes then
915 * the ecc code must be written immidiately after
916 * the data bytes (words) */
917 if (this->options & NAND_HWECC_SYNDROME)
918 this->write_buf(mtd, ecc_code, eccbytes);
919 datidx += this->eccsize;
920 }
921 break;
922 }
923 779
924 /* Write out OOB data */ 780 for (i = 0; i < chip->ecc.total; i++)
925 if (this->options & NAND_HWECC_SYNDROME) 781 ecc_code[i] = chip->oob_poi[eccpos[i]];
926 this->write_buf(mtd, &oob_buf[oobsel->eccbytes], mtd->oobsize - oobsel->eccbytes);
927 else
928 this->write_buf(mtd, oob_buf, mtd->oobsize);
929 782
930 /* Send command to actually program the data */ 783 eccsteps = chip->ecc.steps;
931 this->cmdfunc (mtd, cached ? NAND_CMD_CACHEDPROG : NAND_CMD_PAGEPROG, -1, -1); 784 p = buf;
932 785
933 if (!cached) { 786 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
934 /* call wait ready function */ 787 int stat;
935 status = this->waitfunc (mtd, this, FL_WRITING);
936 788
937 /* See if operation failed and additional status checks are available */ 789 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
938 if ((status & NAND_STATUS_FAIL) && (this->errstat)) { 790 if (stat == -1)
939 status = this->errstat(mtd, this, FL_WRITING, status, page); 791 mtd->ecc_stats.failed++;
940 } 792 else
941 793 mtd->ecc_stats.corrected += stat;
942 /* See if device thinks it succeeded */
943 if (status & NAND_STATUS_FAIL) {
944 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write, page 0x%08x, ", __FUNCTION__, page);
945 return -EIO;
946 }
947 } else {
948 /* FIXME: Implement cached programming ! */
949 /* wait until cache is ready*/
950 // status = this->waitfunc (mtd, this, FL_CACHEDRPG);
951 } 794 }
952 return 0; 795 return 0;
953} 796}
954 797
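Editor's note: nand_read_page_swecc() above walks the page in ecc.size chunks, computes ecc.bytes of ECC per chunk and compares against the copies stored in the OOB at ecc.layout->eccpos[]. Below is a standalone model of that bookkeeping for a 2048 byte page with the common 256 byte / 3 byte software ECC split; the XOR "ECC" is only a placeholder for the real Hamming code in nand_ecc.c, and the eccpos offsets are the usual 64 byte OOB defaults.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGESIZE	2048
#define ECCSIZE		256			/* chip->ecc.size */
#define ECCBYTES	3			/* chip->ecc.bytes */
#define ECCSTEPS	(PAGESIZE / ECCSIZE)	/* chip->ecc.steps */
#define ECCTOTAL	(ECCSTEPS * ECCBYTES)	/* chip->ecc.total */

/* Placeholder for chip->ecc.calculate() (real code: Hamming ECC) */
static void calc_ecc(const uint8_t *data, uint8_t *ecc)
{
	ecc[0] = ecc[1] = ecc[2] = 0;
	for (int i = 0; i < ECCSIZE; i++)
		ecc[i % ECCBYTES] ^= data[i];
}

int main(void)
{
	/* default eccpos layout for a 64 byte OOB: bytes 40..63 */
	int eccpos[ECCTOTAL];
	uint8_t page[PAGESIZE], oob[64], calc[ECCTOTAL], code[ECCTOTAL];

	for (int i = 0; i < ECCTOTAL; i++)
		eccpos[i] = 40 + i;

	memset(page, 0xa5, sizeof(page));
	memset(oob, 0xff, sizeof(oob));

	/* write path: compute ECC per step and park it in the OOB */
	for (int step = 0, i = 0, off = 0; step < ECCSTEPS;
	     step++, i += ECCBYTES, off += ECCSIZE)
		calc_ecc(&page[off], &calc[i]);
	for (int i = 0; i < ECCTOTAL; i++)
		oob[eccpos[i]] = calc[i];

	/* read path: pick the stored ECC back out and compare */
	for (int i = 0; i < ECCTOTAL; i++)
		code[i] = oob[eccpos[i]];
	printf("stored and recomputed ECC %s\n",
	       memcmp(code, calc, ECCTOTAL) ? "differ" : "match");
	return 0;
}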
955#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
956/** 798/**
 957  * nand_verify_pages - [GENERIC] verify the chip contents after a write        799  * nand_read_page_hwecc - [REPLACEABLE] hardware ecc based page read function
958 * @mtd: MTD device structure 800 * @mtd: mtd info structure
959 * @this: NAND chip structure 801 * @chip: nand chip info structure
960 * @page: startpage inside the chip, must be called with (page & this->pagemask) 802 * @buf: buffer to store read data
961 * @numpages: number of pages to verify
962 * @oob_buf: out of band data buffer
963 * @oobsel: out of band selecttion structre
964 * @chipnr: number of the current chip
965 * @oobmode: 1 = full buffer verify, 0 = ecc only
966 * 803 *
967 * The NAND device assumes that it is always writing to a cleanly erased page. 804 * Not for syndrome calculating ecc controllers which need a special oob layout
968 * Hence, it performs its internal write verification only on bits that
969 * transitioned from 1 to 0. The device does NOT verify the whole page on a
970 * byte by byte basis. It is possible that the page was not completely erased
971 * or the page is becoming unusable due to wear. The read with ECC would catch
972 * the error later when the ECC page check fails, but we would rather catch
973 * it early in the page write stage. Better to write no data than invalid data.
974 */ 805 */
975static int nand_verify_pages (struct mtd_info *mtd, struct nand_chip *this, int page, int numpages, 806static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
976 u_char *oob_buf, struct nand_oobinfo *oobsel, int chipnr, int oobmode) 807 uint8_t *buf)
977{ 808{
978 int i, j, datidx = 0, oobofs = 0, res = -EIO; 809 int i, eccsize = chip->ecc.size;
979 int eccsteps = this->eccsteps; 810 int eccbytes = chip->ecc.bytes;
980 int hweccbytes; 811 int eccsteps = chip->ecc.steps;
981 u_char oobdata[64]; 812 uint8_t *p = buf;
982 813 uint8_t *ecc_calc = chip->buffers.ecccalc;
983 hweccbytes = (this->options & NAND_HWECC_SYNDROME) ? (oobsel->eccbytes / eccsteps) : 0; 814 uint8_t *ecc_code = chip->buffers.ecccode;
984 815 int *eccpos = chip->ecc.layout->eccpos;
985 /* Send command to read back the first page */ 816
986 this->cmdfunc (mtd, NAND_CMD_READ0, 0, page); 817 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
987 818 chip->ecc.hwctl(mtd, NAND_ECC_READ);
988 for(;;) { 819 chip->read_buf(mtd, p, eccsize);
989 for (j = 0; j < eccsteps; j++) { 820 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
990 /* Loop through and verify the data */ 821 }
991 if (this->verify_buf(mtd, &this->data_poi[datidx], mtd->eccsize)) { 822 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
992 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
993 goto out;
994 }
995 datidx += mtd->eccsize;
996 /* Have we a hw generator layout ? */
997 if (!hweccbytes)
998 continue;
999 if (this->verify_buf(mtd, &this->oob_buf[oobofs], hweccbytes)) {
1000 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
1001 goto out;
1002 }
1003 oobofs += hweccbytes;
1004 }
1005 823
1006 /* check, if we must compare all data or if we just have to 824 for (i = 0; i < chip->ecc.total; i++)
1007 * compare the ecc bytes 825 ecc_code[i] = chip->oob_poi[eccpos[i]];
1008 */
1009 if (oobmode) {
1010 if (this->verify_buf(mtd, &oob_buf[oobofs], mtd->oobsize - hweccbytes * eccsteps)) {
1011 DEBUG (MTD_DEBUG_LEVEL0, "%s: " "Failed write verify, page 0x%08x ", __FUNCTION__, page);
1012 goto out;
1013 }
1014 } else {
1015 /* Read always, else autoincrement fails */
1016 this->read_buf(mtd, oobdata, mtd->oobsize - hweccbytes * eccsteps);
1017
1018 if (oobsel->useecc != MTD_NANDECC_OFF && !hweccbytes) {
1019 int ecccnt = oobsel->eccbytes;
1020
1021 for (i = 0; i < ecccnt; i++) {
1022 int idx = oobsel->eccpos[i];
1023 if (oobdata[idx] != oob_buf[oobofs + idx] ) {
1024 DEBUG (MTD_DEBUG_LEVEL0,
1025 "%s: Failed ECC write "
1026 "verify, page 0x%08x, " "%6i bytes were succesful\n", __FUNCTION__, page, i);
1027 goto out;
1028 }
1029 }
1030 }
1031 }
1032 oobofs += mtd->oobsize - hweccbytes * eccsteps;
1033 page++;
1034 numpages--;
1035
1036 /* Apply delay or wait for ready/busy pin
1037 * Do this before the AUTOINCR check, so no problems
1038 * arise if a chip which does auto increment
1039 * is marked as NOAUTOINCR by the board driver.
1040 * Do this also before returning, so the chip is
1041 * ready for the next command.
1042 */
1043 if (!this->dev_ready)
1044 udelay (this->chip_delay);
1045 else
1046 nand_wait_ready(mtd);
1047 826
1048 /* All done, return happy */ 827 eccsteps = chip->ecc.steps;
1049 if (!numpages) 828 p = buf;
1050 return 0;
1051 829
830 for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
831 int stat;
1052 832
1053 /* Check, if the chip supports auto page increment */ 833 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1054 if (!NAND_CANAUTOINCR(this)) 834 if (stat == -1)
1055 this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page); 835 mtd->ecc_stats.failed++;
836 else
837 mtd->ecc_stats.corrected += stat;
1056 } 838 }
1057 /* 839 return 0;
1058 * Terminate the read command. We come here in case of an error
1059 * So we must issue a reset command.
1060 */
1061out:
1062 this->cmdfunc (mtd, NAND_CMD_RESET, -1, -1);
1063 return res;
1064} 840}
1065#endif
1066 841
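Editor's note: nand_read_page_hwecc() assumes a controller that can be armed per step via ecc.hwctl() and read back through ecc.calculate()/ecc.correct(). A sketch of the fields a controller driver fills in before nand_scan() so this path is used; the board_* helpers and the 512/6 geometry are hypothetical.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Hypothetical controller callbacks, implemented elsewhere */
extern void board_enable_hwecc(struct mtd_info *mtd, int mode);
extern int board_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
			       uint8_t *ecc_code);
extern int board_correct_data(struct mtd_info *mtd, uint8_t *dat,
			      uint8_t *read_ecc, uint8_t *calc_ecc);

void board_setup_ecc(struct nand_chip *chip)
{
	chip->ecc.mode      = NAND_ECC_HW;
	chip->ecc.size      = 512;	/* data bytes covered per ECC step */
	chip->ecc.bytes     = 6;	/* ECC bytes produced per step */
	chip->ecc.hwctl     = board_enable_hwecc;
	chip->ecc.calculate = board_calculate_ecc;
	chip->ecc.correct   = board_correct_data;
	/* read_page/write_page then default to the hwecc variants above */
}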
1067/** 842/**
1068  * nand_read - [MTD Interface] MTD compability function for nand_do_read_ecc   843  * nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
1069 * @mtd: MTD device structure 844 * @mtd: mtd info structure
1070 * @from: offset to read from 845 * @chip: nand chip info structure
1071 * @len: number of bytes to read 846 * @buf: buffer to store read data
1072 * @retlen: pointer to variable to store the number of read bytes
1073 * @buf: the databuffer to put data
1074 * 847 *
1075  * This function simply calls nand_do_read_ecc with oob buffer and oobsel = NULL       848  * The hw generator calculates the error syndrome automatically. Therefore
1076 * and flags = 0xff 849 * we need a special oob layout and handling.
1077 */ 850 */
1078static int nand_read (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf) 851static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
852 uint8_t *buf)
1079{ 853{
1080 return nand_do_read_ecc (mtd, from, len, retlen, buf, NULL, &mtd->oobinfo, 0xff); 854 int i, eccsize = chip->ecc.size;
1081} 855 int eccbytes = chip->ecc.bytes;
856 int eccsteps = chip->ecc.steps;
857 uint8_t *p = buf;
858 uint8_t *oob = chip->oob_poi;
859
860 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
861 int stat;
862
863 chip->ecc.hwctl(mtd, NAND_ECC_READ);
864 chip->read_buf(mtd, p, eccsize);
865
866 if (chip->ecc.prepad) {
867 chip->read_buf(mtd, oob, chip->ecc.prepad);
868 oob += chip->ecc.prepad;
869 }
870
871 chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
872 chip->read_buf(mtd, oob, eccbytes);
873 stat = chip->ecc.correct(mtd, p, oob, NULL);
874
875 if (stat == -1)
876 mtd->ecc_stats.failed++;
877 else
878 mtd->ecc_stats.corrected += stat;
879
880 oob += eccbytes;
881
882 if (chip->ecc.postpad) {
883 chip->read_buf(mtd, oob, chip->ecc.postpad);
884 oob += chip->ecc.postpad;
885 }
886 }
887
888 /* Calculate remaining oob bytes */
889 i = mtd->oobsize - (oob - chip->oob_poi);
890 if (i)
891 chip->read_buf(mtd, oob, i);
1082 892
893 return 0;
894}
1083 895
1084/** 896/**
1085 * nand_read_ecc - [MTD Interface] MTD compability function for nand_do_read_ecc 897 * nand_transfer_oob - [Internal] Transfer oob to client buffer
1086 * @mtd: MTD device structure 898 * @chip: nand chip structure
1087 * @from: offset to read from 899 * @ops: oob ops structure
1088 * @len: number of bytes to read
1089 * @retlen: pointer to variable to store the number of read bytes
1090 * @buf: the databuffer to put data
1091 * @oob_buf: filesystem supplied oob data buffer
1092 * @oobsel: oob selection structure
1093 *
1094 * This function simply calls nand_do_read_ecc with flags = 0xff
1095 */ 900 */
1096static int nand_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, 901static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1097 size_t * retlen, u_char * buf, u_char * oob_buf, struct nand_oobinfo *oobsel) 902 struct mtd_oob_ops *ops)
1098{ 903{
1099 /* use userspace supplied oobinfo, if zero */ 904 size_t len = ops->ooblen;
1100 if (oobsel == NULL) 905
1101 oobsel = &mtd->oobinfo; 906 switch(ops->mode) {
1102 return nand_do_read_ecc(mtd, from, len, retlen, buf, oob_buf, oobsel, 0xff); 907
908 case MTD_OOB_PLACE:
909 case MTD_OOB_RAW:
910 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
911 return oob + len;
912
913 case MTD_OOB_AUTO: {
914 struct nand_oobfree *free = chip->ecc.layout->oobfree;
915 uint32_t boffs = 0, roffs = ops->ooboffs;
916 size_t bytes = 0;
917
918 for(; free->length && len; free++, len -= bytes) {
919 /* Read request not from offset 0 ? */
920 if (unlikely(roffs)) {
921 if (roffs >= free->length) {
922 roffs -= free->length;
923 continue;
924 }
925 boffs = free->offset + roffs;
926 bytes = min_t(size_t, len,
927 (free->length - roffs));
928 roffs = 0;
929 } else {
930 bytes = min_t(size_t, len, free->length);
931 boffs = free->offset;
932 }
933 memcpy(oob, chip->oob_poi + boffs, bytes);
934 oob += bytes;
935 }
936 return oob;
937 }
938 default:
939 BUG();
940 }
941 return NULL;
1103} 942}
1104 943
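Editor's note: for MTD_OOB_AUTO the transfer above remaps the client's request onto the free byte ranges declared in ecc.layout->oobfree[], skipping an initial ooboffs. A standalone model of that walk; the two free ranges are made up for the example.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct oobfree { uint32_t offset, length; };

/* Copy 'len' logical free-OOB bytes, starting 'roffs' bytes into the
 * free area, out of the raw OOB buffer 'oob_poi'. */
static size_t copy_free_oob(uint8_t *dst, const uint8_t *oob_poi,
			    const struct oobfree *fr, size_t nfree,
			    size_t roffs, size_t len)
{
	size_t copied = 0;

	for (size_t i = 0; i < nfree && len; i++) {
		size_t off = fr[i].offset, avail = fr[i].length;

		if (roffs >= avail) {		/* skip this whole range */
			roffs -= avail;
			continue;
		}
		off += roffs;
		avail -= roffs;
		roffs = 0;

		size_t bytes = avail < len ? avail : len;
		memcpy(dst + copied, oob_poi + off, bytes);
		copied += bytes;
		len -= bytes;
	}
	return copied;
}

int main(void)
{
	/* e.g. a 16 byte OOB with bytes 2..5 and 8..15 free */
	struct oobfree layout[] = { { 2, 4 }, { 8, 8 } };
	uint8_t raw[16], out[12];

	for (int i = 0; i < 16; i++)
		raw[i] = i;
	size_t n = copy_free_oob(out, raw, layout, 2, 1, 6);

	printf("copied %zu bytes:", n);
	for (size_t i = 0; i < n; i++)
		printf(" %d", out[i]);
	printf("\n");
	return 0;
}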
1105
1106/** 944/**
1107 * nand_do_read_ecc - [MTD Interface] Read data with ECC 945 * nand_do_read_ops - [Internal] Read data with ECC
946 *
1108 * @mtd: MTD device structure 947 * @mtd: MTD device structure
1109 * @from: offset to read from 948 * @from: offset to read from
1110 * @len: number of bytes to read
1111 * @retlen: pointer to variable to store the number of read bytes
1112 * @buf: the databuffer to put data
1113 * @oob_buf: filesystem supplied oob data buffer (can be NULL)
1114 * @oobsel: oob selection structure
1115 * @flags: flag to indicate if nand_get_device/nand_release_device should be preformed
1116 * and how many corrected error bits are acceptable:
1117 * bits 0..7 - number of tolerable errors
1118 * bit 8 - 0 == do not get/release chip, 1 == get/release chip
1119 * 949 *
1120 * NAND read with ECC 950 * Internal function. Called with chip held.
1121 */ 951 */
1122int nand_do_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, 952static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1123 size_t * retlen, u_char * buf, u_char * oob_buf, 953 struct mtd_oob_ops *ops)
1124 struct nand_oobinfo *oobsel, int flags)
1125{ 954{
955 int chipnr, page, realpage, col, bytes, aligned;
956 struct nand_chip *chip = mtd->priv;
957 struct mtd_ecc_stats stats;
958 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
959 int sndcmd = 1;
960 int ret = 0;
961 uint32_t readlen = ops->len;
962 uint8_t *bufpoi, *oob, *buf;
1126 963
1127 int i, j, col, realpage, page, end, ecc, chipnr, sndcmd = 1; 964 stats = mtd->ecc_stats;
1128 int read = 0, oob = 0, ecc_status = 0, ecc_failed = 0;
1129 struct nand_chip *this = mtd->priv;
1130 u_char *data_poi, *oob_data = oob_buf;
1131 u_char ecc_calc[32];
1132 u_char ecc_code[32];
1133 int eccmode, eccsteps;
1134 int *oob_config, datidx;
1135 int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
1136 int eccbytes;
1137 int compareecc = 1;
1138 int oobreadlen;
1139
1140
1141 DEBUG (MTD_DEBUG_LEVEL3, "nand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
1142
1143 /* Do not allow reads past end of device */
1144 if ((from + len) > mtd->size) {
1145 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: Attempt read beyond end of device\n");
1146 *retlen = 0;
1147 return -EINVAL;
1148 }
1149
1150 /* Grab the lock and see if the device is available */
1151 if (flags & NAND_GET_DEVICE)
1152 nand_get_device (this, mtd, FL_READING);
1153
1154 /* Autoplace of oob data ? Use the default placement scheme */
1155 if (oobsel->useecc == MTD_NANDECC_AUTOPLACE)
1156 oobsel = this->autooob;
1157
1158 eccmode = oobsel->useecc ? this->eccmode : NAND_ECC_NONE;
1159 oob_config = oobsel->eccpos;
1160
1161 /* Select the NAND device */
1162 chipnr = (int)(from >> this->chip_shift);
1163 this->select_chip(mtd, chipnr);
1164
1165 /* First we calculate the starting page */
1166 realpage = (int) (from >> this->page_shift);
1167 page = realpage & this->pagemask;
1168
1169 /* Get raw starting column */
1170 col = from & (mtd->oobblock - 1);
1171
1172 end = mtd->oobblock;
1173 ecc = this->eccsize;
1174 eccbytes = this->eccbytes;
1175
1176 if ((eccmode == NAND_ECC_NONE) || (this->options & NAND_HWECC_SYNDROME))
1177 compareecc = 0;
1178
1179 oobreadlen = mtd->oobsize;
1180 if (this->options & NAND_HWECC_SYNDROME)
1181 oobreadlen -= oobsel->eccbytes;
1182 965
1183 /* Loop until all data read */ 966 chipnr = (int)(from >> chip->chip_shift);
1184 while (read < len) { 967 chip->select_chip(mtd, chipnr);
1185 968
1186 int aligned = (!col && (len - read) >= end); 969 realpage = (int)(from >> chip->page_shift);
1187 /* 970 page = realpage & chip->pagemask;
1188 * If the read is not page aligned, we have to read into data buffer
1189 * due to ecc, else we read into return buffer direct
1190 */
1191 if (aligned)
1192 data_poi = &buf[read];
1193 else
1194 data_poi = this->data_buf;
1195 971
1196 /* Check, if we have this page in the buffer 972 col = (int)(from & (mtd->writesize - 1));
1197 * 973 chip->oob_poi = chip->buffers.oobrbuf;
1198 * FIXME: Make it work when we must provide oob data too,
1199 * check the usage of data_buf oob field
1200 */
1201 if (realpage == this->pagebuf && !oob_buf) {
1202 /* aligned read ? */
1203 if (aligned)
1204 memcpy (data_poi, this->data_buf, end);
1205 goto readdata;
1206 }
1207 974
1208 /* Check, if we must send the read command */ 975 buf = ops->datbuf;
1209 if (sndcmd) { 976 oob = ops->oobbuf;
1210 this->cmdfunc (mtd, NAND_CMD_READ0, 0x00, page);
1211 sndcmd = 0;
1212 }
1213 977
1214 /* get oob area, if we have no oob buffer from fs-driver */ 978 while(1) {
1215 if (!oob_buf || oobsel->useecc == MTD_NANDECC_AUTOPLACE || 979 bytes = min(mtd->writesize - col, readlen);
1216 oobsel->useecc == MTD_NANDECC_AUTOPL_USR) 980 aligned = (bytes == mtd->writesize);
1217 oob_data = &this->data_buf[end];
1218 981
1219 eccsteps = this->eccsteps; 982 /* Is the current page in the buffer ? */
983 if (realpage != chip->pagebuf || oob) {
984 bufpoi = aligned ? buf : chip->buffers.databuf;
1220 985
1221 switch (eccmode) { 986 if (likely(sndcmd)) {
1222 case NAND_ECC_NONE: { /* No ECC, Read in a page */ 987 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1223 static unsigned long lastwhinge = 0; 988 sndcmd = 0;
1224 if ((lastwhinge / HZ) != (jiffies / HZ)) {
1225 printk (KERN_WARNING "Reading data from NAND FLASH without ECC is not recommended\n");
1226 lastwhinge = jiffies;
1227 } 989 }
1228 this->read_buf(mtd, data_poi, end);
1229 break;
1230 }
1231 990
1232 case NAND_ECC_SOFT: /* Software ECC 3/256: Read in a page + oob data */ 991 /* Now read the page into the buffer */
1233 this->read_buf(mtd, data_poi, end); 992 ret = chip->ecc.read_page(mtd, chip, bufpoi);
1234 for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=3, datidx += ecc) 993 if (ret < 0)
1235 this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]); 994 break;
1236 break;
1237 995
1238 default: 996 /* Transfer not aligned data */
1239 for (i = 0, datidx = 0; eccsteps; eccsteps--, i+=eccbytes, datidx += ecc) { 997 if (!aligned) {
1240 this->enable_hwecc(mtd, NAND_ECC_READ); 998 chip->pagebuf = realpage;
1241 this->read_buf(mtd, &data_poi[datidx], ecc); 999 memcpy(buf, chip->buffers.databuf + col, bytes);
1242
1243 /* HW ecc with syndrome calculation must read the
1244 * syndrome from flash immidiately after the data */
1245 if (!compareecc) {
1246 /* Some hw ecc generators need to know when the
1247 * syndrome is read from flash */
1248 this->enable_hwecc(mtd, NAND_ECC_READSYN);
1249 this->read_buf(mtd, &oob_data[i], eccbytes);
1250 /* We calc error correction directly, it checks the hw
1251 * generator for an error, reads back the syndrome and
1252 * does the error correction on the fly */
1253 ecc_status = this->correct_data(mtd, &data_poi[datidx], &oob_data[i], &ecc_code[i]);
1254 if ((ecc_status == -1) || (ecc_status > (flags && 0xff))) {
1255 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: "
1256 "Failed ECC read, page 0x%08x on chip %d\n", page, chipnr);
1257 ecc_failed++;
1258 }
1259 } else {
1260 this->calculate_ecc(mtd, &data_poi[datidx], &ecc_calc[i]);
1261 }
1262 } 1000 }
1263 break;
1264 }
1265
1266 /* read oobdata */
1267 this->read_buf(mtd, &oob_data[mtd->oobsize - oobreadlen], oobreadlen);
1268
1269 /* Skip ECC check, if not requested (ECC_NONE or HW_ECC with syndromes) */
1270 if (!compareecc)
1271 goto readoob;
1272
1273 /* Pick the ECC bytes out of the oob data */
1274 for (j = 0; j < oobsel->eccbytes; j++)
1275 ecc_code[j] = oob_data[oob_config[j]];
1276 1001
1277 /* correct data, if neccecary */ 1002 buf += bytes;
1278 for (i = 0, j = 0, datidx = 0; i < this->eccsteps; i++, datidx += ecc) {
1279 ecc_status = this->correct_data(mtd, &data_poi[datidx], &ecc_code[j], &ecc_calc[j]);
1280 1003
1281 /* Get next chunk of ecc bytes */ 1004 if (unlikely(oob)) {
1282 j += eccbytes; 1005 /* Raw mode does data:oob:data:oob */
1283 1006 if (ops->mode != MTD_OOB_RAW)
1284 /* Check, if we have a fs supplied oob-buffer, 1007 oob = nand_transfer_oob(chip, oob, ops);
1285 * This is the legacy mode. Used by YAFFS1 1008 else
1286 * Should go away some day 1009 buf = nand_transfer_oob(chip, buf, ops);
1287 */
1288 if (oob_buf && oobsel->useecc == MTD_NANDECC_PLACE) {
1289 int *p = (int *)(&oob_data[mtd->oobsize]);
1290 p[i] = ecc_status;
1291 } 1010 }
1292 1011
1293 if ((ecc_status == -1) || (ecc_status > (flags && 0xff))) { 1012 if (!(chip->options & NAND_NO_READRDY)) {
1294 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_ecc: " "Failed ECC read, page 0x%08x\n", page); 1013 /*
1295 ecc_failed++; 1014 * Apply delay or wait for ready/busy pin. Do
1015 * this before the AUTOINCR check, so no
1016 * problems arise if a chip which does auto
1017 * increment is marked as NOAUTOINCR by the
1018 * board driver.
1019 */
1020 if (!chip->dev_ready)
1021 udelay(chip->chip_delay);
1022 else
1023 nand_wait_ready(mtd);
1296 } 1024 }
1025 } else {
1026 memcpy(buf, chip->buffers.databuf + col, bytes);
1027 buf += bytes;
1297 } 1028 }
1298 1029
1299 readoob: 1030 readlen -= bytes;
1300 /* check, if we have a fs supplied oob-buffer */
1301 if (oob_buf) {
1302 /* without autoplace. Legacy mode used by YAFFS1 */
1303 switch(oobsel->useecc) {
1304 case MTD_NANDECC_AUTOPLACE:
1305 case MTD_NANDECC_AUTOPL_USR:
1306 /* Walk through the autoplace chunks */
1307 for (i = 0; oobsel->oobfree[i][1]; i++) {
1308 int from = oobsel->oobfree[i][0];
1309 int num = oobsel->oobfree[i][1];
1310 memcpy(&oob_buf[oob], &oob_data[from], num);
1311 oob += num;
1312 }
1313 break;
1314 case MTD_NANDECC_PLACE:
1315 /* YAFFS1 legacy mode */
1316 oob_data += this->eccsteps * sizeof (int);
1317 default:
1318 oob_data += mtd->oobsize;
1319 }
1320 }
1321 readdata:
1322 /* Partial page read, transfer data into fs buffer */
1323 if (!aligned) {
1324 for (j = col; j < end && read < len; j++)
1325 buf[read++] = data_poi[j];
1326 this->pagebuf = realpage;
1327 } else
1328 read += mtd->oobblock;
1329
1330 /* Apply delay or wait for ready/busy pin
1331 * Do this before the AUTOINCR check, so no problems
1332 * arise if a chip which does auto increment
1333 * is marked as NOAUTOINCR by the board driver.
1334 */
1335 if (!this->dev_ready)
1336 udelay (this->chip_delay);
1337 else
1338 nand_wait_ready(mtd);
1339 1031
1340 if (read == len) 1032 if (!readlen)
1341 break; 1033 break;
1342 1034
1343 /* For subsequent reads align to page boundary. */ 1035 /* For subsequent reads align to page boundary. */
@@ -1345,701 +1037,775 @@ int nand_do_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
1345 /* Increment page address */ 1037 /* Increment page address */
1346 realpage++; 1038 realpage++;
1347 1039
1348 page = realpage & this->pagemask; 1040 page = realpage & chip->pagemask;
1349 /* Check, if we cross a chip boundary */ 1041 /* Check, if we cross a chip boundary */
1350 if (!page) { 1042 if (!page) {
1351 chipnr++; 1043 chipnr++;
1352 this->select_chip(mtd, -1); 1044 chip->select_chip(mtd, -1);
1353 this->select_chip(mtd, chipnr); 1045 chip->select_chip(mtd, chipnr);
1354 } 1046 }
1047
1355 /* Check, if the chip supports auto page increment 1048 /* Check, if the chip supports auto page increment
1356 * or if we have hit a block boundary. 1049 * or if we have hit a block boundary.
1357 */ 1050 */
1358 if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) 1051 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1359 sndcmd = 1; 1052 sndcmd = 1;
1360 } 1053 }
1361 1054
1362 /* Deselect and wake up anyone waiting on the device */ 1055 ops->retlen = ops->len - (size_t) readlen;
1363 if (flags & NAND_GET_DEVICE)
1364 nand_release_device(mtd);
1365 1056
1366 /* 1057 if (ret)
1367 * Return success, if no ECC failures, else -EBADMSG 1058 return ret;
1368 * fs driver will take care of that, because 1059
1369 * retlen == desired len and result == -EBADMSG 1060 if (mtd->ecc_stats.failed - stats.failed)
1370 */ 1061 return -EBADMSG;
1371 *retlen = read; 1062
1372 return ecc_failed ? -EBADMSG : 0; 1063 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
1373} 1064}
1374 1065
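Editor's note: new in this path is the use of mtd->ecc_stats to turn corrected bitflips into -EUCLEAN and uncorrectable steps into -EBADMSG. A sketch of how a caller might react to the two soft-error returns (kernel context assumed; the policy comments are suggestions, not part of the patch):

#include <linux/mtd/mtd.h>
#include <linux/errno.h>

static int read_and_classify(struct mtd_info *mtd, loff_t from,
			     size_t len, u_char *buf)
{
	size_t retlen;
	int ret = mtd->read(mtd, from, len, &retlen, buf);

	if (ret == -EUCLEAN)
		return 0;	/* data is good, but bitflips were fixed:
				 * a FTL may want to scrub this block soon */
	if (ret == -EBADMSG)
		return ret;	/* uncorrectable ECC error, buf is suspect */
	return ret;		/* 0 on success or another error code */
}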
1375/** 1066/**
1376  * nand_read_oob - [MTD Interface] NAND read out-of-band                       1067  * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
1377 * @mtd: MTD device structure 1068 * @mtd: MTD device structure
1378 * @from: offset to read from 1069 * @from: offset to read from
1379 * @len: number of bytes to read 1070 * @len: number of bytes to read
1380 * @retlen: pointer to variable to store the number of read bytes 1071 * @retlen: pointer to variable to store the number of read bytes
1381 * @buf: the databuffer to put data 1072 * @buf: the databuffer to put data
1382 * 1073 *
1383 * NAND read out-of-band data from the spare area 1074 * Get hold of the chip and call nand_do_read
1384 */ 1075 */
1385static int nand_read_oob (struct mtd_info *mtd, loff_t from, size_t len, size_t * retlen, u_char * buf) 1076static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1077 size_t *retlen, uint8_t *buf)
1386{ 1078{
1387 int i, col, page, chipnr; 1079 struct nand_chip *chip = mtd->priv;
1388 struct nand_chip *this = mtd->priv; 1080 int ret;
1389 int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
1390 1081
1391 DEBUG (MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 1082 /* Do not allow reads past end of device */
1083 if ((from + len) > mtd->size)
1084 return -EINVAL;
1085 if (!len)
1086 return 0;
1392 1087
1393 /* Shift to get page */ 1088 nand_get_device(chip, mtd, FL_READING);
1394 page = (int)(from >> this->page_shift);
1395 chipnr = (int)(from >> this->chip_shift);
1396 1089
1397 /* Mask to get column */ 1090 chip->ops.len = len;
1398 col = from & (mtd->oobsize - 1); 1091 chip->ops.datbuf = buf;
1092 chip->ops.oobbuf = NULL;
1399 1093
1400 /* Initialize return length value */ 1094 ret = nand_do_read_ops(mtd, from, &chip->ops);
1401 *retlen = 0;
1402 1095
1403 /* Do not allow reads past end of device */ 1096 nand_release_device(mtd);
1404 if ((from + len) > mtd->size) { 1097
1405 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_oob: Attempt read beyond end of device\n"); 1098 *retlen = chip->ops.retlen;
1406 *retlen = 0; 1099 return ret;
1407 return -EINVAL; 1100}
1101
1102/**
1103  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
1104 * @mtd: mtd info structure
1105 * @chip: nand chip info structure
1106 * @page: page number to read
1107 * @sndcmd: flag whether to issue read command or not
1108 */
1109static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1110 int page, int sndcmd)
1111{
1112 if (sndcmd) {
1113 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1114 sndcmd = 0;
1408 } 1115 }
1116 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1117 return sndcmd;
1118}
1409 1119
1410 /* Grab the lock and see if the device is available */ 1120/**
1411         nand_get_device (this, mtd , FL_READING);                      1121  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
1122 * with syndromes
1123 * @mtd: mtd info structure
1124 * @chip: nand chip info structure
1125 * @page: page number to read
1126 * @sndcmd: flag whether to issue read command or not
1127 */
1128static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1129 int page, int sndcmd)
1130{
1131 uint8_t *buf = chip->oob_poi;
1132 int length = mtd->oobsize;
1133 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
1134 int eccsize = chip->ecc.size;
1135 uint8_t *bufpoi = buf;
1136 int i, toread, sndrnd = 0, pos;
1137
1138 chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
1139 for (i = 0; i < chip->ecc.steps; i++) {
1140 if (sndrnd) {
1141 pos = eccsize + i * (eccsize + chunk);
1142 if (mtd->writesize > 512)
1143 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
1144 else
1145 chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
1146 } else
1147 sndrnd = 1;
1148 toread = min_t(int, length, chunk);
1149 chip->read_buf(mtd, bufpoi, toread);
1150 bufpoi += toread;
1151 length -= toread;
1152 }
1153 if (length > 0)
1154 chip->read_buf(mtd, bufpoi, length);
1412 1155
1413 /* Select the NAND device */ 1156 return 1;
1414 this->select_chip(mtd, chipnr); 1157}
1158
1159/**
1160  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
1161 * @mtd: mtd info structure
1162 * @chip: nand chip info structure
1163 * @page: page number to write
1164 */
1165static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1166 int page)
1167{
1168 int status = 0;
1169 const uint8_t *buf = chip->oob_poi;
1170 int length = mtd->oobsize;
1171
1172 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
1173 chip->write_buf(mtd, buf, length);
1174 /* Send command to program the OOB data */
1175 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1176
1177 status = chip->waitfunc(mtd, chip);
1178
1179 return status;
1180}
1181
1182/**
1183  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
1184 * with syndrome - only for large page flash !
1185 * @mtd: mtd info structure
1186 * @chip: nand chip info structure
1187 * @page: page number to write
1188 */
1189static int nand_write_oob_syndrome(struct mtd_info *mtd,
1190 struct nand_chip *chip, int page)
1191{
1192 int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
1193 int eccsize = chip->ecc.size, length = mtd->oobsize;
1194 int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
1195 const uint8_t *bufpoi = chip->oob_poi;
1415 1196
1416 /* Send the read command */
1417 this->cmdfunc (mtd, NAND_CMD_READOOB, col, page & this->pagemask);
1418 /* 1197 /*
1419 * Read the data, if we read more than one page 1198 * data-ecc-data-ecc ... ecc-oob
1420 * oob data, let the device transfer the data ! 1199 * or
1200 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
1421 */ 1201 */
1422 i = 0; 1202 if (!chip->ecc.prepad && !chip->ecc.postpad) {
1423 while (i < len) { 1203 pos = steps * (eccsize + chunk);
1424 int thislen = mtd->oobsize - col; 1204 steps = 0;
1425 thislen = min_t(int, thislen, len); 1205 } else
1426 this->read_buf(mtd, &buf[i], thislen); 1206 pos = eccsize + chunk;
1427 i += thislen;
1428
1429 /* Read more ? */
1430 if (i < len) {
1431 page++;
1432 col = 0;
1433
1434 /* Check, if we cross a chip boundary */
1435 if (!(page & this->pagemask)) {
1436 chipnr++;
1437 this->select_chip(mtd, -1);
1438 this->select_chip(mtd, chipnr);
1439 }
1440
1441 /* Apply delay or wait for ready/busy pin
1442 * Do this before the AUTOINCR check, so no problems
1443 * arise if a chip which does auto increment
1444 * is marked as NOAUTOINCR by the board driver.
1445 */
1446 if (!this->dev_ready)
1447 udelay (this->chip_delay);
1448 else
1449 nand_wait_ready(mtd);
1450 1207
1451 /* Check, if the chip supports auto page increment 1208 chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
1452 * or if we have hit a block boundary. 1209 for (i = 0; i < steps; i++) {
1453 */ 1210 if (sndcmd) {
1454 if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) { 1211 if (mtd->writesize <= 512) {
1455 /* For subsequent page reads set offset to 0 */ 1212 uint32_t fill = 0xFFFFFFFF;
1456 this->cmdfunc (mtd, NAND_CMD_READOOB, 0x0, page & this->pagemask); 1213
1214 len = eccsize;
1215 while (len > 0) {
1216 int num = min_t(int, len, 4);
1217 chip->write_buf(mtd, (uint8_t *)&fill,
1218 num);
1219 len -= num;
1220 }
1221 } else {
1222 pos = eccsize + i * (eccsize + chunk);
1223 chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
1457 } 1224 }
1458 } 1225 } else
1226 sndcmd = 1;
1227 len = min_t(int, length, chunk);
1228 chip->write_buf(mtd, bufpoi, len);
1229 bufpoi += len;
1230 length -= len;
1459 } 1231 }
1232 if (length > 0)
1233 chip->write_buf(mtd, bufpoi, length);
1460 1234
1461 /* Deselect and wake up anyone waiting on the device */ 1235 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1462 nand_release_device(mtd); 1236 status = chip->waitfunc(mtd, chip);
1463 1237
1464 /* Return happy */ 1238 return status & NAND_STATUS_FAIL ? -EIO : 0;
1465 *retlen = len;
1466 return 0;
1467} 1239}
1468 1240
1469/** 1241/**
1470 * nand_read_raw - [GENERIC] Read raw data including oob into buffer 1242 * nand_do_read_oob - [Intern] NAND read out-of-band
1471 * @mtd: MTD device structure 1243 * @mtd: MTD device structure
1472 * @buf: temporary buffer
1473 * @from: offset to read from 1244 * @from: offset to read from
1474 * @len: number of bytes to read 1245 * @ops: oob operations description structure
1475 * @ooblen: number of oob data bytes to read
1476 * 1246 *
1477 * Read raw data including oob into buffer 1247 * NAND read out-of-band data from the spare area
1478 */ 1248 */
1479int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen) 1249static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1250 struct mtd_oob_ops *ops)
1480{ 1251{
1481 struct nand_chip *this = mtd->priv; 1252 int page, realpage, chipnr, sndcmd = 1;
1482 int page = (int) (from >> this->page_shift); 1253 struct nand_chip *chip = mtd->priv;
1483 int chip = (int) (from >> this->chip_shift); 1254 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1484 int sndcmd = 1; 1255 int readlen = ops->len;
1485 int cnt = 0; 1256 uint8_t *buf = ops->oobbuf;
1486 int pagesize = mtd->oobblock + mtd->oobsize;
1487 int blockcheck = (1 << (this->phys_erase_shift - this->page_shift)) - 1;
1488 1257
1489 /* Do not allow reads past end of device */ 1258 DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n",
1490 if ((from + len) > mtd->size) { 1259 (unsigned long long)from, readlen);
1491 DEBUG (MTD_DEBUG_LEVEL0, "nand_read_raw: Attempt read beyond end of device\n");
1492 return -EINVAL;
1493 }
1494 1260
1495 /* Grab the lock and see if the device is available */ 1261 chipnr = (int)(from >> chip->chip_shift);
1496 nand_get_device (this, mtd , FL_READING); 1262 chip->select_chip(mtd, chipnr);
1497 1263
1498 this->select_chip (mtd, chip); 1264 /* Shift to get page */
1265 realpage = (int)(from >> chip->page_shift);
1266 page = realpage & chip->pagemask;
1499 1267
1500 /* Add requested oob length */ 1268 chip->oob_poi = chip->buffers.oobrbuf;
1501 len += ooblen;
1502 1269
1503 while (len) { 1270 while(1) {
1504 if (sndcmd) 1271 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
1505 this->cmdfunc (mtd, NAND_CMD_READ0, 0, page & this->pagemask); 1272 buf = nand_transfer_oob(chip, buf, ops);
1506 sndcmd = 0;
1507 1273
1508 this->read_buf (mtd, &buf[cnt], pagesize); 1274 readlen -= ops->ooblen;
1275 if (!readlen)
1276 break;
1509 1277
1510 len -= pagesize; 1278 if (!(chip->options & NAND_NO_READRDY)) {
1511 cnt += pagesize; 1279 /*
1512 page++; 1280 * Apply delay or wait for ready/busy pin. Do this
1281 * before the AUTOINCR check, so no problems arise if a
1282 * chip which does auto increment is marked as
1283 * NOAUTOINCR by the board driver.
1284 */
1285 if (!chip->dev_ready)
1286 udelay(chip->chip_delay);
1287 else
1288 nand_wait_ready(mtd);
1289 }
1513 1290
1514 if (!this->dev_ready) 1291 /* Increment page address */
1515 udelay (this->chip_delay); 1292 realpage++;
1516 else
1517 nand_wait_ready(mtd);
1518 1293
1519 /* Check, if the chip supports auto page increment */ 1294 page = realpage & chip->pagemask;
1520 if (!NAND_CANAUTOINCR(this) || !(page & blockcheck)) 1295 /* Check, if we cross a chip boundary */
1296 if (!page) {
1297 chipnr++;
1298 chip->select_chip(mtd, -1);
1299 chip->select_chip(mtd, chipnr);
1300 }
1301
1302 /* Check, if the chip supports auto page increment
1303 * or if we have hit a block boundary.
1304 */
1305 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1521 sndcmd = 1; 1306 sndcmd = 1;
1522 } 1307 }
1523 1308
1524 /* Deselect and wake up anyone waiting on the device */ 1309 ops->retlen = ops->len;
1525 nand_release_device(mtd);
1526 return 0; 1310 return 0;
1527} 1311}
1528 1312
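Editor's note: both read loops above recompute sndcmd from blkcheck so that a fresh READ0 command is issued whenever a block boundary is crossed (or the chip cannot auto-increment at all). A standalone model of that test with example geometry:

#include <stdio.h>

/* With 2048 byte pages and 128KiB erase blocks, page_shift = 11 and
 * phys_erase_shift = 17, so a block holds 64 pages. */
int main(void)
{
	int page_shift = 11, phys_erase_shift = 17;
	int blkcheck = (1 << (phys_erase_shift - page_shift)) - 1;

	for (int page = 62; page <= 66; page++)
		printf("page %3d: %s\n", page,
		       (page & blkcheck) ? "auto-increment ok"
					 : "block boundary - resend READ0");
	return 0;
}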
1529
1530/** 1313/**
1531 * nand_prepare_oobbuf - [GENERIC] Prepare the out of band buffer 1314 * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
1532 * @mtd: MTD device structure 1315 * @mtd: MTD device structure
1533 * @fsbuf: buffer given by fs driver 1316 * @from: offset to read from
1534 * @oobsel: out of band selection structre 1317 * @ops: oob operation description structure
1535 * @autoplace: 1 = place given buffer into the oob bytes
1536 * @numpages: number of pages to prepare
1537 *
1538 * Return:
1539 * 1. Filesystem buffer available and autoplacement is off,
1540 * return filesystem buffer
1541 * 2. No filesystem buffer or autoplace is off, return internal
1542 * buffer
1543 * 3. Filesystem buffer is given and autoplace selected
1544 * put data from fs buffer into internal buffer and
1545 * retrun internal buffer
1546 *
1547 * Note: The internal buffer is filled with 0xff. This must
1548 * be done only once, when no autoplacement happens
1549 * Autoplacement sets the buffer dirty flag, which
1550 * forces the 0xff fill before using the buffer again.
1551 * 1318 *
1552*/ 1319 * NAND read data and/or out-of-band data
1553static u_char * nand_prepare_oobbuf (struct mtd_info *mtd, u_char *fsbuf, struct nand_oobinfo *oobsel, 1320 */
1554 int autoplace, int numpages) 1321static int nand_read_oob(struct mtd_info *mtd, loff_t from,
1322 struct mtd_oob_ops *ops)
1555{ 1323{
1556 struct nand_chip *this = mtd->priv; 1324 int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
1557 int i, len, ofs; 1325 uint8_t *buf) = NULL;
1558 1326 struct nand_chip *chip = mtd->priv;
1559 /* Zero copy fs supplied buffer */ 1327 int ret = -ENOTSUPP;
1560 if (fsbuf && !autoplace) 1328
1561 return fsbuf; 1329 ops->retlen = 0;
1562 1330
1563 /* Check, if the buffer must be filled with ff again */ 1331 /* Do not allow reads past end of device */
1564 if (this->oobdirty) { 1332 if ((from + ops->len) > mtd->size) {
1565 memset (this->oob_buf, 0xff, 1333 DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: "
1566 mtd->oobsize << (this->phys_erase_shift - this->page_shift)); 1334 "Attempt read beyond end of device\n");
1567 this->oobdirty = 0; 1335 return -EINVAL;
1568 } 1336 }
1569 1337
1570 /* If we have no autoplacement or no fs buffer use the internal one */ 1338 nand_get_device(chip, mtd, FL_READING);
1571 if (!autoplace || !fsbuf) 1339
1572 return this->oob_buf; 1340 switch(ops->mode) {
1573 1341 case MTD_OOB_PLACE:
1574 /* Walk through the pages and place the data */ 1342 case MTD_OOB_AUTO:
1575 this->oobdirty = 1; 1343 break;
1576 ofs = 0; 1344
1577 while (numpages--) { 1345 case MTD_OOB_RAW:
1578 for (i = 0, len = 0; len < mtd->oobavail; i++) { 1346 /* Replace the read_page algorithm temporary */
1579 int to = ofs + oobsel->oobfree[i][0]; 1347 read_page = chip->ecc.read_page;
1580 int num = oobsel->oobfree[i][1]; 1348 chip->ecc.read_page = nand_read_page_raw;
1581 memcpy (&this->oob_buf[to], fsbuf, num); 1349 break;
1582 len += num; 1350
1583 fsbuf += num; 1351 default:
1584 } 1352 goto out;
1585 ofs += mtd->oobavail;
1586 } 1353 }
1587 return this->oob_buf; 1354
1355 if (!ops->datbuf)
1356 ret = nand_do_read_oob(mtd, from, ops);
1357 else
1358 ret = nand_do_read_ops(mtd, from, ops);
1359
1360 if (unlikely(ops->mode == MTD_OOB_RAW))
1361 chip->ecc.read_page = read_page;
1362 out:
1363 nand_release_device(mtd);
1364 return ret;
1588} 1365}
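For reference, a minimal caller-side sketch of the mtd_oob_ops interface used by the new read path above. This is illustrative only and not part of the patch; the helper name is the editor's, it assumes mtd->read_oob has been wired to nand_read_oob() by nand_scan(), and it reads a single page's worth of spare bytes.

/* Illustrative sketch, not part of the patch: read one page's spare area
 * through the ops-based entry point. MTD_OOB_AUTO returns only the bytes
 * listed in ecc.layout->oobfree.
 */
static int example_read_oob(struct mtd_info *mtd, loff_t from,
			    uint8_t *spare, size_t sparelen)
{
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OOB_AUTO;
	ops.ooboffs = 0;
	ops.len = sparelen;	/* total requested length */
	ops.ooblen = sparelen;	/* per-page oob chunk, here one page */
	ops.oobbuf = spare;
	ops.datbuf = NULL;	/* oob only: takes the nand_do_read_oob() path */

	return mtd->read_oob(mtd, from, &ops);
}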
1589 1366
1590#define NOTALIGNED(x) (x & (mtd->oobblock-1)) != 0
1591 1367
1592/** 1368/**
1593 * nand_write - [MTD Interface] compability function for nand_write_ecc 1369 * nand_write_page_raw - [Intern] raw page write function
1594 * @mtd: MTD device structure 1370 * @mtd: mtd info structure
1595 * @to: offset to write to 1371 * @chip: nand chip info structure
1596 * @len: number of bytes to write 1372 * @buf: data buffer
1597 * @retlen: pointer to variable to store the number of written bytes 1373 */
1598 * @buf: the data to write 1374static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1599 * 1375 const uint8_t *buf)
1600 * This function simply calls nand_write_ecc with oob buffer and oobsel = NULL
1601 *
1602*/
1603static int nand_write (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf)
1604{ 1376{
1605 return (nand_write_ecc (mtd, to, len, retlen, buf, NULL, NULL)); 1377 chip->write_buf(mtd, buf, mtd->writesize);
1378 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1606} 1379}
1607 1380
1608/** 1381/**
1609 * nand_write_ecc - [MTD Interface] NAND write with ECC 1382 * nand_write_page_swecc - [REPLACEABLE] software ecc based page write function
1610 * @mtd: MTD device structure 1383 * @mtd: mtd info structure
1611 * @to: offset to write to 1384 * @chip: nand chip info structure
1612 * @len: number of bytes to write 1385 * @buf: data buffer
1613 * @retlen: pointer to variable to store the number of written bytes
1614 * @buf: the data to write
1615 * @eccbuf: filesystem supplied oob data buffer
1616 * @oobsel: oob selection structure
1617 *
1618 * NAND write with ECC
1619 */ 1386 */
1620static int nand_write_ecc (struct mtd_info *mtd, loff_t to, size_t len, 1387static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1621 size_t * retlen, const u_char * buf, u_char * eccbuf, struct nand_oobinfo *oobsel) 1388 const uint8_t *buf)
1622{ 1389{
1623 int startpage, page, ret = -EIO, oob = 0, written = 0, chipnr; 1390 int i, eccsize = chip->ecc.size;
1624 int autoplace = 0, numpages, totalpages; 1391 int eccbytes = chip->ecc.bytes;
1625 struct nand_chip *this = mtd->priv; 1392 int eccsteps = chip->ecc.steps;
1626 u_char *oobbuf, *bufstart; 1393 uint8_t *ecc_calc = chip->buffers.ecccalc;
1627 int ppblock = (1 << (this->phys_erase_shift - this->page_shift)); 1394 const uint8_t *p = buf;
1395 int *eccpos = chip->ecc.layout->eccpos;
1628 1396
1629 DEBUG (MTD_DEBUG_LEVEL3, "nand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 1397 /* Software ecc calculation */
1398 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1399 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1630 1400
1631 /* Initialize retlen, in case of early exit */ 1401 for (i = 0; i < chip->ecc.total; i++)
1632 *retlen = 0; 1402 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1633 1403
1634 /* Do not allow write past end of device */ 1404 nand_write_page_raw(mtd, chip, buf);
1635 if ((to + len) > mtd->size) { 1405}
1636 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: Attempt to write past end of page\n");
1637 return -EINVAL;
1638 }
1639 1406
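The software and hardware ECC page writers below iterate ecc.steps times over ecc.size-byte chunks, collecting ecc.bytes of ECC per chunk into ecc_calc[] before it is copied to the eccpos positions in the OOB. A small sketch of that geometry, with hypothetical numbers in the comments (the real values come from the board driver and nand_scan()):

/* Illustrative only, not part of the patch: how the per-page ECC byte count
 * used by the page-write loops follows from the ecc geometry set at scan time.
 */
static int example_ecc_total(struct mtd_info *mtd, struct nand_chip *chip)
{
	int steps = mtd->writesize / chip->ecc.size;	/* e.g. 2048 / 256 = 8 */

	return steps * chip->ecc.bytes;			/* e.g. 8 * 3 = 24 == ecc.total */
}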
1640 /* reject writes, which are not page aligned */ 1407/**
1641 if (NOTALIGNED (to) || NOTALIGNED(len)) { 1408 * nand_write_page_hwecc - [REPLACEABLE] hardware ecc based page write function
1642 printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n"); 1409 * @mtd: mtd info structure
1643 return -EINVAL; 1410 * @chip: nand chip info structure
1411 * @buf: data buffer
1412 */
1413static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1414 const uint8_t *buf)
1415{
1416 int i, eccsize = chip->ecc.size;
1417 int eccbytes = chip->ecc.bytes;
1418 int eccsteps = chip->ecc.steps;
1419 uint8_t *ecc_calc = chip->buffers.ecccalc;
1420 const uint8_t *p = buf;
1421 int *eccpos = chip->ecc.layout->eccpos;
1422
1423 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1424 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
1425 chip->write_buf(mtd, p, eccsize);
1426 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1644 } 1427 }
1645 1428
1646 /* Grab the lock and see if the device is available */ 1429 for (i = 0; i < chip->ecc.total; i++)
1647 nand_get_device (this, mtd, FL_WRITING); 1430 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1648 1431
1649 /* Calculate chipnr */ 1432 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1650 chipnr = (int)(to >> this->chip_shift); 1433}
1651 /* Select the NAND device */
1652 this->select_chip(mtd, chipnr);
1653 1434
1654 /* Check, if it is write protected */ 1435/**
1655 if (nand_check_wp(mtd)) 1436 * nand_write_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page write
1656 goto out; 1437 * @mtd: mtd info structure
1438 * @chip: nand chip info structure
1439 * @buf: data buffer
1440 *
1441 * The hw generator calculates the error syndrome automatically. Therefore
1442 * we need a special oob layout and handling.
1443 */
1444static void nand_write_page_syndrome(struct mtd_info *mtd,
1445 struct nand_chip *chip, const uint8_t *buf)
1446{
1447 int i, eccsize = chip->ecc.size;
1448 int eccbytes = chip->ecc.bytes;
1449 int eccsteps = chip->ecc.steps;
1450 const uint8_t *p = buf;
1451 uint8_t *oob = chip->oob_poi;
1657 1452
1658 /* if oobsel is NULL, use chip defaults */ 1453 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1659 if (oobsel == NULL)
1660 oobsel = &mtd->oobinfo;
1661 1454
1662 /* Autoplace of oob data ? Use the default placement scheme */ 1455 chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
1663 if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) { 1456 chip->write_buf(mtd, p, eccsize);
1664 oobsel = this->autooob;
1665 autoplace = 1;
1666 }
1667 if (oobsel->useecc == MTD_NANDECC_AUTOPL_USR)
1668 autoplace = 1;
1669 1457
1670 /* Setup variables and oob buffer */ 1458 if (chip->ecc.prepad) {
1671 totalpages = len >> this->page_shift; 1459 chip->write_buf(mtd, oob, chip->ecc.prepad);
1672 page = (int) (to >> this->page_shift); 1460 oob += chip->ecc.prepad;
1673 /* Invalidate the page cache, if we write to the cached page */
1674 if (page <= this->pagebuf && this->pagebuf < (page + totalpages))
1675 this->pagebuf = -1;
1676
1677 /* Set it relative to chip */
1678 page &= this->pagemask;
1679 startpage = page;
1680 /* Calc number of pages we can write in one go */
1681 numpages = min (ppblock - (startpage & (ppblock - 1)), totalpages);
1682 oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel, autoplace, numpages);
1683 bufstart = (u_char *)buf;
1684
1685 /* Loop until all data is written */
1686 while (written < len) {
1687
1688 this->data_poi = (u_char*) &buf[written];
1689 /* Write one page. If this is the last page to write
1690 * or the last page in this block, then use the
1691 * real pageprogram command, else select cached programming
1692 * if supported by the chip.
1693 */
1694 ret = nand_write_page (mtd, this, page, &oobbuf[oob], oobsel, (--numpages > 0));
1695 if (ret) {
1696 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: write_page failed %d\n", ret);
1697 goto out;
1698 } 1461 }
1699 /* Next oob page */
1700 oob += mtd->oobsize;
1701 /* Update written bytes count */
1702 written += mtd->oobblock;
1703 if (written == len)
1704 goto cmp;
1705 1462
1706 /* Increment page address */ 1463 chip->ecc.calculate(mtd, p, oob);
1707 page++; 1464 chip->write_buf(mtd, oob, eccbytes);
1708 1465 oob += eccbytes;
1709 /* Have we hit a block boundary ? Then we have to verify and 1466
1710 * if verify is ok, we have to setup the oob buffer for 1467 if (chip->ecc.postpad) {
1711 * the next pages. 1468 chip->write_buf(mtd, oob, chip->ecc.postpad);
1712 */ 1469 oob += chip->ecc.postpad;
1713 if (!(page & (ppblock - 1))){
1714 int ofs;
1715 this->data_poi = bufstart;
1716 ret = nand_verify_pages (mtd, this, startpage,
1717 page - startpage,
1718 oobbuf, oobsel, chipnr, (eccbuf != NULL));
1719 if (ret) {
1720 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
1721 goto out;
1722 }
1723 *retlen = written;
1724
1725 ofs = autoplace ? mtd->oobavail : mtd->oobsize;
1726 if (eccbuf)
1727 eccbuf += (page - startpage) * ofs;
1728 totalpages -= page - startpage;
1729 numpages = min (totalpages, ppblock);
1730 page &= this->pagemask;
1731 startpage = page;
1732 oobbuf = nand_prepare_oobbuf (mtd, eccbuf, oobsel,
1733 autoplace, numpages);
1734 oob = 0;
1735 /* Check, if we cross a chip boundary */
1736 if (!page) {
1737 chipnr++;
1738 this->select_chip(mtd, -1);
1739 this->select_chip(mtd, chipnr);
1740 }
1741 } 1470 }
1742 } 1471 }
1743 /* Verify the remaining pages */
1744cmp:
1745 this->data_poi = bufstart;
1746 ret = nand_verify_pages (mtd, this, startpage, totalpages,
1747 oobbuf, oobsel, chipnr, (eccbuf != NULL));
1748 if (!ret)
1749 *retlen = written;
1750 else
1751 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_ecc: verify_pages failed %d\n", ret);
1752 1472
1753out: 1473 /* Calculate remaining oob bytes */
1754 /* Deselect and wake up anyone waiting on the device */ 1474 i = mtd->oobsize - (oob - chip->oob_poi);
1755 nand_release_device(mtd); 1475 if (i)
1756 1476 chip->write_buf(mtd, oob, i);
1757 return ret;
1758} 1477}
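Unlike the swecc/hwecc writers, the syndrome writer above interleaves data and spare bytes per ECC step: each step programs eccsize data bytes, then the prepad filler, the generator's syndrome bytes, and the postpad filler, with any remaining OOB written in one go at the end. A small sketch of the per-step OOB consumption (illustrative only; the helper name is the editor's):

/* Illustrative only, not part of the patch: OOB bytes consumed per ECC step
 * by the syndrome layout (prepad + ecc.bytes + postpad).
 */
static int example_syndrome_oob_per_step(struct nand_chip *chip)
{
	return chip->ecc.prepad + chip->ecc.bytes + chip->ecc.postpad;
}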
1759 1478
1760
1761/** 1479/**
1762 * nand_write_oob - [MTD Interface] NAND write out-of-band 1480 * nand_write_page - [INTERNAL] write one page
1763 * @mtd: MTD device structure 1481 * @mtd: MTD device structure
1764 * @to: offset to write to 1482 * @chip: NAND chip descriptor
1765 * @len: number of bytes to write
1766 * @retlen: pointer to variable to store the number of written bytes
1767 * @buf: the data to write 1483 * @buf: the data to write
1768 * 1484 * @page: page number to write
1769 * NAND write out-of-band 1485 * @cached: cached programming
1770 */ 1486 */
1771static int nand_write_oob (struct mtd_info *mtd, loff_t to, size_t len, size_t * retlen, const u_char * buf) 1487static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1488 const uint8_t *buf, int page, int cached)
1772{ 1489{
1773 int column, page, status, ret = -EIO, chipnr; 1490 int status;
1774 struct nand_chip *this = mtd->priv;
1775 1491
1776 DEBUG (MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 1492 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1777 1493
1778 /* Shift to get page */ 1494 chip->ecc.write_page(mtd, chip, buf);
1779 page = (int) (to >> this->page_shift);
1780 chipnr = (int) (to >> this->chip_shift);
1781 1495
1782 /* Mask to get column */ 1496 /*
1783 column = to & (mtd->oobsize - 1); 1497 * Cached progamming disabled for now, Not sure if its worth the
1498 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
1499 */
1500 cached = 0;
1784 1501
1785 /* Initialize return length value */ 1502 if (!cached || !(chip->options & NAND_CACHEPRG)) {
1786 *retlen = 0;
1787 1503
1788 /* Do not allow write past end of page */ 1504 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1789 if ((column + len) > mtd->oobsize) { 1505 status = chip->waitfunc(mtd, chip);
1790 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: Attempt to write past end of page\n"); 1506 /*
1791 return -EINVAL; 1507 * See if operation failed and additional status checks are
1508 * available
1509 */
1510 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
1511 status = chip->errstat(mtd, chip, FL_WRITING, status,
1512 page);
1513
1514 if (status & NAND_STATUS_FAIL)
1515 return -EIO;
1516 } else {
1517 chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
1518 status = chip->waitfunc(mtd, chip);
1792 } 1519 }
1793 1520
1794 /* Grab the lock and see if the device is available */ 1521#ifdef CONFIG_MTD_NAND_VERIFY_WRITE
1795 nand_get_device (this, mtd, FL_WRITING); 1522 /* Send command to read back the data */
1523 chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
1796 1524
1797 /* Select the NAND device */ 1525 if (chip->verify_buf(mtd, buf, mtd->writesize))
1798 this->select_chip(mtd, chipnr); 1526 return -EIO;
1527#endif
1528 return 0;
1529}
1530
1531/**
1532 * nand_fill_oob - [Internal] Transfer client buffer to oob
1533 * @chip: nand chip structure
1534 * @oob: oob data buffer
1535 * @ops: oob ops structure
1536 */
1537static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob,
1538 struct mtd_oob_ops *ops)
1539{
1540 size_t len = ops->ooblen;
1541
1542 switch(ops->mode) {
1543
1544 case MTD_OOB_PLACE:
1545 case MTD_OOB_RAW:
1546 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
1547 return oob + len;
1548
1549 case MTD_OOB_AUTO: {
1550 struct nand_oobfree *free = chip->ecc.layout->oobfree;
1551 uint32_t boffs = 0, woffs = ops->ooboffs;
1552 size_t bytes = 0;
1553
1554 for(; free->length && len; free++, len -= bytes) {
1555 /* Write request not from offset 0 ? */
1556 if (unlikely(woffs)) {
1557 if (woffs >= free->length) {
1558 woffs -= free->length;
1559 continue;
1560 }
1561 boffs = free->offset + woffs;
1562 bytes = min_t(size_t, len,
1563 (free->length - woffs));
1564 woffs = 0;
1565 } else {
1566 bytes = min_t(size_t, len, free->length);
1567 boffs = free->offset;
1568 }
1569 memcpy(chip->oob_poi + boffs, oob, bytes);
1570 oob += bytes;
1571 }
1572 return oob;
1573 }
1574 default:
1575 BUG();
1576 }
1577 return NULL;
1578}
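In MTD_OOB_AUTO mode, nand_fill_oob() scatters the client buffer into the free regions declared in ecc.layout->oobfree, honouring ops->ooboffs. A hypothetical worked example (layout values invented purely for illustration): with the layout below, ooboffs = 0 and ooblen = 10, the first 6 client bytes land at oob_poi[2..7] and the remaining 4 at oob_poi[16..19].

/* Hypothetical layout, for illustration only -- not a real chip's layout. */
static struct nand_ecclayout example_layout = {
	.oobfree = {
		{ .offset = 2,  .length = 6 },
		{ .offset = 16, .length = 8 },
	},
};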
1579
1580#define NOTALIGNED(x) (x & (mtd->writesize-1)) != 0
1581
1582/**
1583 * nand_do_write_ops - [Internal] NAND write with ECC
1584 * @mtd: MTD device structure
1585 * @to: offset to write to
1586 * @ops: oob operations description structure
1587 *
1588 * NAND write with ECC
1589 */
1590static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
1591 struct mtd_oob_ops *ops)
1592{
1593 int chipnr, realpage, page, blockmask;
1594 struct nand_chip *chip = mtd->priv;
1595 uint32_t writelen = ops->len;
1596 uint8_t *oob = ops->oobbuf;
1597 uint8_t *buf = ops->datbuf;
1598 int bytes = mtd->writesize;
1599 int ret;
1799 1600
1800 /* Reset the chip. Some chips (like the Toshiba TC5832DC found 1601 ops->retlen = 0;
1801 in one of my DiskOnChip 2000 test units) will clear the whole 1602
1802 data page too if we don't do this. I have no clue why, but 1603 /* reject writes, which are not page aligned */
1803 I seem to have 'fixed' it in the doc2000 driver in 1604 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
1804 August 1999. dwmw2. */ 1605 printk(KERN_NOTICE "nand_write: "
1805 this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); 1606 "Attempt to write not page aligned data\n");
1607 return -EINVAL;
1608 }
1609
1610 if (!writelen)
1611 return 0;
1806 1612
1807 /* Check, if it is write protected */ 1613 /* Check, if it is write protected */
1808 if (nand_check_wp(mtd)) 1614 if (nand_check_wp(mtd))
1809 goto out; 1615 return -EIO;
1810 1616
1811 /* Invalidate the page cache, if we write to the cached page */ 1617 chipnr = (int)(to >> chip->chip_shift);
1812 if (page == this->pagebuf) 1618 chip->select_chip(mtd, chipnr);
1813 this->pagebuf = -1;
1814
1815 if (NAND_MUST_PAD(this)) {
1816 /* Write out desired data */
1817 this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock, page & this->pagemask);
1818 /* prepad 0xff for partial programming */
1819 this->write_buf(mtd, ffchars, column);
1820 /* write data */
1821 this->write_buf(mtd, buf, len);
1822 /* postpad 0xff for partial programming */
1823 this->write_buf(mtd, ffchars, mtd->oobsize - (len+column));
1824 } else {
1825 /* Write out desired data */
1826 this->cmdfunc (mtd, NAND_CMD_SEQIN, mtd->oobblock + column, page & this->pagemask);
1827 /* write data */
1828 this->write_buf(mtd, buf, len);
1829 }
1830 /* Send command to program the OOB data */
1831 this->cmdfunc (mtd, NAND_CMD_PAGEPROG, -1, -1);
1832 1619
1833 status = this->waitfunc (mtd, this, FL_WRITING); 1620 realpage = (int)(to >> chip->page_shift);
1621 page = realpage & chip->pagemask;
1622 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1834 1623
1835 /* See if device thinks it succeeded */ 1624 /* Invalidate the page cache, when we write to the cached page */
1836 if (status & NAND_STATUS_FAIL) { 1625 if (to <= (chip->pagebuf << chip->page_shift) &&
1837 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write, page 0x%08x\n", page); 1626 (chip->pagebuf << chip->page_shift) < (to + ops->len))
1838 ret = -EIO; 1627 chip->pagebuf = -1;
1839 goto out;
1840 }
1841 /* Return happy */
1842 *retlen = len;
1843 1628
1844#ifdef CONFIG_MTD_NAND_VERIFY_WRITE 1629 chip->oob_poi = chip->buffers.oobwbuf;
1845 /* Send command to read back the data */
1846 this->cmdfunc (mtd, NAND_CMD_READOOB, column, page & this->pagemask);
1847 1630
1848 if (this->verify_buf(mtd, buf, len)) { 1631 while(1) {
1849 DEBUG (MTD_DEBUG_LEVEL0, "nand_write_oob: " "Failed write verify, page 0x%08x\n", page); 1632 int cached = writelen > bytes && page != blockmask;
1850 ret = -EIO; 1633
1851 goto out; 1634 if (unlikely(oob))
1635 oob = nand_fill_oob(chip, oob, ops);
1636
1637 ret = nand_write_page(mtd, chip, buf, page, cached);
1638 if (ret)
1639 break;
1640
1641 writelen -= bytes;
1642 if (!writelen)
1643 break;
1644
1645 buf += bytes;
1646 realpage++;
1647
1648 page = realpage & chip->pagemask;
1649 /* Check, if we cross a chip boundary */
1650 if (!page) {
1651 chipnr++;
1652 chip->select_chip(mtd, -1);
1653 chip->select_chip(mtd, chipnr);
1654 }
1852 } 1655 }
1853#endif
1854 ret = 0;
1855out:
1856 /* Deselect and wake up anyone waiting on the device */
1857 nand_release_device(mtd);
1858 1656
1657 if (unlikely(oob))
1658 memset(chip->oob_poi, 0xff, mtd->oobsize);
1659
1660 ops->retlen = ops->len - writelen;
1859 return ret; 1661 return ret;
1860} 1662}
1861 1663
1862
1863/** 1664/**
1864 * nand_writev - [MTD Interface] compabilty function for nand_writev_ecc 1665 * nand_write - [MTD Interface] NAND write with ECC
1865 * @mtd: MTD device structure 1666 * @mtd: MTD device structure
1866 * @vecs: the iovectors to write
1867 * @count: number of vectors
1868 * @to: offset to write to 1667 * @to: offset to write to
1668 * @len: number of bytes to write
1869 * @retlen: pointer to variable to store the number of written bytes 1669 * @retlen: pointer to variable to store the number of written bytes
1670 * @buf: the data to write
1870 * 1671 *
1871 * NAND write with kvec. This just calls the ecc function 1672 * NAND write with ECC
1872 */ 1673 */
1873static int nand_writev (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, 1674static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1874 loff_t to, size_t * retlen) 1675 size_t *retlen, const uint8_t *buf)
1875{ 1676{
1876 return (nand_writev_ecc (mtd, vecs, count, to, retlen, NULL, NULL)); 1677 struct nand_chip *chip = mtd->priv;
1678 int ret;
1679
1680 /* Do not allow reads past end of device */
1681 if ((to + len) > mtd->size)
1682 return -EINVAL;
1683 if (!len)
1684 return 0;
1685
1686 nand_get_device(chip, mtd, FL_WRITING);
1687
1688 chip->ops.len = len;
1689 chip->ops.datbuf = (uint8_t *)buf;
1690 chip->ops.oobbuf = NULL;
1691
1692 ret = nand_do_write_ops(mtd, to, &chip->ops);
1693
1694 nand_release_device(mtd);
1695
1696 *retlen = chip->ops.retlen;
1697 return ret;
1877} 1698}
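For reference, a minimal caller-side sketch of the rewritten write path (illustrative only, not part of the patch): nand_write() rejects offsets and lengths that are not page aligned, wraps the buffer in chip->ops and hands it to nand_do_write_ops() under the chip lock.

/* Illustrative only: write one page of data at a page-aligned offset.
 * Returns 0 or a negative error; retlen reports how much was written.
 */
static int example_write_page(struct mtd_info *mtd, loff_t to,
			      const uint8_t *buf)
{
	size_t retlen = 0;
	int ret;

	ret = mtd->write(mtd, to, mtd->writesize, &retlen, buf);
	if (ret)
		return ret;
	return (retlen == mtd->writesize) ? 0 : -EIO;
}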
1878 1699
1879/** 1700/**
1880 * nand_writev_ecc - [MTD Interface] write with iovec with ecc 1701 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
1881 * @mtd: MTD device structure 1702 * @mtd: MTD device structure
1882 * @vecs: the iovectors to write
1883 * @count: number of vectors
1884 * @to: offset to write to 1703 * @to: offset to write to
1885 * @retlen: pointer to variable to store the number of written bytes 1704 * @ops: oob operation description structure
1886 * @eccbuf: filesystem supplied oob data buffer
1887 * @oobsel: oob selection structure
1888 * 1705 *
1889 * NAND write with iovec with ecc 1706 * NAND write out-of-band
1890 */ 1707 */
1891static int nand_writev_ecc (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, 1708static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
1892 loff_t to, size_t * retlen, u_char *eccbuf, struct nand_oobinfo *oobsel) 1709 struct mtd_oob_ops *ops)
1893{ 1710{
1894 int i, page, len, total_len, ret = -EIO, written = 0, chipnr; 1711 int chipnr, page, status;
1895 int oob, numpages, autoplace = 0, startpage; 1712 struct nand_chip *chip = mtd->priv;
1896 struct nand_chip *this = mtd->priv;
1897 int ppblock = (1 << (this->phys_erase_shift - this->page_shift));
1898 u_char *oobbuf, *bufstart;
1899
1900 /* Preset written len for early exit */
1901 *retlen = 0;
1902
1903 /* Calculate total length of data */
1904 total_len = 0;
1905 for (i = 0; i < count; i++)
1906 total_len += (int) vecs[i].iov_len;
1907 1713
1908 DEBUG (MTD_DEBUG_LEVEL3, 1714 DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n",
1909 "nand_writev: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count); 1715 (unsigned int)to, (int)ops->len);
1910 1716
1911 /* Do not allow write past end of page */ 1717 /* Do not allow write past end of page */
1912 if ((to + total_len) > mtd->size) { 1718 if ((ops->ooboffs + ops->len) > mtd->oobsize) {
1913 DEBUG (MTD_DEBUG_LEVEL0, "nand_writev: Attempted write past end of device\n"); 1719 DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
1720 "Attempt to write past end of page\n");
1914 return -EINVAL; 1721 return -EINVAL;
1915 } 1722 }
1916 1723
1917 /* reject writes, which are not page aligned */ 1724 chipnr = (int)(to >> chip->chip_shift);
1918 if (NOTALIGNED (to) || NOTALIGNED(total_len)) { 1725 chip->select_chip(mtd, chipnr);
1919 printk (KERN_NOTICE "nand_write_ecc: Attempt to write not page aligned data\n");
1920 return -EINVAL;
1921 }
1922 1726
1923 /* Grab the lock and see if the device is available */ 1727 /* Shift to get page */
1924 nand_get_device (this, mtd, FL_WRITING); 1728 page = (int)(to >> chip->page_shift);
1925 1729
1926 /* Get the current chip-nr */ 1730 /*
1927 chipnr = (int) (to >> this->chip_shift); 1731 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
1928 /* Select the NAND device */ 1732 * of my DiskOnChip 2000 test units) will clear the whole data page too
1929 this->select_chip(mtd, chipnr); 1733 * if we don't do this. I have no clue why, but I seem to have 'fixed'
1734 * it in the doc2000 driver in August 1999. dwmw2.
1735 */
1736 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1930 1737
1931 /* Check, if it is write protected */ 1738 /* Check, if it is write protected */
1932 if (nand_check_wp(mtd)) 1739 if (nand_check_wp(mtd))
1933 goto out; 1740 return -EROFS;
1934 1741
1935 /* if oobsel is NULL, use chip defaults */ 1742 /* Invalidate the page cache, if we write to the cached page */
1936 if (oobsel == NULL) 1743 if (page == chip->pagebuf)
1937 oobsel = &mtd->oobinfo; 1744 chip->pagebuf = -1;
1938 1745
1939 /* Autoplace of oob data ? Use the default placement scheme */ 1746 chip->oob_poi = chip->buffers.oobwbuf;
1940 if (oobsel->useecc == MTD_NANDECC_AUTOPLACE) { 1747 memset(chip->oob_poi, 0xff, mtd->oobsize);
1941 oobsel = this->autooob; 1748 nand_fill_oob(chip, ops->oobbuf, ops);
1942 autoplace = 1; 1749 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
1943 } 1750 memset(chip->oob_poi, 0xff, mtd->oobsize);
1944 if (oobsel->useecc == MTD_NANDECC_AUTOPL_USR)
1945 autoplace = 1;
1946 1751
1947 /* Setup start page */ 1752 if (status)
1948 page = (int) (to >> this->page_shift); 1753 return status;
1949 /* Invalidate the page cache, if we write to the cached page */
1950 if (page <= this->pagebuf && this->pagebuf < ((to + total_len) >> this->page_shift))
1951 this->pagebuf = -1;
1952 1754
1953 startpage = page & this->pagemask; 1755 ops->retlen = ops->len;
1954 1756
1955 /* Loop until all kvec' data has been written */ 1757 return 0;
1956 len = 0; 1758}
1957 while (count) {
1958 /* If the given tuple is >= pagesize then
1959 * write it out from the iov
1960 */
1961 if ((vecs->iov_len - len) >= mtd->oobblock) {
1962 /* Calc number of pages we can write
1963 * out of this iov in one go */
1964 numpages = (vecs->iov_len - len) >> this->page_shift;
1965 /* Do not cross block boundaries */
1966 numpages = min (ppblock - (startpage & (ppblock - 1)), numpages);
1967 oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
1968 bufstart = (u_char *)vecs->iov_base;
1969 bufstart += len;
1970 this->data_poi = bufstart;
1971 oob = 0;
1972 for (i = 1; i <= numpages; i++) {
1973 /* Write one page. If this is the last page to write
1974 * then use the real pageprogram command, else select
1975 * cached programming if supported by the chip.
1976 */
1977 ret = nand_write_page (mtd, this, page & this->pagemask,
1978 &oobbuf[oob], oobsel, i != numpages);
1979 if (ret)
1980 goto out;
1981 this->data_poi += mtd->oobblock;
1982 len += mtd->oobblock;
1983 oob += mtd->oobsize;
1984 page++;
1985 }
1986 /* Check, if we have to switch to the next tuple */
1987 if (len >= (int) vecs->iov_len) {
1988 vecs++;
1989 len = 0;
1990 count--;
1991 }
1992 } else {
1993 /* We must use the internal buffer, read data out of each
1994 * tuple until we have a full page to write
1995 */
1996 int cnt = 0;
1997 while (cnt < mtd->oobblock) {
1998 if (vecs->iov_base != NULL && vecs->iov_len)
1999 this->data_buf[cnt++] = ((u_char *) vecs->iov_base)[len++];
2000 /* Check, if we have to switch to the next tuple */
2001 if (len >= (int) vecs->iov_len) {
2002 vecs++;
2003 len = 0;
2004 count--;
2005 }
2006 }
2007 this->pagebuf = page;
2008 this->data_poi = this->data_buf;
2009 bufstart = this->data_poi;
2010 numpages = 1;
2011 oobbuf = nand_prepare_oobbuf (mtd, NULL, oobsel, autoplace, numpages);
2012 ret = nand_write_page (mtd, this, page & this->pagemask,
2013 oobbuf, oobsel, 0);
2014 if (ret)
2015 goto out;
2016 page++;
2017 }
2018 1759
2019 this->data_poi = bufstart; 1760/**
2020 ret = nand_verify_pages (mtd, this, startpage, numpages, oobbuf, oobsel, chipnr, 0); 1761 * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
2021 if (ret) 1762 * @mtd: MTD device structure
2022 goto out; 1763 * @to: offset to write to
1764 * @ops: oob operation description structure
1765 */
1766static int nand_write_oob(struct mtd_info *mtd, loff_t to,
1767 struct mtd_oob_ops *ops)
1768{
1769 void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
1770 const uint8_t *buf) = NULL;
1771 struct nand_chip *chip = mtd->priv;
1772 int ret = -ENOTSUPP;
2023 1773
2024 written += mtd->oobblock * numpages; 1774 ops->retlen = 0;
2025 /* All done ? */
2026 if (!count)
2027 break;
2028 1775
2029 startpage = page & this->pagemask; 1776 /* Do not allow writes past end of device */
2030 /* Check, if we cross a chip boundary */ 1777 if ((to + ops->len) > mtd->size) {
2031 if (!startpage) { 1778 DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: "
2032 chipnr++; 1779 "Attempt write beyond end of device\n");
2033 this->select_chip(mtd, -1); 1780 return -EINVAL;
2034 this->select_chip(mtd, chipnr);
2035 }
2036 } 1781 }
2037 ret = 0;
2038out:
2039 /* Deselect and wake up anyone waiting on the device */
2040 nand_release_device(mtd);
2041 1782
2042 *retlen = written; 1783 nand_get_device(chip, mtd, FL_WRITING);
1784
1785 switch(ops->mode) {
1786 case MTD_OOB_PLACE:
1787 case MTD_OOB_AUTO:
1788 break;
1789
1790 case MTD_OOB_RAW:
1791 /* Replace the write_page algorithm temporary */
1792 write_page = chip->ecc.write_page;
1793 chip->ecc.write_page = nand_write_page_raw;
1794 break;
1795
1796 default:
1797 goto out;
1798 }
1799
1800 if (!ops->datbuf)
1801 ret = nand_do_write_oob(mtd, to, ops);
1802 else
1803 ret = nand_do_write_ops(mtd, to, ops);
1804
1805 if (unlikely(ops->mode == MTD_OOB_RAW))
1806 chip->ecc.write_page = write_page;
1807 out:
1808 nand_release_device(mtd);
2043 return ret; 1809 return ret;
2044} 1810}
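A matching caller-side sketch for the OOB write entry point above (illustrative only, not part of the patch; the helper name is the editor's). MTD_OOB_PLACE addresses the raw spare area directly, so ooboffs is an absolute offset inside the OOB of the addressed page.

/* Illustrative only: rewrite a few spare bytes of one page via the
 * ops-based interface; with datbuf == NULL this takes the
 * nand_do_write_oob() path.
 */
static int example_write_oob(struct mtd_info *mtd, loff_t to,
			     const uint8_t *spare, size_t sparelen,
			     uint32_t ooboffs)
{
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OOB_PLACE;
	ops.ooboffs = ooboffs;
	ops.len = sparelen;
	ops.ooblen = sparelen;
	ops.oobbuf = (uint8_t *)spare;
	ops.datbuf = NULL;

	return mtd->write_oob(mtd, to, &ops);
}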
2045 1811
@@ -2050,12 +1816,12 @@ out:
2050 * 1816 *
2051 * Standard erase command for NAND chips 1817 * Standard erase command for NAND chips
2052 */ 1818 */
2053static void single_erase_cmd (struct mtd_info *mtd, int page) 1819static void single_erase_cmd(struct mtd_info *mtd, int page)
2054{ 1820{
2055 struct nand_chip *this = mtd->priv; 1821 struct nand_chip *chip = mtd->priv;
2056 /* Send commands to erase a block */ 1822 /* Send commands to erase a block */
2057 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page); 1823 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2058 this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1); 1824 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2059} 1825}
2060 1826
2061/** 1827/**
@@ -2066,15 +1832,15 @@ static void single_erase_cmd (struct mtd_info *mtd, int page)
2066 * AND multi block erase command function 1832 * AND multi block erase command function
2067 * Erase 4 consecutive blocks 1833 * Erase 4 consecutive blocks
2068 */ 1834 */
2069static void multi_erase_cmd (struct mtd_info *mtd, int page) 1835static void multi_erase_cmd(struct mtd_info *mtd, int page)
2070{ 1836{
2071 struct nand_chip *this = mtd->priv; 1837 struct nand_chip *chip = mtd->priv;
2072 /* Send commands to erase a block */ 1838 /* Send commands to erase a block */
2073 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); 1839 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
2074 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); 1840 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
2075 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page++); 1841 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++);
2076 this->cmdfunc (mtd, NAND_CMD_ERASE1, -1, page); 1842 chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
2077 this->cmdfunc (mtd, NAND_CMD_ERASE2, -1, -1); 1843 chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
2078} 1844}
2079 1845
2080/** 1846/**
@@ -2084,79 +1850,82 @@ static void multi_erase_cmd (struct mtd_info *mtd, int page)
2084 * 1850 *
2085 * Erase one or more blocks 1851 * Erase one or more blocks
2086 */ 1852 */
2087static int nand_erase (struct mtd_info *mtd, struct erase_info *instr) 1853static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
2088{ 1854{
2089 return nand_erase_nand (mtd, instr, 0); 1855 return nand_erase_nand(mtd, instr, 0);
2090} 1856}
2091 1857
2092#define BBT_PAGE_MASK 0xffffff3f 1858#define BBT_PAGE_MASK 0xffffff3f
2093/** 1859/**
2094 * nand_erase_intern - [NAND Interface] erase block(s) 1860 * nand_erase_nand - [Internal] erase block(s)
2095 * @mtd: MTD device structure 1861 * @mtd: MTD device structure
2096 * @instr: erase instruction 1862 * @instr: erase instruction
2097 * @allowbbt: allow erasing the bbt area 1863 * @allowbbt: allow erasing the bbt area
2098 * 1864 *
2099 * Erase one or more blocks 1865 * Erase one or more blocks
2100 */ 1866 */
2101int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt) 1867int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
1868 int allowbbt)
2102{ 1869{
2103 int page, len, status, pages_per_block, ret, chipnr; 1870 int page, len, status, pages_per_block, ret, chipnr;
2104 struct nand_chip *this = mtd->priv; 1871 struct nand_chip *chip = mtd->priv;
2105 int rewrite_bbt[NAND_MAX_CHIPS]={0}; /* flags to indicate the page, if bbt needs to be rewritten. */ 1872 int rewrite_bbt[NAND_MAX_CHIPS]={0};
2106 unsigned int bbt_masked_page; /* bbt mask to compare to page being erased. */ 1873 unsigned int bbt_masked_page = 0xffffffff;
2107 /* It is used to see if the current page is in the same */
2108 /* 256 block group and the same bank as the bbt. */
2109 1874
2110 DEBUG (MTD_DEBUG_LEVEL3, 1875 DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n",
2111 "nand_erase: start = 0x%08x, len = %i\n", (unsigned int) instr->addr, (unsigned int) instr->len); 1876 (unsigned int)instr->addr, (unsigned int)instr->len);
2112 1877
2113 /* Start address must align on block boundary */ 1878 /* Start address must align on block boundary */
2114 if (instr->addr & ((1 << this->phys_erase_shift) - 1)) { 1879 if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) {
2115 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n"); 1880 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n");
2116 return -EINVAL; 1881 return -EINVAL;
2117 } 1882 }
2118 1883
2119 /* Length must align on block boundary */ 1884 /* Length must align on block boundary */
2120 if (instr->len & ((1 << this->phys_erase_shift) - 1)) { 1885 if (instr->len & ((1 << chip->phys_erase_shift) - 1)) {
2121 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Length not block aligned\n"); 1886 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
1887 "Length not block aligned\n");
2122 return -EINVAL; 1888 return -EINVAL;
2123 } 1889 }
2124 1890
2125 /* Do not allow erase past end of device */ 1891 /* Do not allow erase past end of device */
2126 if ((instr->len + instr->addr) > mtd->size) { 1892 if ((instr->len + instr->addr) > mtd->size) {
2127 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Erase past end of device\n"); 1893 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
1894 "Erase past end of device\n");
2128 return -EINVAL; 1895 return -EINVAL;
2129 } 1896 }
2130 1897
2131 instr->fail_addr = 0xffffffff; 1898 instr->fail_addr = 0xffffffff;
2132 1899
2133 /* Grab the lock and see if the device is available */ 1900 /* Grab the lock and see if the device is available */
2134 nand_get_device (this, mtd, FL_ERASING); 1901 nand_get_device(chip, mtd, FL_ERASING);
2135 1902
2136 /* Shift to get first page */ 1903 /* Shift to get first page */
2137 page = (int) (instr->addr >> this->page_shift); 1904 page = (int)(instr->addr >> chip->page_shift);
2138 chipnr = (int) (instr->addr >> this->chip_shift); 1905 chipnr = (int)(instr->addr >> chip->chip_shift);
2139 1906
2140 /* Calculate pages in each block */ 1907 /* Calculate pages in each block */
2141 pages_per_block = 1 << (this->phys_erase_shift - this->page_shift); 1908 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
2142 1909
2143 /* Select the NAND device */ 1910 /* Select the NAND device */
2144 this->select_chip(mtd, chipnr); 1911 chip->select_chip(mtd, chipnr);
2145 1912
2146 /* Check the WP bit */
2147 /* Check, if it is write protected */ 1913 /* Check, if it is write protected */
2148 if (nand_check_wp(mtd)) { 1914 if (nand_check_wp(mtd)) {
2149 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: Device is write protected!!!\n"); 1915 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
1916 "Device is write protected!!!\n");
2150 instr->state = MTD_ERASE_FAILED; 1917 instr->state = MTD_ERASE_FAILED;
2151 goto erase_exit; 1918 goto erase_exit;
2152 } 1919 }
2153 1920
2154 /* if BBT requires refresh, set the BBT page mask to see if the BBT should be rewritten */ 1921 /*
2155 if (this->options & BBT_AUTO_REFRESH) { 1922 * If BBT requires refresh, set the BBT page mask to see if the BBT
2156 bbt_masked_page = this->bbt_td->pages[chipnr] & BBT_PAGE_MASK; 1923 * should be rewritten. Otherwise the mask is set to 0xffffffff which
2157 } else { 1924 * can not be matched. This is also done when the bbt is actually
2158 bbt_masked_page = 0xffffffff; /* should not match anything */ 1925 * erased to avoid recursive updates
2159 } 1926 */
1927 if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
1928 bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
2160 1929
2161 /* Loop through the pages */ 1930 /* Loop through the pages */
2162 len = instr->len; 1931 len = instr->len;
@@ -2164,64 +1933,77 @@ int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbb
2164 instr->state = MTD_ERASING; 1933 instr->state = MTD_ERASING;
2165 1934
2166 while (len) { 1935 while (len) {
2167 /* Check if we have a bad block, we do not erase bad blocks ! */ 1936 /*
2168 if (nand_block_checkbad(mtd, ((loff_t) page) << this->page_shift, 0, allowbbt)) { 1937 * Check if we have a bad block, we do not erase bad blocks !
2169 printk (KERN_WARNING "nand_erase: attempt to erase a bad block at page 0x%08x\n", page); 1938 */
1939 if (nand_block_checkbad(mtd, ((loff_t) page) <<
1940 chip->page_shift, 0, allowbbt)) {
1941 printk(KERN_WARNING "nand_erase: attempt to erase a "
1942 "bad block at page 0x%08x\n", page);
2170 instr->state = MTD_ERASE_FAILED; 1943 instr->state = MTD_ERASE_FAILED;
2171 goto erase_exit; 1944 goto erase_exit;
2172 } 1945 }
2173 1946
2174 /* Invalidate the page cache, if we erase the block which contains 1947 /*
2175 the current cached page */ 1948 * Invalidate the page cache, if we erase the block which
2176 if (page <= this->pagebuf && this->pagebuf < (page + pages_per_block)) 1949 * contains the current cached page
2177 this->pagebuf = -1; 1950 */
1951 if (page <= chip->pagebuf && chip->pagebuf <
1952 (page + pages_per_block))
1953 chip->pagebuf = -1;
2178 1954
2179 this->erase_cmd (mtd, page & this->pagemask); 1955 chip->erase_cmd(mtd, page & chip->pagemask);
2180 1956
2181 status = this->waitfunc (mtd, this, FL_ERASING); 1957 status = chip->waitfunc(mtd, chip);
2182 1958
2183 /* See if operation failed and additional status checks are available */ 1959 /*
2184 if ((status & NAND_STATUS_FAIL) && (this->errstat)) { 1960 * See if operation failed and additional status checks are
2185 status = this->errstat(mtd, this, FL_ERASING, status, page); 1961 * available
2186 } 1962 */
1963 if ((status & NAND_STATUS_FAIL) && (chip->errstat))
1964 status = chip->errstat(mtd, chip, FL_ERASING,
1965 status, page);
2187 1966
2188 /* See if block erase succeeded */ 1967 /* See if block erase succeeded */
2189 if (status & NAND_STATUS_FAIL) { 1968 if (status & NAND_STATUS_FAIL) {
2190 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase: " "Failed erase, page 0x%08x\n", page); 1969 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: "
1970 "Failed erase, page 0x%08x\n", page);
2191 instr->state = MTD_ERASE_FAILED; 1971 instr->state = MTD_ERASE_FAILED;
2192 instr->fail_addr = (page << this->page_shift); 1972 instr->fail_addr = (page << chip->page_shift);
2193 goto erase_exit; 1973 goto erase_exit;
2194 } 1974 }
2195 1975
2196 /* if BBT requires refresh, set the BBT rewrite flag to the page being erased */ 1976 /*
2197 if (this->options & BBT_AUTO_REFRESH) { 1977 * If BBT requires refresh, set the BBT rewrite flag to the
2198 if (((page & BBT_PAGE_MASK) == bbt_masked_page) && 1978 * page being erased
2199 (page != this->bbt_td->pages[chipnr])) { 1979 */
2200 rewrite_bbt[chipnr] = (page << this->page_shift); 1980 if (bbt_masked_page != 0xffffffff &&
2201 } 1981 (page & BBT_PAGE_MASK) == bbt_masked_page)
2202 } 1982 rewrite_bbt[chipnr] = (page << chip->page_shift);
2203 1983
2204 /* Increment page address and decrement length */ 1984 /* Increment page address and decrement length */
2205 len -= (1 << this->phys_erase_shift); 1985 len -= (1 << chip->phys_erase_shift);
2206 page += pages_per_block; 1986 page += pages_per_block;
2207 1987
2208 /* Check, if we cross a chip boundary */ 1988 /* Check, if we cross a chip boundary */
2209 if (len && !(page & this->pagemask)) { 1989 if (len && !(page & chip->pagemask)) {
2210 chipnr++; 1990 chipnr++;
2211 this->select_chip(mtd, -1); 1991 chip->select_chip(mtd, -1);
2212 this->select_chip(mtd, chipnr); 1992 chip->select_chip(mtd, chipnr);
2213
2214 /* if BBT requires refresh and BBT-PERCHIP,
2215 * set the BBT page mask to see if this BBT should be rewritten */
2216 if ((this->options & BBT_AUTO_REFRESH) && (this->bbt_td->options & NAND_BBT_PERCHIP)) {
2217 bbt_masked_page = this->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
2218 }
2219 1993
1994 /*
1995 * If BBT requires refresh and BBT-PERCHIP, set the BBT
1996 * page mask to see if this BBT should be rewritten
1997 */
1998 if (bbt_masked_page != 0xffffffff &&
1999 (chip->bbt_td->options & NAND_BBT_PERCHIP))
2000 bbt_masked_page = chip->bbt_td->pages[chipnr] &
2001 BBT_PAGE_MASK;
2220 } 2002 }
2221 } 2003 }
2222 instr->state = MTD_ERASE_DONE; 2004 instr->state = MTD_ERASE_DONE;
2223 2005
2224erase_exit: 2006 erase_exit:
2225 2007
2226 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO; 2008 ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
2227 /* Do call back function */ 2009 /* Do call back function */
@@ -2231,16 +2013,21 @@ erase_exit:
2231 /* Deselect and wake up anyone waiting on the device */ 2013 /* Deselect and wake up anyone waiting on the device */
2232 nand_release_device(mtd); 2014 nand_release_device(mtd);
2233 2015
2234 /* if BBT requires refresh and erase was successful, rewrite any selected bad block tables */ 2016 /*
2235 if ((this->options & BBT_AUTO_REFRESH) && (!ret)) { 2017 * If BBT requires refresh and erase was successful, rewrite any
2236 for (chipnr = 0; chipnr < this->numchips; chipnr++) { 2018 * selected bad block tables
2237 if (rewrite_bbt[chipnr]) { 2019 */
2238 /* update the BBT for chip */ 2020 if (bbt_masked_page == 0xffffffff || ret)
2239 DEBUG (MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt (%d:0x%0x 0x%0x)\n", 2021 return ret;
2240 chipnr, rewrite_bbt[chipnr], this->bbt_td->pages[chipnr]); 2022
2241 nand_update_bbt (mtd, rewrite_bbt[chipnr]); 2023 for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
2242 } 2024 if (!rewrite_bbt[chipnr])
2243 } 2025 continue;
2026 /* update the BBT for chip */
2027 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt "
2028 "(%d:0x%0x 0x%0x)\n", chipnr, rewrite_bbt[chipnr],
2029 chip->bbt_td->pages[chipnr]);
2030 nand_update_bbt(mtd, rewrite_bbt[chipnr]);
2244 } 2031 }
2245 2032
2246 /* Return more or less happy */ 2033 /* Return more or less happy */
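For reference, a minimal caller-side sketch of the erase path shown in this hunk (illustrative only, not part of the patch): the start address and length must be block aligned or nand_erase_nand() returns -EINVAL, and without a callback the call completes synchronously.

/* Illustrative only: erase a single block at a block-aligned address. */
static int example_erase_block(struct mtd_info *mtd, loff_t addr)
{
	struct erase_info ei;

	memset(&ei, 0, sizeof(ei));
	ei.mtd = mtd;
	ei.addr = addr;
	ei.len = mtd->erasesize;

	return mtd->erase(mtd, &ei);
}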
@@ -2253,51 +2040,50 @@ erase_exit:
2253 * 2040 *
2254 * Sync is actually a wait for chip ready function 2041 * Sync is actually a wait for chip ready function
2255 */ 2042 */
2256static void nand_sync (struct mtd_info *mtd) 2043static void nand_sync(struct mtd_info *mtd)
2257{ 2044{
2258 struct nand_chip *this = mtd->priv; 2045 struct nand_chip *chip = mtd->priv;
2259 2046
2260 DEBUG (MTD_DEBUG_LEVEL3, "nand_sync: called\n"); 2047 DEBUG(MTD_DEBUG_LEVEL3, "nand_sync: called\n");
2261 2048
2262 /* Grab the lock and see if the device is available */ 2049 /* Grab the lock and see if the device is available */
2263 nand_get_device (this, mtd, FL_SYNCING); 2050 nand_get_device(chip, mtd, FL_SYNCING);
2264 /* Release it and go back */ 2051 /* Release it and go back */
2265 nand_release_device (mtd); 2052 nand_release_device(mtd);
2266} 2053}
2267 2054
2268
2269/** 2055/**
2270 * nand_block_isbad - [MTD Interface] Check whether the block at the given offset is bad 2056 * nand_block_isbad - [MTD Interface] Check if block at offset is bad
2271 * @mtd: MTD device structure 2057 * @mtd: MTD device structure
2272 * @ofs: offset relative to mtd start 2058 * @ofs: offset relative to mtd start
2273 */ 2059 */
2274static int nand_block_isbad (struct mtd_info *mtd, loff_t ofs) 2060static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
2275{ 2061{
2276 /* Check for invalid offset */ 2062 /* Check for invalid offset */
2277 if (ofs > mtd->size) 2063 if (offs > mtd->size)
2278 return -EINVAL; 2064 return -EINVAL;
2279 2065
2280 return nand_block_checkbad (mtd, ofs, 1, 0); 2066 return nand_block_checkbad(mtd, offs, 1, 0);
2281} 2067}
2282 2068
2283/** 2069/**
2284 * nand_block_markbad - [MTD Interface] Mark the block at the given offset as bad 2070 * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
2285 * @mtd: MTD device structure 2071 * @mtd: MTD device structure
2286 * @ofs: offset relative to mtd start 2072 * @ofs: offset relative to mtd start
2287 */ 2073 */
2288static int nand_block_markbad (struct mtd_info *mtd, loff_t ofs) 2074static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
2289{ 2075{
2290 struct nand_chip *this = mtd->priv; 2076 struct nand_chip *chip = mtd->priv;
2291 int ret; 2077 int ret;
2292 2078
2293 if ((ret = nand_block_isbad(mtd, ofs))) { 2079 if ((ret = nand_block_isbad(mtd, ofs))) {
2294 /* If it was bad already, return success and do nothing. */ 2080 /* If it was bad already, return success and do nothing. */
2295 if (ret > 0) 2081 if (ret > 0)
2296 return 0; 2082 return 0;
2297 return ret; 2083 return ret;
2298 } 2084 }
2299 2085
2300 return this->block_markbad(mtd, ofs); 2086 return chip->block_markbad(mtd, ofs);
2301} 2087}
2302 2088
2303/** 2089/**
@@ -2306,9 +2092,9 @@ static int nand_block_markbad (struct mtd_info *mtd, loff_t ofs)
2306 */ 2092 */
2307static int nand_suspend(struct mtd_info *mtd) 2093static int nand_suspend(struct mtd_info *mtd)
2308{ 2094{
2309 struct nand_chip *this = mtd->priv; 2095 struct nand_chip *chip = mtd->priv;
2310 2096
2311 return nand_get_device (this, mtd, FL_PM_SUSPENDED); 2097 return nand_get_device(chip, mtd, FL_PM_SUSPENDED);
2312} 2098}
2313 2099
2314/** 2100/**
@@ -2317,373 +2103,385 @@ static int nand_suspend(struct mtd_info *mtd)
2317 */ 2103 */
2318static void nand_resume(struct mtd_info *mtd) 2104static void nand_resume(struct mtd_info *mtd)
2319{ 2105{
2320 struct nand_chip *this = mtd->priv; 2106 struct nand_chip *chip = mtd->priv;
2321 2107
2322 if (this->state == FL_PM_SUSPENDED) 2108 if (chip->state == FL_PM_SUSPENDED)
2323 nand_release_device(mtd); 2109 nand_release_device(mtd);
2324 else 2110 else
2325 printk(KERN_ERR "resume() called for the chip which is not " 2111 printk(KERN_ERR "nand_resume() called for a chip which is not "
2326 "in suspended state\n"); 2112 "in suspended state\n");
2327
2328} 2113}
2329 2114
2330 2115/*
2331/** 2116 * Set default functions
2332 * nand_scan - [NAND Interface] Scan for the NAND device
2333 * @mtd: MTD device structure
2334 * @maxchips: Number of chips to scan for
2335 *
2336 * This fills out all the not initialized function pointers
2337 * with the defaults.
2338 * The flash ID is read and the mtd/chip structures are
2339 * filled with the appropriate values. Buffers are allocated if
2340 * they are not provided by the board driver
2341 *
2342 */ 2117 */
2343int nand_scan (struct mtd_info *mtd, int maxchips) 2118static void nand_set_defaults(struct nand_chip *chip, int busw)
2344{ 2119{
2345 int i, nand_maf_id, nand_dev_id, busw, maf_id;
2346 struct nand_chip *this = mtd->priv;
2347
2348 /* Get buswidth to select the correct functions*/
2349 busw = this->options & NAND_BUSWIDTH_16;
2350
2351 /* check for proper chip_delay setup, set 20us if not */ 2120 /* check for proper chip_delay setup, set 20us if not */
2352 if (!this->chip_delay) 2121 if (!chip->chip_delay)
2353 this->chip_delay = 20; 2122 chip->chip_delay = 20;
2354 2123
2355 /* check, if a user supplied command function given */ 2124 /* check, if a user supplied command function given */
2356 if (this->cmdfunc == NULL) 2125 if (chip->cmdfunc == NULL)
2357 this->cmdfunc = nand_command; 2126 chip->cmdfunc = nand_command;
2358 2127
2359 /* check, if a user supplied wait function given */ 2128 /* check, if a user supplied wait function given */
2360 if (this->waitfunc == NULL) 2129 if (chip->waitfunc == NULL)
2361 this->waitfunc = nand_wait; 2130 chip->waitfunc = nand_wait;
2362 2131
2363 if (!this->select_chip) 2132 if (!chip->select_chip)
2364 this->select_chip = nand_select_chip; 2133 chip->select_chip = nand_select_chip;
2365 if (!this->write_byte) 2134 if (!chip->read_byte)
2366 this->write_byte = busw ? nand_write_byte16 : nand_write_byte; 2135 chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
2367 if (!this->read_byte) 2136 if (!chip->read_word)
2368 this->read_byte = busw ? nand_read_byte16 : nand_read_byte; 2137 chip->read_word = nand_read_word;
2369 if (!this->write_word) 2138 if (!chip->block_bad)
2370 this->write_word = nand_write_word; 2139 chip->block_bad = nand_block_bad;
2371 if (!this->read_word) 2140 if (!chip->block_markbad)
2372 this->read_word = nand_read_word; 2141 chip->block_markbad = nand_default_block_markbad;
2373 if (!this->block_bad) 2142 if (!chip->write_buf)
2374 this->block_bad = nand_block_bad; 2143 chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
2375 if (!this->block_markbad) 2144 if (!chip->read_buf)
2376 this->block_markbad = nand_default_block_markbad; 2145 chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
2377 if (!this->write_buf) 2146 if (!chip->verify_buf)
2378 this->write_buf = busw ? nand_write_buf16 : nand_write_buf; 2147 chip->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf;
2379 if (!this->read_buf) 2148 if (!chip->scan_bbt)
2380 this->read_buf = busw ? nand_read_buf16 : nand_read_buf; 2149 chip->scan_bbt = nand_default_bbt;
2381 if (!this->verify_buf) 2150
2382 this->verify_buf = busw ? nand_verify_buf16 : nand_verify_buf; 2151 if (!chip->controller) {
2383 if (!this->scan_bbt) 2152 chip->controller = &chip->hwcontrol;
2384 this->scan_bbt = nand_default_bbt; 2153 spin_lock_init(&chip->controller->lock);
2154 init_waitqueue_head(&chip->controller->wq);
2155 }
2156
2157}
2158
2159/*
2160 * Get the flash and manufacturer id and lookup if the type is supported
2161 */
2162static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
2163 struct nand_chip *chip,
2164 int busw, int *maf_id)
2165{
2166 struct nand_flash_dev *type = NULL;
2167 int i, dev_id, maf_idx;
2385 2168
2386 /* Select the device */ 2169 /* Select the device */
2387 this->select_chip(mtd, 0); 2170 chip->select_chip(mtd, 0);
2388 2171
2389 /* Send the command for reading device ID */ 2172 /* Send the command for reading device ID */
2390 this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1); 2173 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2391 2174
2392 /* Read manufacturer and device IDs */ 2175 /* Read manufacturer and device IDs */
2393 nand_maf_id = this->read_byte(mtd); 2176 *maf_id = chip->read_byte(mtd);
2394 nand_dev_id = this->read_byte(mtd); 2177 dev_id = chip->read_byte(mtd);
2395 2178
2396 /* Print and store flash device information */ 2179 /* Lookup the flash id */
2397 for (i = 0; nand_flash_ids[i].name != NULL; i++) { 2180 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
2181 if (dev_id == nand_flash_ids[i].id) {
2182 type = &nand_flash_ids[i];
2183 break;
2184 }
2185 }
2398 2186
2399 if (nand_dev_id != nand_flash_ids[i].id) 2187 if (!type)
2400 continue; 2188 return ERR_PTR(-ENODEV);
2189
2190 if (!mtd->name)
2191 mtd->name = type->name;
2192
2193 chip->chipsize = type->chipsize << 20;
2194
2195 /* Newer devices have all the information in additional id bytes */
2196 if (!type->pagesize) {
2197 int extid;
2198 /* The 3rd id byte contains non relevant data ATM */
2199 extid = chip->read_byte(mtd);
2200 /* The 4th id byte is the important one */
2201 extid = chip->read_byte(mtd);
2202 /* Calc pagesize */
2203 mtd->writesize = 1024 << (extid & 0x3);
2204 extid >>= 2;
2205 /* Calc oobsize */
2206 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
2207 extid >>= 2;
2208 /* Calc blocksize. Blocksize is multiples of 64KiB */
2209 mtd->erasesize = (64 * 1024) << (extid & 0x03);
2210 extid >>= 2;
2211 /* Get buswidth information */
2212 busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
2401 2213
2402 if (!mtd->name) mtd->name = nand_flash_ids[i].name; 2214 } else {
2403 this->chipsize = nand_flash_ids[i].chipsize << 20; 2215 /*
2404 2216 * Old devices have chip data hardcoded in the device id table
2405 /* New devices have all the information in additional id bytes */ 2217 */
2406 if (!nand_flash_ids[i].pagesize) { 2218 mtd->erasesize = type->erasesize;
2407 int extid; 2219 mtd->writesize = type->pagesize;
2408 /* The 3rd id byte contains non relevant data ATM */ 2220 mtd->oobsize = mtd->writesize / 32;
2409 extid = this->read_byte(mtd); 2221 busw = type->options & NAND_BUSWIDTH_16;
2410 /* The 4th id byte is the important one */ 2222 }
2411 extid = this->read_byte(mtd);
2412 /* Calc pagesize */
2413 mtd->oobblock = 1024 << (extid & 0x3);
2414 extid >>= 2;
2415 /* Calc oobsize */
2416 mtd->oobsize = (8 << (extid & 0x01)) * (mtd->oobblock >> 9);
2417 extid >>= 2;
2418 /* Calc blocksize. Blocksize is multiples of 64KiB */
2419 mtd->erasesize = (64 * 1024) << (extid & 0x03);
2420 extid >>= 2;
2421 /* Get buswidth information */
2422 busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
2423 2223
2424 } else { 2224 /* Try to identify manufacturer */
2425 /* Old devices have this data hardcoded in the 2225 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
2426 * device id table */ 2226 if (nand_manuf_ids[maf_idx].id == *maf_id)
2427 mtd->erasesize = nand_flash_ids[i].erasesize; 2227 break;
2428 mtd->oobblock = nand_flash_ids[i].pagesize; 2228 }
2429 mtd->oobsize = mtd->oobblock / 32;
2430 busw = nand_flash_ids[i].options & NAND_BUSWIDTH_16;
2431 }
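The extended-ID decode above packs the geometry into the 4th ID byte two bits at a time. A hypothetical worked example (the extid value is invented for illustration): 0x15 decodes to a 2048-byte page, 64-byte OOB, 128 KiB erase block and an 8-bit bus.

/* Illustrative only, not part of the patch: the 4th-ID-byte decode applied
 * to a hypothetical extid of 0x15.
 */
static void example_decode_extid(void)
{
	int extid = 0x15;
	int writesize = 1024 << (extid & 0x03);			/* 2048 */
	int oobsize, erasesize, busw16;

	extid >>= 2;
	oobsize = (8 << (extid & 0x01)) * (writesize >> 9);	/* 64 */
	extid >>= 2;
	erasesize = (64 * 1024) << (extid & 0x03);		/* 131072 */
	extid >>= 2;
	busw16 = extid & 0x01;					/* 0 -> 8-bit bus */

	printk(KERN_DEBUG "example: %d/%d/%d, %d-bit bus\n",
	       writesize, oobsize, erasesize, busw16 ? 16 : 8);
}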
2432 2229
2433 /* Try to identify manufacturer */ 2230 /*
2434 for (maf_id = 0; nand_manuf_ids[maf_id].id != 0x0; maf_id++) { 2231 * Check, if buswidth is correct. Hardware drivers should set
2435 if (nand_manuf_ids[maf_id].id == nand_maf_id) 2232 * chip correct !
2436 break; 2233 */
2437 } 2234 if (busw != (chip->options & NAND_BUSWIDTH_16)) {
2235 printk(KERN_INFO "NAND device: Manufacturer ID:"
2236 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
2237 dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
2238 printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
2239 (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
2240 busw ? 16 : 8);
2241 return ERR_PTR(-EINVAL);
2242 }
2438 2243
2439 /* Check, if buswidth is correct. Hardware drivers should set 2244 /* Calculate the address shift from the page size */
2440 * this correct ! */ 2245 chip->page_shift = ffs(mtd->writesize) - 1;
2441 if (busw != (this->options & NAND_BUSWIDTH_16)) { 2246 /* Convert chipsize to number of pages per chip -1. */
2442 printk (KERN_INFO "NAND device: Manufacturer ID:" 2247 chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
2443 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id,
2444 nand_manuf_ids[maf_id].name , mtd->name);
2445 printk (KERN_WARNING
2446 "NAND bus width %d instead %d bit\n",
2447 (this->options & NAND_BUSWIDTH_16) ? 16 : 8,
2448 busw ? 16 : 8);
2449 this->select_chip(mtd, -1);
2450 return 1;
2451 }
2452 2248
2453 /* Calculate the address shift from the page size */ 2249 chip->bbt_erase_shift = chip->phys_erase_shift =
2454 this->page_shift = ffs(mtd->oobblock) - 1; 2250 ffs(mtd->erasesize) - 1;
2455 this->bbt_erase_shift = this->phys_erase_shift = ffs(mtd->erasesize) - 1; 2251 chip->chip_shift = ffs(chip->chipsize) - 1;
2456 this->chip_shift = ffs(this->chipsize) - 1;
2457
2458 /* Set the bad block position */
2459 this->badblockpos = mtd->oobblock > 512 ?
2460 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
2461
2462 /* Get chip options, preserve non chip based options */
2463 this->options &= ~NAND_CHIPOPTIONS_MSK;
2464 this->options |= nand_flash_ids[i].options & NAND_CHIPOPTIONS_MSK;
2465 /* Set this as a default. Board drivers can override it, if neccecary */
2466 this->options |= NAND_NO_AUTOINCR;
2467 /* Check if this is a not a samsung device. Do not clear the options
2468 * for chips which are not having an extended id.
2469 */
2470 if (nand_maf_id != NAND_MFR_SAMSUNG && !nand_flash_ids[i].pagesize)
2471 this->options &= ~NAND_SAMSUNG_LP_OPTIONS;
2472 2252
2473 /* Check for AND chips with 4 page planes */ 2253 /* Set the bad block position */
2474 if (this->options & NAND_4PAGE_ARRAY) 2254 chip->badblockpos = mtd->writesize > 512 ?
2475 this->erase_cmd = multi_erase_cmd; 2255 NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;
2476 else
2477 this->erase_cmd = single_erase_cmd;
2478 2256
2479 /* Do not replace user supplied command function ! */ 2257 /* Get chip options, preserve non chip based options */
2480 if (mtd->oobblock > 512 && this->cmdfunc == nand_command) 2258 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2481 this->cmdfunc = nand_command_lp; 2259 chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
2482 2260
2483 printk (KERN_INFO "NAND device: Manufacturer ID:" 2261 /*
2484 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", nand_maf_id, nand_dev_id, 2262 * Set chip as a default. Board drivers can override it, if necessary
2485 nand_manuf_ids[maf_id].name , nand_flash_ids[i].name); 2263 */
2486 break; 2264 chip->options |= NAND_NO_AUTOINCR;
2487 } 2265
2266 /* Check if chip is a not a samsung device. Do not clear the
2267 * options for chips which are not having an extended id.
2268 */
2269 if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
2270 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
2271
2272 /* Check for AND chips with 4 page planes */
2273 if (chip->options & NAND_4PAGE_ARRAY)
2274 chip->erase_cmd = multi_erase_cmd;
2275 else
2276 chip->erase_cmd = single_erase_cmd;
2277
2278 /* Do not replace user supplied command function ! */
2279 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
2280 chip->cmdfunc = nand_command_lp;
2281
2282 printk(KERN_INFO "NAND device: Manufacturer ID:"
2283 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, dev_id,
2284 nand_manuf_ids[maf_idx].name, type->name);
2285
2286 return type;
2287}
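
For reference, the geometry bookkeeping that nand_get_flash_type() finishes with above is plain power-of-two arithmetic around ffs(). A standalone sketch with assumed example geometry (2048-byte pages, 128 KiB eraseblocks, a 256 MiB chip); the 0/5 marker offsets stand in for NAND_LARGE_BADBLOCK_POS and NAND_SMALL_BADBLOCK_POS:

/* Standalone sketch of the shift/mask arithmetic above.  Geometry and
 * marker offsets are illustrative assumptions, not values taken from
 * nand_flash_ids[]. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int writesize = 2048;			/* bytes per page */
	unsigned int erasesize = 128 * 1024;		/* bytes per eraseblock */
	unsigned long chipsize = 256UL << 20;		/* bytes per chip */

	int page_shift = ffs(writesize) - 1;			/* 11 */
	int phys_erase_shift = ffs(erasesize) - 1;		/* 17 */
	int chip_shift = ffs((int)chipsize) - 1;		/* 28 */
	unsigned long pagemask = (chipsize >> page_shift) - 1;	/* 0x1ffff */

	/* 0 / 5 stand in for NAND_LARGE_BADBLOCK_POS / NAND_SMALL_BADBLOCK_POS */
	int badblockpos = writesize > 512 ? 0 : 5;

	printf("page_shift=%d erase_shift=%d chip_shift=%d pagemask=0x%lx badblockpos=%d\n",
	       page_shift, phys_erase_shift, chip_shift, pagemask, badblockpos);
	return 0;
}
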
2288
2289/* module_text_address() isn't exported, and it's mostly a pointless
2290 test if this is a module _anyway_ -- they'd have to try _really_ hard
2291 to call us from in-kernel code if the core NAND support is modular. */
2292#ifdef MODULE
2293#define caller_is_module() (1)
2294#else
2295#define caller_is_module() \
2296 module_text_address((unsigned long)__builtin_return_address(0))
2297#endif
2488 2298
2489 if (!nand_flash_ids[i].name) { 2299/**
2490 printk (KERN_WARNING "No NAND device found!!!\n"); 2300 * nand_scan - [NAND Interface] Scan for the NAND device
2491 this->select_chip(mtd, -1); 2301 * @mtd: MTD device structure
2492 return 1; 2302 * @maxchips: Number of chips to scan for
2303 *
2304 * This fills out all the uninitialized function pointers
2305 * with the defaults.
2306 * The flash ID is read and the mtd/chip structures are
2307 * filled with the appropriate values.
2308 * The mtd->owner field must be set to the module of the caller
2309 *
2310 */
2311int nand_scan(struct mtd_info *mtd, int maxchips)
2312{
2313 int i, busw, nand_maf_id;
2314 struct nand_chip *chip = mtd->priv;
2315 struct nand_flash_dev *type;
2316
2317 /* Many callers got this wrong, so check for it for a while... */
2318 if (!mtd->owner && caller_is_module()) {
2319 printk(KERN_CRIT "nand_scan() called with NULL mtd->owner!\n");
2320 BUG();
2493 } 2321 }
2494 2322
2495 for (i=1; i < maxchips; i++) { 2323 /* Get buswidth to select the correct functions */
2496 this->select_chip(mtd, i); 2324 busw = chip->options & NAND_BUSWIDTH_16;
2325 /* Set the default functions */
2326 nand_set_defaults(chip, busw);
2497 2327
2498 /* Send the command for reading device ID */ 2328 /* Read the flash type */
2499 this->cmdfunc (mtd, NAND_CMD_READID, 0x00, -1); 2329 type = nand_get_flash_type(mtd, chip, busw, &nand_maf_id);
2330
2331 if (IS_ERR(type)) {
2332 printk(KERN_WARNING "No NAND device found!!!\n");
2333 chip->select_chip(mtd, -1);
2334 return PTR_ERR(type);
2335 }
2500 2336
2337 /* Check for a chip array */
2338 for (i = 1; i < maxchips; i++) {
2339 chip->select_chip(mtd, i);
2340 /* Send the command for reading device ID */
2341 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
2501 /* Read manufacturer and device IDs */ 2342 /* Read manufacturer and device IDs */
2502 if (nand_maf_id != this->read_byte(mtd) || 2343 if (nand_maf_id != chip->read_byte(mtd) ||
2503 nand_dev_id != this->read_byte(mtd)) 2344 type->id != chip->read_byte(mtd))
2504 break; 2345 break;
2505 } 2346 }
2506 if (i > 1) 2347 if (i > 1)
2507 printk(KERN_INFO "%d NAND chips detected\n", i); 2348 printk(KERN_INFO "%d NAND chips detected\n", i);
2508 2349
2509 /* Allocate buffers, if neccecary */ 2350 /* Store the number of chips and calc total size for mtd */
2510 if (!this->oob_buf) { 2351 chip->numchips = i;
2511 size_t len; 2352 mtd->size = i * chip->chipsize;
2512 len = mtd->oobsize << (this->phys_erase_shift - this->page_shift);
2513 this->oob_buf = kmalloc (len, GFP_KERNEL);
2514 if (!this->oob_buf) {
2515 printk (KERN_ERR "nand_scan(): Cannot allocate oob_buf\n");
2516 return -ENOMEM;
2517 }
2518 this->options |= NAND_OOBBUF_ALLOC;
2519 }
2520 2353
2521 if (!this->data_buf) { 2354 /* Preset the internal oob write buffer */
2522 size_t len; 2355 memset(chip->buffers.oobwbuf, 0xff, mtd->oobsize);
2523 len = mtd->oobblock + mtd->oobsize;
2524 this->data_buf = kmalloc (len, GFP_KERNEL);
2525 if (!this->data_buf) {
2526 if (this->options & NAND_OOBBUF_ALLOC)
2527 kfree (this->oob_buf);
2528 printk (KERN_ERR "nand_scan(): Cannot allocate data_buf\n");
2529 return -ENOMEM;
2530 }
2531 this->options |= NAND_DATABUF_ALLOC;
2532 }
2533 2356
2534 /* Store the number of chips and calc total size for mtd */ 2357 /*
2535 this->numchips = i; 2358 * If no default placement scheme is given, select an appropriate one
2536 mtd->size = i * this->chipsize; 2359 */
2537 /* Convert chipsize to number of pages per chip -1. */ 2360 if (!chip->ecc.layout) {
2538 this->pagemask = (this->chipsize >> this->page_shift) - 1;
2539 /* Preset the internal oob buffer */
2540 memset(this->oob_buf, 0xff, mtd->oobsize << (this->phys_erase_shift - this->page_shift));
2541
2542 /* If no default placement scheme is given, select an
2543 * appropriate one */
2544 if (!this->autooob) {
2545 /* Select the appropriate default oob placement scheme for
2546 * placement agnostic filesystems */
2547 switch (mtd->oobsize) { 2361 switch (mtd->oobsize) {
2548 case 8: 2362 case 8:
2549 this->autooob = &nand_oob_8; 2363 chip->ecc.layout = &nand_oob_8;
2550 break; 2364 break;
2551 case 16: 2365 case 16:
2552 this->autooob = &nand_oob_16; 2366 chip->ecc.layout = &nand_oob_16;
2553 break; 2367 break;
2554 case 64: 2368 case 64:
2555 this->autooob = &nand_oob_64; 2369 chip->ecc.layout = &nand_oob_64;
2556 break; 2370 break;
2557 default: 2371 default:
2558 printk (KERN_WARNING "No oob scheme defined for oobsize %d\n", 2372 printk(KERN_WARNING "No oob scheme defined for "
2559 mtd->oobsize); 2373 "oobsize %d\n", mtd->oobsize);
2560 BUG(); 2374 BUG();
2561 } 2375 }
2562 } 2376 }
2563 2377
2564 /* The number of bytes available for the filesystem to place fs dependend
2565 * oob data */
2566 mtd->oobavail = 0;
2567 for (i = 0; this->autooob->oobfree[i][1]; i++)
2568 mtd->oobavail += this->autooob->oobfree[i][1];
2569
2570 /* 2378 /*
2571 * check ECC mode, default to software 2379 * check ECC mode, default to software if 3byte/512byte hardware ECC is
2572 * if 3byte/512byte hardware ECC is selected and we have 256 byte pagesize 2380 * selected and we have 256 byte pagesize fallback to software ECC
2573 * fallback to software ECC 2381 */
2574 */ 2382 switch (chip->ecc.mode) {
2575 this->eccsize = 256; /* set default eccsize */ 2383 case NAND_ECC_HW:
2576 this->eccbytes = 3; 2384 /* Use standard hwecc read page function ? */
2577 2385 if (!chip->ecc.read_page)
2578 switch (this->eccmode) { 2386 chip->ecc.read_page = nand_read_page_hwecc;
2579 case NAND_ECC_HW12_2048: 2387 if (!chip->ecc.write_page)
2580 if (mtd->oobblock < 2048) { 2388 chip->ecc.write_page = nand_write_page_hwecc;
2581 printk(KERN_WARNING "2048 byte HW ECC not possible on %d byte page size, fallback to SW ECC\n", 2389 if (!chip->ecc.read_oob)
2582 mtd->oobblock); 2390 chip->ecc.read_oob = nand_read_oob_std;
2583 this->eccmode = NAND_ECC_SOFT; 2391 if (!chip->ecc.write_oob)
2584 this->calculate_ecc = nand_calculate_ecc; 2392 chip->ecc.write_oob = nand_write_oob_std;
2585 this->correct_data = nand_correct_data; 2393
2586 } else 2394 case NAND_ECC_HW_SYNDROME:
2587 this->eccsize = 2048; 2395 if (!chip->ecc.calculate || !chip->ecc.correct ||
2588 break; 2396 !chip->ecc.hwctl) {
2589 2397 printk(KERN_WARNING "No ECC functions supplied, "
2590 case NAND_ECC_HW3_512: 2398 "Hardware ECC not possible\n");
2591 case NAND_ECC_HW6_512: 2399 BUG();
2592 case NAND_ECC_HW8_512: 2400 }
2593 if (mtd->oobblock == 256) { 2401 /* Use standard syndrome read/write page function ? */
2594 printk (KERN_WARNING "512 byte HW ECC not possible on 256 Byte pagesize, fallback to SW ECC \n"); 2402 if (!chip->ecc.read_page)
2595 this->eccmode = NAND_ECC_SOFT; 2403 chip->ecc.read_page = nand_read_page_syndrome;
2596 this->calculate_ecc = nand_calculate_ecc; 2404 if (!chip->ecc.write_page)
2597 this->correct_data = nand_correct_data; 2405 chip->ecc.write_page = nand_write_page_syndrome;
2598 } else 2406 if (!chip->ecc.read_oob)
2599 this->eccsize = 512; /* set eccsize to 512 */ 2407 chip->ecc.read_oob = nand_read_oob_syndrome;
2600 break; 2408 if (!chip->ecc.write_oob)
2409 chip->ecc.write_oob = nand_write_oob_syndrome;
2410
2411 if (mtd->writesize >= chip->ecc.size)
2412 break;
2413 printk(KERN_WARNING "%d byte HW ECC not possible on "
2414 "%d byte page size, fallback to SW ECC\n",
2415 chip->ecc.size, mtd->writesize);
2416 chip->ecc.mode = NAND_ECC_SOFT;
2601 2417
2602 case NAND_ECC_HW3_256: 2418 case NAND_ECC_SOFT:
2419 chip->ecc.calculate = nand_calculate_ecc;
2420 chip->ecc.correct = nand_correct_data;
2421 chip->ecc.read_page = nand_read_page_swecc;
2422 chip->ecc.write_page = nand_write_page_swecc;
2423 chip->ecc.read_oob = nand_read_oob_std;
2424 chip->ecc.write_oob = nand_write_oob_std;
2425 chip->ecc.size = 256;
2426 chip->ecc.bytes = 3;
2603 break; 2427 break;
2604 2428
2605 case NAND_ECC_NONE: 2429 case NAND_ECC_NONE:
2606 printk (KERN_WARNING "NAND_ECC_NONE selected by board driver. This is not recommended !!\n"); 2430 printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. "
2607 this->eccmode = NAND_ECC_NONE; 2431 "This is not recommended !!\n");
2432 chip->ecc.read_page = nand_read_page_raw;
2433 chip->ecc.write_page = nand_write_page_raw;
2434 chip->ecc.read_oob = nand_read_oob_std;
2435 chip->ecc.write_oob = nand_write_oob_std;
2436 chip->ecc.size = mtd->writesize;
2437 chip->ecc.bytes = 0;
2608 break; 2438 break;
2609
2610 case NAND_ECC_SOFT:
2611 this->calculate_ecc = nand_calculate_ecc;
2612 this->correct_data = nand_correct_data;
2613 break;
2614
2615 default: 2439 default:
2616 printk (KERN_WARNING "Invalid NAND_ECC_MODE %d\n", this->eccmode); 2440 printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
2617 BUG(); 2441 chip->ecc.mode);
2618 }
2619
2620 /* Check hardware ecc function availability and adjust number of ecc bytes per
2621 * calculation step
2622 */
2623 switch (this->eccmode) {
2624 case NAND_ECC_HW12_2048:
2625 this->eccbytes += 4;
2626 case NAND_ECC_HW8_512:
2627 this->eccbytes += 2;
2628 case NAND_ECC_HW6_512:
2629 this->eccbytes += 3;
2630 case NAND_ECC_HW3_512:
2631 case NAND_ECC_HW3_256:
2632 if (this->calculate_ecc && this->correct_data && this->enable_hwecc)
2633 break;
2634 printk (KERN_WARNING "No ECC functions supplied, Hardware ECC not possible\n");
2635 BUG(); 2442 BUG();
2636 } 2443 }
2637 2444
2638 mtd->eccsize = this->eccsize; 2445 /*
2639 2446 * The number of bytes available for a client to place data into
2640 /* Set the number of read / write steps for one page to ensure ECC generation */ 2447 * the out of band area
2641 switch (this->eccmode) { 2448 */
2642 case NAND_ECC_HW12_2048: 2449 chip->ecc.layout->oobavail = 0;
2643 this->eccsteps = mtd->oobblock / 2048; 2450 for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
2644 break; 2451 chip->ecc.layout->oobavail +=
2645 case NAND_ECC_HW3_512: 2452 chip->ecc.layout->oobfree[i].length;
2646 case NAND_ECC_HW6_512:
2647 case NAND_ECC_HW8_512:
2648 this->eccsteps = mtd->oobblock / 512;
2649 break;
2650 case NAND_ECC_HW3_256:
2651 case NAND_ECC_SOFT:
2652 this->eccsteps = mtd->oobblock / 256;
2653 break;
2654 2453
2655 case NAND_ECC_NONE: 2454 /*
2656 this->eccsteps = 1; 2455 * Set the number of read / write steps for one page depending on ECC
2657 break; 2456 * mode
2457 */
2458 chip->ecc.steps = mtd->writesize / chip->ecc.size;
2459 if(chip->ecc.steps * chip->ecc.size != mtd->writesize) {
2460 printk(KERN_WARNING "Invalid ecc parameters\n");
2461 BUG();
2658 } 2462 }
2463 chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
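
The ECC step accounting just introduced is a divisibility check plus a multiplication; a minimal sketch using the software-ECC defaults (256-byte steps, 3 ECC bytes per step) and an assumed 2048-byte page:

/* Sketch of the ecc.steps / ecc.total bookkeeping above.  Page size and
 * ECC parameters are assumed example values. */
#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048;	/* page size */
	unsigned int ecc_size = 256;	/* data bytes covered per ECC step */
	unsigned int ecc_bytes = 3;	/* ECC bytes generated per step */

	unsigned int steps = writesize / ecc_size;
	if (steps * ecc_size != writesize) {
		fprintf(stderr, "Invalid ecc parameters\n");
		return 1;
	}
	printf("steps=%u total ecc bytes per page=%u\n", steps, steps * ecc_bytes);
	return 0;	/* 8 steps, 24 ECC bytes for a 2048-byte page */
}
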
2659 2464
2660 /* Initialize state, waitqueue and spinlock */ 2465 /* Initialize state */
2661 this->state = FL_READY; 2466 chip->state = FL_READY;
2662 init_waitqueue_head (&this->wq);
2663 spin_lock_init (&this->chip_lock);
2664 2467
2665 /* De-select the device */ 2468 /* De-select the device */
2666 this->select_chip(mtd, -1); 2469 chip->select_chip(mtd, -1);
2667 2470
2668 /* Invalidate the pagebuffer reference */ 2471 /* Invalidate the pagebuffer reference */
2669 this->pagebuf = -1; 2472 chip->pagebuf = -1;
2670 2473
2671 /* Fill in remaining MTD driver data */ 2474 /* Fill in remaining MTD driver data */
2672 mtd->type = MTD_NANDFLASH; 2475 mtd->type = MTD_NANDFLASH;
2673 mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC; 2476 mtd->flags = MTD_CAP_NANDFLASH;
2674 mtd->ecctype = MTD_ECC_SW; 2477 mtd->ecctype = MTD_ECC_SW;
2675 mtd->erase = nand_erase; 2478 mtd->erase = nand_erase;
2676 mtd->point = NULL; 2479 mtd->point = NULL;
2677 mtd->unpoint = NULL; 2480 mtd->unpoint = NULL;
2678 mtd->read = nand_read; 2481 mtd->read = nand_read;
2679 mtd->write = nand_write; 2482 mtd->write = nand_write;
2680 mtd->read_ecc = nand_read_ecc;
2681 mtd->write_ecc = nand_write_ecc;
2682 mtd->read_oob = nand_read_oob; 2483 mtd->read_oob = nand_read_oob;
2683 mtd->write_oob = nand_write_oob; 2484 mtd->write_oob = nand_write_oob;
2684 mtd->readv = NULL;
2685 mtd->writev = nand_writev;
2686 mtd->writev_ecc = nand_writev_ecc;
2687 mtd->sync = nand_sync; 2485 mtd->sync = nand_sync;
2688 mtd->lock = NULL; 2486 mtd->lock = NULL;
2689 mtd->unlock = NULL; 2487 mtd->unlock = NULL;
@@ -2692,47 +2490,38 @@ int nand_scan (struct mtd_info *mtd, int maxchips)
2692 mtd->block_isbad = nand_block_isbad; 2490 mtd->block_isbad = nand_block_isbad;
2693 mtd->block_markbad = nand_block_markbad; 2491 mtd->block_markbad = nand_block_markbad;
2694 2492
2695 /* and make the autooob the default one */ 2493 /* propagate ecc.layout to mtd_info */
2696 memcpy(&mtd->oobinfo, this->autooob, sizeof(mtd->oobinfo)); 2494 mtd->ecclayout = chip->ecc.layout;
2697
2698 mtd->owner = THIS_MODULE;
2699 2495
2700 /* Check, if we should skip the bad block table scan */ 2496 /* Check, if we should skip the bad block table scan */
2701 if (this->options & NAND_SKIP_BBTSCAN) 2497 if (chip->options & NAND_SKIP_BBTSCAN)
2702 return 0; 2498 return 0;
2703 2499
2704 /* Build bad block table */ 2500 /* Build bad block table */
2705 return this->scan_bbt (mtd); 2501 return chip->scan_bbt(mtd);
2706} 2502}
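
The oobavail value computed inside nand_scan() above is simply the sum of the free ranges in the chosen OOB layout. A reduced standalone illustration; the structure below is a stand-in for the kernel layout type, and the single 8-byte free range mimics the small-page (16-byte OOB) layout as an assumption:

/* Reduced stand-in for the kernel's ECC layout structure; field names
 * and the free range are assumptions, shown only to illustrate the
 * oobavail summation. */
#include <stdio.h>

struct oobfree_range {
	unsigned int offset;
	unsigned int length;
};

struct ecc_layout {
	unsigned int eccbytes;
	struct oobfree_range oobfree[8];	/* zero length terminates */
};

static const struct ecc_layout oob16_like = {
	.eccbytes = 6,
	.oobfree = { { .offset = 8, .length = 8 } },
};

int main(void)
{
	unsigned int oobavail = 0;
	int i;

	for (i = 0; oob16_like.oobfree[i].length; i++)
		oobavail += oob16_like.oobfree[i].length;

	printf("bytes free for the client in OOB: %u\n", oobavail);	/* 8 */
	return 0;
}
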
2707 2503
2708/** 2504/**
2709 * nand_release - [NAND Interface] Free resources held by the NAND device 2505 * nand_release - [NAND Interface] Free resources held by the NAND device
2710 * @mtd: MTD device structure 2506 * @mtd: MTD device structure
2711*/ 2507*/
2712void nand_release (struct mtd_info *mtd) 2508void nand_release(struct mtd_info *mtd)
2713{ 2509{
2714 struct nand_chip *this = mtd->priv; 2510 struct nand_chip *chip = mtd->priv;
2715 2511
2716#ifdef CONFIG_MTD_PARTITIONS 2512#ifdef CONFIG_MTD_PARTITIONS
2717 /* Deregister partitions */ 2513 /* Deregister partitions */
2718 del_mtd_partitions (mtd); 2514 del_mtd_partitions(mtd);
2719#endif 2515#endif
2720 /* Deregister the device */ 2516 /* Deregister the device */
2721 del_mtd_device (mtd); 2517 del_mtd_device(mtd);
2722 2518
2723 /* Free bad block table memory */ 2519 /* Free bad block table memory */
2724 kfree (this->bbt); 2520 kfree(chip->bbt);
2725 /* Buffer allocated by nand_scan ? */
2726 if (this->options & NAND_OOBBUF_ALLOC)
2727 kfree (this->oob_buf);
2728 /* Buffer allocated by nand_scan ? */
2729 if (this->options & NAND_DATABUF_ALLOC)
2730 kfree (this->data_buf);
2731} 2521}
2732 2522
2733EXPORT_SYMBOL_GPL (nand_scan); 2523EXPORT_SYMBOL_GPL(nand_scan);
2734EXPORT_SYMBOL_GPL (nand_release); 2524EXPORT_SYMBOL_GPL(nand_release);
2735
2736 2525
2737static int __init nand_base_init(void) 2526static int __init nand_base_init(void)
2738{ 2527{
@@ -2748,6 +2537,6 @@ static void __exit nand_base_exit(void)
2748module_init(nand_base_init); 2537module_init(nand_base_init);
2749module_exit(nand_base_exit); 2538module_exit(nand_base_exit);
2750 2539
2751MODULE_LICENSE ("GPL"); 2540MODULE_LICENSE("GPL");
2752MODULE_AUTHOR ("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>"); 2541MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>, Thomas Gleixner <tglx@linutronix.de>");
2753MODULE_DESCRIPTION ("Generic NAND flash driver code"); 2542MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index ca286999fe08..a612c4ea8194 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -48,7 +48,7 @@
48 * 48 *
49 * Following assumptions are made: 49 * Following assumptions are made:
50 * - bbts start at a page boundary, if autolocated on a block boundary 50 * - bbts start at a page boundary, if autolocated on a block boundary
51 * - the space neccecary for a bbt in FLASH does not exceed a block boundary 51 * - the space necessary for a bbt in FLASH does not exceed a block boundary
52 * 52 *
53 */ 53 */
54 54
@@ -60,7 +60,7 @@
60#include <linux/mtd/compatmac.h> 60#include <linux/mtd/compatmac.h>
61#include <linux/bitops.h> 61#include <linux/bitops.h>
62#include <linux/delay.h> 62#include <linux/delay.h>
63 63#include <linux/vmalloc.h>
64 64
65/** 65/**
66 * check_pattern - [GENERIC] check if a pattern is in the buffer 66 * check_pattern - [GENERIC] check if a pattern is in the buffer
@@ -75,7 +75,7 @@
75 * pattern area contain 0xff 75 * pattern area contain 0xff
76 * 76 *
77*/ 77*/
78static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td) 78static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
79{ 79{
80 int i, end = 0; 80 int i, end = 0;
81 uint8_t *p = buf; 81 uint8_t *p = buf;
@@ -116,7 +116,7 @@ static int check_pattern (uint8_t *buf, int len, int paglen, struct nand_bbt_des
116 * no optional empty check 116 * no optional empty check
117 * 117 *
118*/ 118*/
119static int check_short_pattern (uint8_t *buf, struct nand_bbt_descr *td) 119static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
120{ 120{
121 int i; 121 int i;
122 uint8_t *p = buf; 122 uint8_t *p = buf;
@@ -142,8 +142,8 @@ static int check_short_pattern (uint8_t *buf, struct nand_bbt_descr *td)
142 * Read the bad block table starting from page. 142 * Read the bad block table starting from page.
143 * 143 *
144 */ 144 */
145static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num, 145static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
146 int bits, int offs, int reserved_block_code) 146 int bits, int offs, int reserved_block_code)
147{ 147{
148 int res, i, j, act = 0; 148 int res, i, j, act = 0;
149 struct nand_chip *this = mtd->priv; 149 struct nand_chip *this = mtd->priv;
@@ -152,17 +152,17 @@ static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num,
152 uint8_t msk = (uint8_t) ((1 << bits) - 1); 152 uint8_t msk = (uint8_t) ((1 << bits) - 1);
153 153
154 totlen = (num * bits) >> 3; 154 totlen = (num * bits) >> 3;
155 from = ((loff_t)page) << this->page_shift; 155 from = ((loff_t) page) << this->page_shift;
156 156
157 while (totlen) { 157 while (totlen) {
158 len = min (totlen, (size_t) (1 << this->bbt_erase_shift)); 158 len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
159 res = mtd->read_ecc (mtd, from, len, &retlen, buf, NULL, this->autooob); 159 res = mtd->read(mtd, from, len, &retlen, buf);
160 if (res < 0) { 160 if (res < 0) {
161 if (retlen != len) { 161 if (retlen != len) {
162 printk (KERN_INFO "nand_bbt: Error reading bad block table\n"); 162 printk(KERN_INFO "nand_bbt: Error reading bad block table\n");
163 return res; 163 return res;
164 } 164 }
165 printk (KERN_WARNING "nand_bbt: ECC error while reading bad block table\n"); 165 printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
166 } 166 }
167 167
168 /* Analyse data */ 168 /* Analyse data */
@@ -172,22 +172,23 @@ static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num,
172 uint8_t tmp = (dat >> j) & msk; 172 uint8_t tmp = (dat >> j) & msk;
173 if (tmp == msk) 173 if (tmp == msk)
174 continue; 174 continue;
175 if (reserved_block_code && 175 if (reserved_block_code && (tmp == reserved_block_code)) {
176 (tmp == reserved_block_code)) { 176 printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n",
177 printk (KERN_DEBUG "nand_read_bbt: Reserved block at 0x%08x\n", 177 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
178 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
179 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06); 178 this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
179 mtd->ecc_stats.bbtblocks++;
180 continue; 180 continue;
181 } 181 }
182 /* Leave it for now, if its matured we can move this 182 /* Leave it for now, if its matured we can move this
183 * message to MTD_DEBUG_LEVEL0 */ 183 * message to MTD_DEBUG_LEVEL0 */
184 printk (KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n", 184 printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%08x\n",
185 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift); 185 ((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
186 /* Factory marked bad or worn out ? */ 186 /* Factory marked bad or worn out ? */
187 if (tmp == 0) 187 if (tmp == 0)
188 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06); 188 this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
189 else 189 else
190 this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06); 190 this->bbt[offs + (act >> 3)] |= 0x1 << (act & 0x06);
191 mtd->ecc_stats.badblocks++;
191 } 192 }
192 } 193 }
193 totlen -= len; 194 totlen -= len;
@@ -207,7 +208,7 @@ static int read_bbt (struct mtd_info *mtd, uint8_t *buf, int page, int num,
207 * Read the bad block table for all chips starting at a given page 208 * Read the bad block table for all chips starting at a given page
208 * We assume that the bbt bits are in consecutive order. 209 * We assume that the bbt bits are in consecutive order.
209*/ 210*/
210static int read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip) 211static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
211{ 212{
212 struct nand_chip *this = mtd->priv; 213 struct nand_chip *this = mtd->priv;
213 int res = 0, i; 214 int res = 0, i;
@@ -231,6 +232,42 @@ static int read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_des
231 return 0; 232 return 0;
232} 233}
233 234
235/*
236 * Scan read raw data from flash
237 */
238static int scan_read_raw(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
239 size_t len)
240{
241 struct mtd_oob_ops ops;
242
243 ops.mode = MTD_OOB_RAW;
244 ops.ooboffs = 0;
245 ops.ooblen = mtd->oobsize;
246 ops.oobbuf = buf;
247 ops.datbuf = buf;
248 ops.len = len;
249
250 return mtd->read_oob(mtd, offs, &ops);
251}
252
253/*
254 * Scan write data with oob to flash
255 */
256static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
257 uint8_t *buf, uint8_t *oob)
258{
259 struct mtd_oob_ops ops;
260
261 ops.mode = MTD_OOB_PLACE;
262 ops.ooboffs = 0;
263 ops.ooblen = mtd->oobsize;
264 ops.datbuf = buf;
265 ops.oobbuf = oob;
266 ops.len = len;
267
268 return mtd->write_oob(mtd, offs, &ops);
269}
270
234/** 271/**
235 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page 272 * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
236 * @mtd: MTD device structure 273 * @mtd: MTD device structure
@@ -242,28 +279,85 @@ static int read_abs_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_des
242 * We assume that the bbt bits are in consecutive order. 279 * We assume that the bbt bits are in consecutive order.
243 * 280 *
244*/ 281*/
245static int read_abs_bbts (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, 282static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
246 struct nand_bbt_descr *md) 283 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
247{ 284{
248 struct nand_chip *this = mtd->priv; 285 struct nand_chip *this = mtd->priv;
249 286
250 /* Read the primary version, if available */ 287 /* Read the primary version, if available */
251 if (td->options & NAND_BBT_VERSION) { 288 if (td->options & NAND_BBT_VERSION) {
252 nand_read_raw (mtd, buf, td->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize); 289 scan_read_raw(mtd, buf, td->pages[0] << this->page_shift,
253 td->version[0] = buf[mtd->oobblock + td->veroffs]; 290 mtd->writesize);
254 printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", td->pages[0], td->version[0]); 291 td->version[0] = buf[mtd->writesize + td->veroffs];
292 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
293 td->pages[0], td->version[0]);
255 } 294 }
256 295
257 /* Read the mirror version, if available */ 296 /* Read the mirror version, if available */
258 if (md && (md->options & NAND_BBT_VERSION)) { 297 if (md && (md->options & NAND_BBT_VERSION)) {
259 nand_read_raw (mtd, buf, md->pages[0] << this->page_shift, mtd->oobblock, mtd->oobsize); 298 scan_read_raw(mtd, buf, md->pages[0] << this->page_shift,
260 md->version[0] = buf[mtd->oobblock + md->veroffs]; 299 mtd->writesize);
261 printk (KERN_DEBUG "Bad block table at page %d, version 0x%02X\n", md->pages[0], md->version[0]); 300 md->version[0] = buf[mtd->writesize + md->veroffs];
301 printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
302 md->pages[0], md->version[0]);
262 } 303 }
263
264 return 1; 304 return 1;
265} 305}
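
The raw reads above return the page data immediately followed by the OOB in one buffer, which is why the table version byte can be indexed as buf[mtd->writesize + td->veroffs]. A small sketch of that buffer layout; the veroffs value of 12 is an assumed example, not taken from the descriptors in this file:

/* Combined data+OOB buffer as produced by a raw read: the version byte
 * of the table lives right after the page data.  All values below are
 * made-up examples. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WRITESIZE 2048
#define OOBSIZE 64

int main(void)
{
	uint8_t buf[WRITESIZE + OOBSIZE];
	unsigned int veroffs = 12;

	memset(buf, 0xff, sizeof(buf));
	buf[WRITESIZE + veroffs] = 0x05;	/* pretend on-flash table version */

	printf("bbt version 0x%02X read from byte %u of the buffer\n",
	       buf[WRITESIZE + veroffs], WRITESIZE + veroffs);
	return 0;
}
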
266 306
307/*
308 * Scan a given block full
309 */
310static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
311 loff_t offs, uint8_t *buf, size_t readlen,
312 int scanlen, int len)
313{
314 int ret, j;
315
316 ret = scan_read_raw(mtd, buf, offs, readlen);
317 if (ret)
318 return ret;
319
320 for (j = 0; j < len; j++, buf += scanlen) {
321 if (check_pattern(buf, scanlen, mtd->writesize, bd))
322 return 1;
323 }
324 return 0;
325}
326
327/*
328 * Scan a given block partially
329 */
330static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
331 loff_t offs, uint8_t *buf, int len)
332{
333 struct mtd_oob_ops ops;
334 int j, ret;
335
336 ops.len = mtd->oobsize;
337 ops.ooblen = mtd->oobsize;
338 ops.oobbuf = buf;
339 ops.ooboffs = 0;
340 ops.datbuf = NULL;
341 ops.mode = MTD_OOB_PLACE;
342
343 for (j = 0; j < len; j++) {
344 /*
345 * Read the full oob until read_oob is fixed to
346 * handle single byte reads for 16 bit
347 * buswidth
348 */
349 ret = mtd->read_oob(mtd, offs, &ops);
350 if (ret)
351 return ret;
352
353 if (check_short_pattern(buf, bd))
354 return 1;
355
356 offs += mtd->writesize;
357 }
358 return 0;
359}
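
scan_block_fast() above declares a block bad as soon as the marker bytes in its OOB are no longer 0xff. A reduced standalone version of that check (the offs = 5, len = 1 values mirror the small-page descriptor; the OOB contents and the simplified signature are made up):

/* Simplified short-pattern check: a block is good only if the marker
 * bytes in its OOB still read back as 0xff. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static const uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static int check_marker(const uint8_t *oob, int offs, int len,
			const uint8_t *pattern)
{
	int i;

	for (i = 0; i < len; i++) {
		if (oob[offs + i] != pattern[i])
			return -1;	/* marker programmed -> bad block */
	}
	return 0;
}

int main(void)
{
	uint8_t good_oob[16], bad_oob[16];

	memset(good_oob, 0xff, sizeof(good_oob));
	memset(bad_oob, 0xff, sizeof(bad_oob));
	bad_oob[5] = 0x00;	/* factory bad block marker */

	printf("untouched block: %s\n",
	       check_marker(good_oob, 5, 1, scan_ff_pattern) ? "BAD" : "ok");
	printf("marked block:    %s\n",
	       check_marker(bad_oob, 5, 1, scan_ff_pattern) ? "BAD" : "ok");
	return 0;
}
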
360
267/** 361/**
268 * create_bbt - [GENERIC] Create a bad block table by scanning the device 362 * create_bbt - [GENERIC] Create a bad block table by scanning the device
269 * @mtd: MTD device structure 363 * @mtd: MTD device structure
@@ -275,15 +369,16 @@ static int read_abs_bbts (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_de
275 * Create a bad block table by scanning the device 369 * Create a bad block table by scanning the device
276 * for the given good/bad block identify pattern 370 * for the given good/bad block identify pattern
277 */ 371 */
278static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip) 372static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
373 struct nand_bbt_descr *bd, int chip)
279{ 374{
280 struct nand_chip *this = mtd->priv; 375 struct nand_chip *this = mtd->priv;
281 int i, j, numblocks, len, scanlen; 376 int i, numblocks, len, scanlen;
282 int startblock; 377 int startblock;
283 loff_t from; 378 loff_t from;
284 size_t readlen, ooblen; 379 size_t readlen;
285 380
286 printk (KERN_INFO "Scanning device for bad blocks\n"); 381 printk(KERN_INFO "Scanning device for bad blocks\n");
287 382
288 if (bd->options & NAND_BBT_SCANALLPAGES) 383 if (bd->options & NAND_BBT_SCANALLPAGES)
289 len = 1 << (this->bbt_erase_shift - this->page_shift); 384 len = 1 << (this->bbt_erase_shift - this->page_shift);
@@ -296,25 +391,24 @@ static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
296 391
297 if (!(bd->options & NAND_BBT_SCANEMPTY)) { 392 if (!(bd->options & NAND_BBT_SCANEMPTY)) {
298 /* We need only read few bytes from the OOB area */ 393 /* We need only read few bytes from the OOB area */
299 scanlen = ooblen = 0; 394 scanlen = 0;
300 readlen = bd->len; 395 readlen = bd->len;
301 } else { 396 } else {
302 /* Full page content should be read */ 397 /* Full page content should be read */
303 scanlen = mtd->oobblock + mtd->oobsize; 398 scanlen = mtd->writesize + mtd->oobsize;
304 readlen = len * mtd->oobblock; 399 readlen = len * mtd->writesize;
305 ooblen = len * mtd->oobsize;
306 } 400 }
307 401
308 if (chip == -1) { 402 if (chip == -1) {
309 /* Note that numblocks is 2 * (real numblocks) here, see i+=2 below as it 403 /* Note that numblocks is 2 * (real numblocks) here, see i+=2
310 * makes shifting and masking less painful */ 404 * below as it makes shifting and masking less painful */
311 numblocks = mtd->size >> (this->bbt_erase_shift - 1); 405 numblocks = mtd->size >> (this->bbt_erase_shift - 1);
312 startblock = 0; 406 startblock = 0;
313 from = 0; 407 from = 0;
314 } else { 408 } else {
315 if (chip >= this->numchips) { 409 if (chip >= this->numchips) {
316 printk (KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n", 410 printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
317 chip + 1, this->numchips); 411 chip + 1, this->numchips);
318 return -EINVAL; 412 return -EINVAL;
319 } 413 }
320 numblocks = this->chipsize >> (this->bbt_erase_shift - 1); 414 numblocks = this->chipsize >> (this->bbt_erase_shift - 1);
@@ -326,36 +420,22 @@ static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
326 for (i = startblock; i < numblocks;) { 420 for (i = startblock; i < numblocks;) {
327 int ret; 421 int ret;
328 422
329 if (bd->options & NAND_BBT_SCANEMPTY) 423 if (bd->options & NAND_BBT_SCANALLPAGES)
330 if ((ret = nand_read_raw (mtd, buf, from, readlen, ooblen))) 424 ret = scan_block_full(mtd, bd, from, buf, readlen,
331 return ret; 425 scanlen, len);
332 426 else
333 for (j = 0; j < len; j++) { 427 ret = scan_block_fast(mtd, bd, from, buf, len);
334 if (!(bd->options & NAND_BBT_SCANEMPTY)) { 428
335 size_t retlen; 429 if (ret < 0)
336 430 return ret;
337 /* Read the full oob until read_oob is fixed to 431
338 * handle single byte reads for 16 bit buswidth */ 432 if (ret) {
339 ret = mtd->read_oob(mtd, from + j * mtd->oobblock, 433 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
340 mtd->oobsize, &retlen, buf); 434 printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
341 if (ret) 435 i >> 1, (unsigned int)from);
342 return ret; 436 mtd->ecc_stats.badblocks++;
343
344 if (check_short_pattern (buf, bd)) {
345 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
346 printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
347 i >> 1, (unsigned int) from);
348 break;
349 }
350 } else {
351 if (check_pattern (&buf[j * scanlen], scanlen, mtd->oobblock, bd)) {
352 this->bbt[i >> 3] |= 0x03 << (i & 0x6);
353 printk (KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
354 i >> 1, (unsigned int) from);
355 break;
356 }
357 }
358 } 437 }
438
359 i += 2; 439 i += 2;
360 from += (1 << this->bbt_erase_shift); 440 from += (1 << this->bbt_erase_shift);
361 } 441 }
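
The in-memory table filled in by create_bbt() above packs two bits per eraseblock, four blocks to a byte, with the loop index running in half-block steps (i += 2). A standalone sketch of that encoding and of the matching read-back used elsewhere in the driver (block numbers are arbitrary examples):

/* Two bits per eraseblock: 00 = good, 01 = worn, 10 = reserved,
 * 11 = factory bad.  Block n is reached at i = 2 * n, exactly as in
 * create_bbt(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUMBLOCKS 16	/* eraseblocks in this toy device */

int main(void)
{
	uint8_t bbt[NUMBLOCKS / 4];
	int i;

	memset(bbt, 0x00, sizeof(bbt));	/* all good */

	for (i = 0; i < 2 * NUMBLOCKS; i += 2) {
		int block = i >> 1;
		int bad = (block == 3 || block == 10);	/* pretend scan result */

		if (bad)
			bbt[i >> 3] |= 0x03 << (i & 0x6);	/* mark factory bad */
	}

	for (i = 0; i < NUMBLOCKS; i++)
		printf("block %2d: %s\n", i,
		       (bbt[i >> 2] >> (2 * (i & 0x03))) & 0x03 ? "bad" : "good");
	return 0;
}
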
@@ -374,22 +454,23 @@ static int create_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
374 * block. 454 * block.
375 * If the option NAND_BBT_PERCHIP is given, each chip is searched 455 * If the option NAND_BBT_PERCHIP is given, each chip is searched
376 * for a bbt, which contains the bad block information of this chip. 456 * for a bbt, which contains the bad block information of this chip.
377 * This is neccecary to provide support for certain DOC devices. 457 * This is necessary to provide support for certain DOC devices.
378 * 458 *
379 * The bbt ident pattern resides in the oob area of the first page 459 * The bbt ident pattern resides in the oob area of the first page
380 * in a block. 460 * in a block.
381 */ 461 */
382static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td) 462static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
383{ 463{
384 struct nand_chip *this = mtd->priv; 464 struct nand_chip *this = mtd->priv;
385 int i, chips; 465 int i, chips;
386 int bits, startblock, block, dir; 466 int bits, startblock, block, dir;
387 int scanlen = mtd->oobblock + mtd->oobsize; 467 int scanlen = mtd->writesize + mtd->oobsize;
388 int bbtblocks; 468 int bbtblocks;
469 int blocktopage = this->bbt_erase_shift - this->page_shift;
389 470
390 /* Search direction top -> down ? */ 471 /* Search direction top -> down ? */
391 if (td->options & NAND_BBT_LASTBLOCK) { 472 if (td->options & NAND_BBT_LASTBLOCK) {
392 startblock = (mtd->size >> this->bbt_erase_shift) -1; 473 startblock = (mtd->size >> this->bbt_erase_shift) - 1;
393 dir = -1; 474 dir = -1;
394 } else { 475 } else {
395 startblock = 0; 476 startblock = 0;
@@ -415,13 +496,16 @@ static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
415 td->pages[i] = -1; 496 td->pages[i] = -1;
416 /* Scan the maximum number of blocks */ 497 /* Scan the maximum number of blocks */
417 for (block = 0; block < td->maxblocks; block++) { 498 for (block = 0; block < td->maxblocks; block++) {
499
418 int actblock = startblock + dir * block; 500 int actblock = startblock + dir * block;
501 loff_t offs = actblock << this->bbt_erase_shift;
502
419 /* Read first page */ 503 /* Read first page */
420 nand_read_raw (mtd, buf, actblock << this->bbt_erase_shift, mtd->oobblock, mtd->oobsize); 504 scan_read_raw(mtd, buf, offs, mtd->writesize);
421 if (!check_pattern(buf, scanlen, mtd->oobblock, td)) { 505 if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
422 td->pages[i] = actblock << (this->bbt_erase_shift - this->page_shift); 506 td->pages[i] = actblock << blocktopage;
423 if (td->options & NAND_BBT_VERSION) { 507 if (td->options & NAND_BBT_VERSION) {
424 td->version[i] = buf[mtd->oobblock + td->veroffs]; 508 td->version[i] = buf[mtd->writesize + td->veroffs];
425 } 509 }
426 break; 510 break;
427 } 511 }
@@ -431,9 +515,10 @@ static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
431 /* Check, if we found a bbt for each requested chip */ 515 /* Check, if we found a bbt for each requested chip */
432 for (i = 0; i < chips; i++) { 516 for (i = 0; i < chips; i++) {
433 if (td->pages[i] == -1) 517 if (td->pages[i] == -1)
434 printk (KERN_WARNING "Bad block table not found for chip %d\n", i); 518 printk(KERN_WARNING "Bad block table not found for chip %d\n", i);
435 else 519 else
436 printk (KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i], td->version[i]); 520 printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i],
521 td->version[i]);
437 } 522 }
438 return 0; 523 return 0;
439} 524}
@@ -447,21 +532,19 @@ static int search_bbt (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
447 * 532 *
448 * Search and read the bad block table(s) 533 * Search and read the bad block table(s)
449*/ 534*/
450static int search_read_bbts (struct mtd_info *mtd, uint8_t *buf, 535static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
451 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
452{ 536{
453 /* Search the primary table */ 537 /* Search the primary table */
454 search_bbt (mtd, buf, td); 538 search_bbt(mtd, buf, td);
455 539
456 /* Search the mirror table */ 540 /* Search the mirror table */
457 if (md) 541 if (md)
458 search_bbt (mtd, buf, md); 542 search_bbt(mtd, buf, md);
459 543
460 /* Force result check */ 544 /* Force result check */
461 return 1; 545 return 1;
462} 546}
463 547
464
465/** 548/**
466 * write_bbt - [GENERIC] (Re)write the bad block table 549 * write_bbt - [GENERIC] (Re)write the bad block table
467 * 550 *
@@ -474,25 +557,31 @@ static int search_read_bbts (struct mtd_info *mtd, uint8_t *buf,
474 * (Re)write the bad block table 557 * (Re)write the bad block table
475 * 558 *
476*/ 559*/
477static int write_bbt (struct mtd_info *mtd, uint8_t *buf, 560static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
478 struct nand_bbt_descr *td, struct nand_bbt_descr *md, int chipsel) 561 struct nand_bbt_descr *td, struct nand_bbt_descr *md,
562 int chipsel)
479{ 563{
480 struct nand_chip *this = mtd->priv; 564 struct nand_chip *this = mtd->priv;
481 struct nand_oobinfo oobinfo;
482 struct erase_info einfo; 565 struct erase_info einfo;
483 int i, j, res, chip = 0; 566 int i, j, res, chip = 0;
484 int bits, startblock, dir, page, offs, numblocks, sft, sftmsk; 567 int bits, startblock, dir, page, offs, numblocks, sft, sftmsk;
485 int nrchips, bbtoffs, pageoffs; 568 int nrchips, bbtoffs, pageoffs, ooboffs;
486 uint8_t msk[4]; 569 uint8_t msk[4];
487 uint8_t rcode = td->reserved_block_code; 570 uint8_t rcode = td->reserved_block_code;
488 size_t retlen, len = 0; 571 size_t retlen, len = 0;
489 loff_t to; 572 loff_t to;
573 struct mtd_oob_ops ops;
574
575 ops.ooblen = mtd->oobsize;
576 ops.ooboffs = 0;
577 ops.datbuf = NULL;
578 ops.mode = MTD_OOB_PLACE;
490 579
491 if (!rcode) 580 if (!rcode)
492 rcode = 0xff; 581 rcode = 0xff;
493 /* Write bad block table per chip rather than per device ? */ 582 /* Write bad block table per chip rather than per device ? */
494 if (td->options & NAND_BBT_PERCHIP) { 583 if (td->options & NAND_BBT_PERCHIP) {
495 numblocks = (int) (this->chipsize >> this->bbt_erase_shift); 584 numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
496 /* Full device write or specific chip ? */ 585 /* Full device write or specific chip ? */
497 if (chipsel == -1) { 586 if (chipsel == -1) {
498 nrchips = this->numchips; 587 nrchips = this->numchips;
@@ -501,7 +590,7 @@ static int write_bbt (struct mtd_info *mtd, uint8_t *buf,
501 chip = chipsel; 590 chip = chipsel;
502 } 591 }
503 } else { 592 } else {
504 numblocks = (int) (mtd->size >> this->bbt_erase_shift); 593 numblocks = (int)(mtd->size >> this->bbt_erase_shift);
505 nrchips = 1; 594 nrchips = 1;
506 } 595 }
507 596
@@ -530,27 +619,38 @@ static int write_bbt (struct mtd_info *mtd, uint8_t *buf,
530 for (i = 0; i < td->maxblocks; i++) { 619 for (i = 0; i < td->maxblocks; i++) {
531 int block = startblock + dir * i; 620 int block = startblock + dir * i;
532 /* Check, if the block is bad */ 621 /* Check, if the block is bad */
533 switch ((this->bbt[block >> 2] >> (2 * (block & 0x03))) & 0x03) { 622 switch ((this->bbt[block >> 2] >>
623 (2 * (block & 0x03))) & 0x03) {
534 case 0x01: 624 case 0x01:
535 case 0x03: 625 case 0x03:
536 continue; 626 continue;
537 } 627 }
538 page = block << (this->bbt_erase_shift - this->page_shift); 628 page = block <<
629 (this->bbt_erase_shift - this->page_shift);
539 /* Check, if the block is used by the mirror table */ 630 /* Check, if the block is used by the mirror table */
540 if (!md || md->pages[chip] != page) 631 if (!md || md->pages[chip] != page)
541 goto write; 632 goto write;
542 } 633 }
543 printk (KERN_ERR "No space left to write bad block table\n"); 634 printk(KERN_ERR "No space left to write bad block table\n");
544 return -ENOSPC; 635 return -ENOSPC;
545write: 636 write:
546 637
547 /* Set up shift count and masks for the flash table */ 638 /* Set up shift count and masks for the flash table */
548 bits = td->options & NAND_BBT_NRBITS_MSK; 639 bits = td->options & NAND_BBT_NRBITS_MSK;
640 msk[2] = ~rcode;
549 switch (bits) { 641 switch (bits) {
550 case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x01; break; 642 case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
551 case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01; msk[2] = ~rcode; msk[3] = 0x03; break; 643 msk[3] = 0x01;
552 case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C; msk[2] = ~rcode; msk[3] = 0x0f; break; 644 break;
553 case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F; msk[2] = ~rcode; msk[3] = 0xff; break; 645 case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
646 msk[3] = 0x03;
647 break;
648 case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
649 msk[3] = 0x0f;
650 break;
651 case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
652 msk[3] = 0xff;
653 break;
554 default: return -EINVAL; 654 default: return -EINVAL;
555 } 655 }
556 656
@@ -558,82 +658,92 @@ write:
558 658
559 to = ((loff_t) page) << this->page_shift; 659 to = ((loff_t) page) << this->page_shift;
560 660
561 memcpy (&oobinfo, this->autooob, sizeof(oobinfo));
562 oobinfo.useecc = MTD_NANDECC_PLACEONLY;
563
564 /* Must we save the block contents ? */ 661 /* Must we save the block contents ? */
565 if (td->options & NAND_BBT_SAVECONTENT) { 662 if (td->options & NAND_BBT_SAVECONTENT) {
566 /* Make it block aligned */ 663 /* Make it block aligned */
567 to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1)); 664 to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
568 len = 1 << this->bbt_erase_shift; 665 len = 1 << this->bbt_erase_shift;
569 res = mtd->read_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo); 666 res = mtd->read(mtd, to, len, &retlen, buf);
570 if (res < 0) { 667 if (res < 0) {
571 if (retlen != len) { 668 if (retlen != len) {
572 printk (KERN_INFO "nand_bbt: Error reading block for writing the bad block table\n"); 669 printk(KERN_INFO "nand_bbt: Error "
670 "reading block for writing "
671 "the bad block table\n");
573 return res; 672 return res;
574 } 673 }
575 printk (KERN_WARNING "nand_bbt: ECC error while reading block for writing bad block table\n"); 674 printk(KERN_WARNING "nand_bbt: ECC error "
675 "while reading block for writing "
676 "bad block table\n");
576 } 677 }
678 /* Read oob data */
679 ops.len = (len >> this->page_shift) * mtd->oobsize;
680 ops.oobbuf = &buf[len];
681 res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
682 if (res < 0 || ops.retlen != ops.len)
683 goto outerr;
684
577 /* Calc the byte offset in the buffer */ 685 /* Calc the byte offset in the buffer */
578 pageoffs = page - (int)(to >> this->page_shift); 686 pageoffs = page - (int)(to >> this->page_shift);
579 offs = pageoffs << this->page_shift; 687 offs = pageoffs << this->page_shift;
580 /* Preset the bbt area with 0xff */ 688 /* Preset the bbt area with 0xff */
581 memset (&buf[offs], 0xff, (size_t)(numblocks >> sft)); 689 memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
582 /* Preset the bbt's oob area with 0xff */ 690 ooboffs = len + (pageoffs * mtd->oobsize);
583 memset (&buf[len + pageoffs * mtd->oobsize], 0xff, 691
584 ((len >> this->page_shift) - pageoffs) * mtd->oobsize);
585 if (td->options & NAND_BBT_VERSION) {
586 buf[len + (pageoffs * mtd->oobsize) + td->veroffs] = td->version[chip];
587 }
588 } else { 692 } else {
589 /* Calc length */ 693 /* Calc length */
590 len = (size_t) (numblocks >> sft); 694 len = (size_t) (numblocks >> sft);
591 /* Make it page aligned ! */ 695 /* Make it page aligned ! */
592 len = (len + (mtd->oobblock-1)) & ~(mtd->oobblock-1); 696 len = (len + (mtd->writesize - 1)) &
697 ~(mtd->writesize - 1);
593 /* Preset the buffer with 0xff */ 698 /* Preset the buffer with 0xff */
594 memset (buf, 0xff, len + (len >> this->page_shift) * mtd->oobsize); 699 memset(buf, 0xff, len +
700 (len >> this->page_shift)* mtd->oobsize);
595 offs = 0; 701 offs = 0;
702 ooboffs = len;
596 /* Pattern is located in oob area of first page */ 703 /* Pattern is located in oob area of first page */
597 memcpy (&buf[len + td->offs], td->pattern, td->len); 704 memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
598 if (td->options & NAND_BBT_VERSION) {
599 buf[len + td->veroffs] = td->version[chip];
600 }
601 } 705 }
602 706
707 if (td->options & NAND_BBT_VERSION)
708 buf[ooboffs + td->veroffs] = td->version[chip];
709
603 /* walk through the memory table */ 710 /* walk through the memory table */
604 for (i = 0; i < numblocks; ) { 711 for (i = 0; i < numblocks;) {
605 uint8_t dat; 712 uint8_t dat;
606 dat = this->bbt[bbtoffs + (i >> 2)]; 713 dat = this->bbt[bbtoffs + (i >> 2)];
607 for (j = 0; j < 4; j++ , i++) { 714 for (j = 0; j < 4; j++, i++) {
608 int sftcnt = (i << (3 - sft)) & sftmsk; 715 int sftcnt = (i << (3 - sft)) & sftmsk;
609 /* Do not store the reserved bbt blocks ! */ 716 /* Do not store the reserved bbt blocks ! */
610 buf[offs + (i >> sft)] &= ~(msk[dat & 0x03] << sftcnt); 717 buf[offs + (i >> sft)] &=
718 ~(msk[dat & 0x03] << sftcnt);
611 dat >>= 2; 719 dat >>= 2;
612 } 720 }
613 } 721 }
614 722
615 memset (&einfo, 0, sizeof (einfo)); 723 memset(&einfo, 0, sizeof(einfo));
616 einfo.mtd = mtd; 724 einfo.mtd = mtd;
617 einfo.addr = (unsigned long) to; 725 einfo.addr = (unsigned long)to;
618 einfo.len = 1 << this->bbt_erase_shift; 726 einfo.len = 1 << this->bbt_erase_shift;
619 res = nand_erase_nand (mtd, &einfo, 1); 727 res = nand_erase_nand(mtd, &einfo, 1);
620 if (res < 0) { 728 if (res < 0)
621 printk (KERN_WARNING "nand_bbt: Error during block erase: %d\n", res); 729 goto outerr;
622 return res;
623 }
624 730
625 res = mtd->write_ecc (mtd, to, len, &retlen, buf, &buf[len], &oobinfo); 731 res = scan_write_bbt(mtd, to, len, buf, &buf[len]);
626 if (res < 0) { 732 if (res < 0)
627 printk (KERN_WARNING "nand_bbt: Error while writing bad block table %d\n", res); 733 goto outerr;
628 return res; 734
629 } 735 printk(KERN_DEBUG "Bad block table written to 0x%08x, version "
630 printk (KERN_DEBUG "Bad block table written to 0x%08x, version 0x%02X\n", 736 "0x%02X\n", (unsigned int)to, td->version[chip]);
631 (unsigned int) to, td->version[chip]);
632 737
633 /* Mark it as used */ 738 /* Mark it as used */
634 td->pages[chip] = page; 739 td->pages[chip] = page;
635 } 740 }
636 return 0; 741 return 0;
742
743 outerr:
744 printk(KERN_WARNING
745 "nand_bbt: Error while writing bad block table %d\n", res);
746 return res;
637} 747}
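
For the on-flash table, write_bbt() above shrinks the two-bit in-memory entries down to 1, 2, 4 or 8 bits per block using the msk/sft/sftmsk values from the switch. A sketch of the 1-bit case (sft = 3, sftmsk = 0x07, msk = {0x00, 0x01, ~rcode, 0x01}); the table contents are made-up examples and a direct per-block extraction stands in for the nested four-entry loop:

/* Fold a two-bit in-memory table into a 1-bit-per-block flash image.
 * The image is preset to 0xff, so good and reserved blocks keep their
 * bit set and bad blocks get it cleared. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NUMBLOCKS 16

int main(void)
{
	uint8_t mem_bbt[NUMBLOCKS / 4];		/* 2 bits per block */
	uint8_t flash[NUMBLOCKS / 8];		/* 1 bit per block */
	uint8_t rcode = 0xff;			/* reserved block code */
	uint8_t msk[4] = { 0x00, 0x01, (uint8_t)~rcode, 0x01 };
	int sft = 3, sftmsk = 0x07;
	int i;

	memset(mem_bbt, 0x00, sizeof(mem_bbt));
	mem_bbt[3 >> 2]  |= 0x03 << (2 * (3 & 0x03));	/* block 3 factory bad */
	mem_bbt[9 >> 2]  |= 0x01 << (2 * (9 & 0x03));	/* block 9 worn out */
	mem_bbt[12 >> 2] |= 0x02 << (2 * (12 & 0x03));	/* block 12 reserved (bbt) */

	memset(flash, 0xff, sizeof(flash));
	for (i = 0; i < NUMBLOCKS; i++) {
		uint8_t dat = (mem_bbt[i >> 2] >> (2 * (i & 0x03))) & 0x03;
		int sftcnt = (i << (3 - sft)) & sftmsk;

		/* reserved blocks are not stored: msk[2] is 0 for rcode 0xff */
		flash[i >> sft] &= ~(msk[dat] << sftcnt);
	}

	printf("flash bbt bytes: %02x %02x\n", flash[0], flash[1]);
	return 0;	/* expect f7 fd: bits for blocks 3 and 9 cleared */
}
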
638 748
639/** 749/**
@@ -644,27 +754,27 @@ write:
644 * The function creates a memory based bbt by scanning the device 754 * The function creates a memory based bbt by scanning the device
645 * for manufacturer / software marked good / bad blocks 755 * for manufacturer / software marked good / bad blocks
646*/ 756*/
647static inline int nand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd) 757static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
648{ 758{
649 struct nand_chip *this = mtd->priv; 759 struct nand_chip *this = mtd->priv;
650 760
651 bd->options &= ~NAND_BBT_SCANEMPTY; 761 bd->options &= ~NAND_BBT_SCANEMPTY;
652 return create_bbt (mtd, this->data_buf, bd, -1); 762 return create_bbt(mtd, this->buffers.databuf, bd, -1);
653} 763}
654 764
655/** 765/**
656 * check_create - [GENERIC] create and write bbt(s) if neccecary 766 * check_create - [GENERIC] create and write bbt(s) if necessary
657 * @mtd: MTD device structure 767 * @mtd: MTD device structure
658 * @buf: temporary buffer 768 * @buf: temporary buffer
659 * @bd: descriptor for the good/bad block search pattern 769 * @bd: descriptor for the good/bad block search pattern
660 * 770 *
661 * The function checks the results of the previous call to read_bbt 771 * The function checks the results of the previous call to read_bbt
662 * and creates / updates the bbt(s) if neccecary 772 * and creates / updates the bbt(s) if necessary
663 * Creation is neccecary if no bbt was found for the chip/device 773 * Creation is necessary if no bbt was found for the chip/device
664 * Update is neccecary if one of the tables is missing or the 774 * Update is necessary if one of the tables is missing or the
665 * version nr. of one table is less than the other 775 * version nr. of one table is less than the other
666*/ 776*/
667static int check_create (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd) 777static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
668{ 778{
669 int i, chips, writeops, chipsel, res; 779 int i, chips, writeops, chipsel, res;
670 struct nand_chip *this = mtd->priv; 780 struct nand_chip *this = mtd->priv;
@@ -732,35 +842,35 @@ static int check_create (struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_des
732 rd = td; 842 rd = td;
733 goto writecheck; 843 goto writecheck;
734 } 844 }
735create: 845 create:
736 /* Create the bad block table by scanning the device ? */ 846 /* Create the bad block table by scanning the device ? */
737 if (!(td->options & NAND_BBT_CREATE)) 847 if (!(td->options & NAND_BBT_CREATE))
738 continue; 848 continue;
739 849
740 /* Create the table in memory by scanning the chip(s) */ 850 /* Create the table in memory by scanning the chip(s) */
741 create_bbt (mtd, buf, bd, chipsel); 851 create_bbt(mtd, buf, bd, chipsel);
742 852
743 td->version[i] = 1; 853 td->version[i] = 1;
744 if (md) 854 if (md)
745 md->version[i] = 1; 855 md->version[i] = 1;
746writecheck: 856 writecheck:
747 /* read back first ? */ 857 /* read back first ? */
748 if (rd) 858 if (rd)
749 read_abs_bbt (mtd, buf, rd, chipsel); 859 read_abs_bbt(mtd, buf, rd, chipsel);
750 /* If they weren't versioned, read both. */ 860 /* If they weren't versioned, read both. */
751 if (rd2) 861 if (rd2)
752 read_abs_bbt (mtd, buf, rd2, chipsel); 862 read_abs_bbt(mtd, buf, rd2, chipsel);
753 863
754 /* Write the bad block table to the device ? */ 864 /* Write the bad block table to the device ? */
755 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 865 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
756 res = write_bbt (mtd, buf, td, md, chipsel); 866 res = write_bbt(mtd, buf, td, md, chipsel);
757 if (res < 0) 867 if (res < 0)
758 return res; 868 return res;
759 } 869 }
760 870
761 /* Write the mirror bad block table to the device ? */ 871 /* Write the mirror bad block table to the device ? */
762 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 872 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
763 res = write_bbt (mtd, buf, md, td, chipsel); 873 res = write_bbt(mtd, buf, md, td, chipsel);
764 if (res < 0) 874 if (res < 0)
765 return res; 875 return res;
766 } 876 }
@@ -777,7 +887,7 @@ writecheck:
777 * accidental erasures / writes. The regions are identified by 887 * accidental erasures / writes. The regions are identified by
778 * the mark 0x02. 888 * the mark 0x02.
779*/ 889*/
780static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td) 890static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
781{ 891{
782 struct nand_chip *this = mtd->priv; 892 struct nand_chip *this = mtd->priv;
783 int i, j, chips, block, nrblocks, update; 893 int i, j, chips, block, nrblocks, update;
@@ -795,7 +905,8 @@ static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td)
795 for (i = 0; i < chips; i++) { 905 for (i = 0; i < chips; i++) {
796 if ((td->options & NAND_BBT_ABSPAGE) || 906 if ((td->options & NAND_BBT_ABSPAGE) ||
797 !(td->options & NAND_BBT_WRITE)) { 907 !(td->options & NAND_BBT_WRITE)) {
798 if (td->pages[i] == -1) continue; 908 if (td->pages[i] == -1)
909 continue;
799 block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift); 910 block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
800 block <<= 1; 911 block <<= 1;
801 oldval = this->bbt[(block >> 3)]; 912 oldval = this->bbt[(block >> 3)];
@@ -815,7 +926,8 @@ static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td)
815 oldval = this->bbt[(block >> 3)]; 926 oldval = this->bbt[(block >> 3)];
816 newval = oldval | (0x2 << (block & 0x06)); 927 newval = oldval | (0x2 << (block & 0x06));
817 this->bbt[(block >> 3)] = newval; 928 this->bbt[(block >> 3)] = newval;
818 if (oldval != newval) update = 1; 929 if (oldval != newval)
930 update = 1;
819 block += 2; 931 block += 2;
820 } 932 }
821 /* If we want reserved blocks to be recorded to flash, and some 933 /* If we want reserved blocks to be recorded to flash, and some
@@ -840,7 +952,7 @@ static void mark_bbt_region (struct mtd_info *mtd, struct nand_bbt_descr *td)
840 * by calling the nand_free_bbt function. 952 * by calling the nand_free_bbt function.
841 * 953 *
842*/ 954*/
843int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd) 955int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
844{ 956{
845 struct nand_chip *this = mtd->priv; 957 struct nand_chip *this = mtd->priv;
846 int len, res = 0; 958 int len, res = 0;
@@ -850,21 +962,21 @@ int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
850 962
851 len = mtd->size >> (this->bbt_erase_shift + 2); 963 len = mtd->size >> (this->bbt_erase_shift + 2);
852 /* Allocate memory (2bit per block) */ 964 /* Allocate memory (2bit per block) */
853 this->bbt = kmalloc (len, GFP_KERNEL); 965 this->bbt = kmalloc(len, GFP_KERNEL);
854 if (!this->bbt) { 966 if (!this->bbt) {
855 printk (KERN_ERR "nand_scan_bbt: Out of memory\n"); 967 printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
856 return -ENOMEM; 968 return -ENOMEM;
857 } 969 }
858 /* Clear the memory bad block table */ 970 /* Clear the memory bad block table */
859 memset (this->bbt, 0x00, len); 971 memset(this->bbt, 0x00, len);
860 972
861 /* If no primary table decriptor is given, scan the device 973 /* If no primary table decriptor is given, scan the device
862 * to build a memory based bad block table 974 * to build a memory based bad block table
863 */ 975 */
864 if (!td) { 976 if (!td) {
865 if ((res = nand_memory_bbt(mtd, bd))) { 977 if ((res = nand_memory_bbt(mtd, bd))) {
866 printk (KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n"); 978 printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n");
867 kfree (this->bbt); 979 kfree(this->bbt);
868 this->bbt = NULL; 980 this->bbt = NULL;
869 } 981 }
870 return res; 982 return res;
@@ -873,35 +985,34 @@ int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
873 /* Allocate a temporary buffer for one eraseblock incl. oob */ 985 /* Allocate a temporary buffer for one eraseblock incl. oob */
874 len = (1 << this->bbt_erase_shift); 986 len = (1 << this->bbt_erase_shift);
875 len += (len >> this->page_shift) * mtd->oobsize; 987 len += (len >> this->page_shift) * mtd->oobsize;
876 buf = kmalloc (len, GFP_KERNEL); 988 buf = vmalloc(len);
877 if (!buf) { 989 if (!buf) {
878 printk (KERN_ERR "nand_bbt: Out of memory\n"); 990 printk(KERN_ERR "nand_bbt: Out of memory\n");
879 kfree (this->bbt); 991 kfree(this->bbt);
880 this->bbt = NULL; 992 this->bbt = NULL;
881 return -ENOMEM; 993 return -ENOMEM;
882 } 994 }
883 995
884 /* Is the bbt at a given page ? */ 996 /* Is the bbt at a given page ? */
885 if (td->options & NAND_BBT_ABSPAGE) { 997 if (td->options & NAND_BBT_ABSPAGE) {
886 res = read_abs_bbts (mtd, buf, td, md); 998 res = read_abs_bbts(mtd, buf, td, md);
887 } else { 999 } else {
888 /* Search the bad block table using a pattern in oob */ 1000 /* Search the bad block table using a pattern in oob */
889 res = search_read_bbts (mtd, buf, td, md); 1001 res = search_read_bbts(mtd, buf, td, md);
890 } 1002 }
891 1003
892 if (res) 1004 if (res)
893 res = check_create (mtd, buf, bd); 1005 res = check_create(mtd, buf, bd);
894 1006
895 /* Prevent the bbt regions from erasing / writing */ 1007 /* Prevent the bbt regions from erasing / writing */
896 mark_bbt_region (mtd, td); 1008 mark_bbt_region(mtd, td);
897 if (md) 1009 if (md)
898 mark_bbt_region (mtd, md); 1010 mark_bbt_region(mtd, md);
899 1011
900 kfree (buf); 1012 vfree(buf);
901 return res; 1013 return res;
902} 1014}
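
The two allocations sized in nand_scan_bbt() above are two bits of RAM per eraseblock and a temporary buffer holding one eraseblock plus the OOB of all its pages; the switch to vmalloc() presumably reflects that this buffer can grow well past comfortable kmalloc() sizes. The arithmetic, with assumed geometry:

/* Sizing sketch for the RAM bad block table and the temporary
 * eraseblock buffer.  Device geometry is an assumed example. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned long long size = 256ULL << 20;	/* device size */
	unsigned int writesize = 2048, oobsize = 64, erasesize = 128 * 1024;

	int page_shift = ffs(writesize) - 1;
	int bbt_erase_shift = ffs(erasesize) - 1;

	unsigned long bbt_len = (unsigned long)(size >> (bbt_erase_shift + 2));
	unsigned long buf_len = (1UL << bbt_erase_shift) +
		((1UL << bbt_erase_shift) >> page_shift) * oobsize;

	printf("RAM bbt: %lu bytes, temp buffer: %lu bytes\n", bbt_len, buf_len);
	return 0;	/* 512 and 135168 (128 KiB + 64 pages * 64 bytes OOB) */
}
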
903 1015
904
905/** 1016/**
906 * nand_update_bbt - [NAND Interface] update bad block table(s) 1017 * nand_update_bbt - [NAND Interface] update bad block table(s)
907 * @mtd: MTD device structure 1018 * @mtd: MTD device structure
@@ -909,7 +1020,7 @@ int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
909 * 1020 *
910 * The function updates the bad block table(s) 1021 * The function updates the bad block table(s)
911*/ 1022*/
912int nand_update_bbt (struct mtd_info *mtd, loff_t offs) 1023int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
913{ 1024{
914 struct nand_chip *this = mtd->priv; 1025 struct nand_chip *this = mtd->priv;
915 int len, res = 0, writeops = 0; 1026 int len, res = 0, writeops = 0;
@@ -925,9 +1036,9 @@ int nand_update_bbt (struct mtd_info *mtd, loff_t offs)
925 /* Allocate a temporary buffer for one eraseblock incl. oob */ 1036 /* Allocate a temporary buffer for one eraseblock incl. oob */
926 len = (1 << this->bbt_erase_shift); 1037 len = (1 << this->bbt_erase_shift);
927 len += (len >> this->page_shift) * mtd->oobsize; 1038 len += (len >> this->page_shift) * mtd->oobsize;
928 buf = kmalloc (len, GFP_KERNEL); 1039 buf = kmalloc(len, GFP_KERNEL);
929 if (!buf) { 1040 if (!buf) {
930 printk (KERN_ERR "nand_update_bbt: Out of memory\n"); 1041 printk(KERN_ERR "nand_update_bbt: Out of memory\n");
931 return -ENOMEM; 1042 return -ENOMEM;
932 } 1043 }
933 1044
@@ -935,7 +1046,7 @@ int nand_update_bbt (struct mtd_info *mtd, loff_t offs)
935 1046
936 /* Do we have a bbt per chip ? */ 1047 /* Do we have a bbt per chip ? */
937 if (td->options & NAND_BBT_PERCHIP) { 1048 if (td->options & NAND_BBT_PERCHIP) {
938 chip = (int) (offs >> this->chip_shift); 1049 chip = (int)(offs >> this->chip_shift);
939 chipsel = chip; 1050 chipsel = chip;
940 } else { 1051 } else {
941 chip = 0; 1052 chip = 0;
@@ -948,17 +1059,17 @@ int nand_update_bbt (struct mtd_info *mtd, loff_t offs)
948 1059
949 /* Write the bad block table to the device ? */ 1060 /* Write the bad block table to the device ? */
950 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) { 1061 if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
951 res = write_bbt (mtd, buf, td, md, chipsel); 1062 res = write_bbt(mtd, buf, td, md, chipsel);
952 if (res < 0) 1063 if (res < 0)
953 goto out; 1064 goto out;
954 } 1065 }
955 /* Write the mirror bad block table to the device ? */ 1066 /* Write the mirror bad block table to the device ? */
956 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) { 1067 if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
957 res = write_bbt (mtd, buf, md, td, chipsel); 1068 res = write_bbt(mtd, buf, md, td, chipsel);
958 } 1069 }
959 1070
960out: 1071 out:
961 kfree (buf); 1072 kfree(buf);
962 return res; 1073 return res;
963} 1074}
964 1075
@@ -981,14 +1092,14 @@ static struct nand_bbt_descr largepage_memorybased = {
981}; 1092};
982 1093
983static struct nand_bbt_descr smallpage_flashbased = { 1094static struct nand_bbt_descr smallpage_flashbased = {
984 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, 1095 .options = NAND_BBT_SCAN2NDPAGE,
985 .offs = 5, 1096 .offs = 5,
986 .len = 1, 1097 .len = 1,
987 .pattern = scan_ff_pattern 1098 .pattern = scan_ff_pattern
988}; 1099};
989 1100
990static struct nand_bbt_descr largepage_flashbased = { 1101static struct nand_bbt_descr largepage_flashbased = {
991 .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, 1102 .options = NAND_BBT_SCAN2NDPAGE,
992 .offs = 0, 1103 .offs = 0,
993 .len = 2, 1104 .len = 2,
994 .pattern = scan_ff_pattern 1105 .pattern = scan_ff_pattern
@@ -1036,7 +1147,7 @@ static struct nand_bbt_descr bbt_mirror_descr = {
1036 * support for the device and calls the nand_scan_bbt function 1147 * support for the device and calls the nand_scan_bbt function
1037 * 1148 *
1038*/ 1149*/
1039int nand_default_bbt (struct mtd_info *mtd) 1150int nand_default_bbt(struct mtd_info *mtd)
1040{ 1151{
1041 struct nand_chip *this = mtd->priv; 1152 struct nand_chip *this = mtd->priv;
1042 1153
@@ -1046,7 +1157,7 @@ int nand_default_bbt (struct mtd_info *mtd)
1046 * of the good / bad information, so we _must_ store 1157 * of the good / bad information, so we _must_ store
1047 * this information in a good / bad table during 1158 * this information in a good / bad table during
1048 * startup 1159 * startup
1049 */ 1160 */
1050 if (this->options & NAND_IS_AND) { 1161 if (this->options & NAND_IS_AND) {
1051 /* Use the default pattern descriptors */ 1162 /* Use the default pattern descriptors */
1052 if (!this->bbt_td) { 1163 if (!this->bbt_td) {
@@ -1054,10 +1165,9 @@ int nand_default_bbt (struct mtd_info *mtd)
1054 this->bbt_md = &bbt_mirror_descr; 1165 this->bbt_md = &bbt_mirror_descr;
1055 } 1166 }
1056 this->options |= NAND_USE_FLASH_BBT; 1167 this->options |= NAND_USE_FLASH_BBT;
1057 return nand_scan_bbt (mtd, &agand_flashbased); 1168 return nand_scan_bbt(mtd, &agand_flashbased);
1058 } 1169 }
1059 1170
1060
1061 /* Is a flash based bad block table requested ? */ 1171 /* Is a flash based bad block table requested ? */
1062 if (this->options & NAND_USE_FLASH_BBT) { 1172 if (this->options & NAND_USE_FLASH_BBT) {
1063 /* Use the default pattern descriptors */ 1173 /* Use the default pattern descriptors */
@@ -1066,18 +1176,17 @@ int nand_default_bbt (struct mtd_info *mtd)
1066 this->bbt_md = &bbt_mirror_descr; 1176 this->bbt_md = &bbt_mirror_descr;
1067 } 1177 }
1068 if (!this->badblock_pattern) { 1178 if (!this->badblock_pattern) {
1069 this->badblock_pattern = (mtd->oobblock > 512) ? 1179 this->badblock_pattern = (mtd->writesize > 512) ? &largepage_flashbased : &smallpage_flashbased;
1070 &largepage_flashbased : &smallpage_flashbased;
1071 } 1180 }
1072 } else { 1181 } else {
1073 this->bbt_td = NULL; 1182 this->bbt_td = NULL;
1074 this->bbt_md = NULL; 1183 this->bbt_md = NULL;
1075 if (!this->badblock_pattern) { 1184 if (!this->badblock_pattern) {
1076 this->badblock_pattern = (mtd->oobblock > 512) ? 1185 this->badblock_pattern = (mtd->writesize > 512) ?
1077 &largepage_memorybased : &smallpage_memorybased; 1186 &largepage_memorybased : &smallpage_memorybased;
1078 } 1187 }
1079 } 1188 }
1080 return nand_scan_bbt (mtd, this->badblock_pattern); 1189 return nand_scan_bbt(mtd, this->badblock_pattern);
1081} 1190}
1082 1191
1083/** 1192/**
@@ -1087,26 +1196,29 @@ int nand_default_bbt (struct mtd_info *mtd)
1087 * @allowbbt: allow access to bad block table region 1196 * @allowbbt: allow access to bad block table region
1088 * 1197 *
1089*/ 1198*/
1090int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt) 1199int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
1091{ 1200{
1092 struct nand_chip *this = mtd->priv; 1201 struct nand_chip *this = mtd->priv;
1093 int block; 1202 int block;
1094 uint8_t res; 1203 uint8_t res;
1095 1204
1096 /* Get block number * 2 */ 1205 /* Get block number * 2 */
1097 block = (int) (offs >> (this->bbt_erase_shift - 1)); 1206 block = (int)(offs >> (this->bbt_erase_shift - 1));
1098 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; 1207 res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
1099 1208
1100 DEBUG (MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n", 1209 DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
1101 (unsigned int)offs, block >> 1, res); 1210 (unsigned int)offs, block >> 1, res);
1102 1211
1103 switch ((int)res) { 1212 switch ((int)res) {
1104 case 0x00: return 0; 1213 case 0x00:
1105 case 0x01: return 1; 1214 return 0;
1106 case 0x02: return allowbbt ? 0 : 1; 1215 case 0x01:
1216 return 1;
1217 case 0x02:
1218 return allowbbt ? 0 : 1;
1107 } 1219 }
1108 return 1; 1220 return 1;
1109} 1221}
1110 1222
1111EXPORT_SYMBOL (nand_scan_bbt); 1223EXPORT_SYMBOL(nand_scan_bbt);
1112EXPORT_SYMBOL (nand_default_bbt); 1224EXPORT_SYMBOL(nand_default_bbt);
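
A side note on the lookup in nand_isbad_bbt() above: the in-memory bad block table packs two bits per eraseblock (four blocks per byte), with 00 = good, 01 = marked bad during operation, 10 = reserved for the BBT area and 11 = factory-marked bad. A minimal standalone restatement of that decoding, written here purely as an illustration (the function name and plain-C types are not part of the patch):

#include <stdint.h>

static int bbt_block_is_bad(const uint8_t *bbt, uint64_t offs,
                            unsigned int bbt_erase_shift, int allowbbt)
{
        /* block number times two, so each block selects a 2-bit field */
        unsigned int block = (unsigned int)(offs >> (bbt_erase_shift - 1));
        uint8_t entry = (bbt[block >> 3] >> (block & 0x06)) & 0x03;

        switch (entry) {
        case 0x00:
                return 0;                       /* good block */
        case 0x01:
                return 1;                       /* marked bad in operation */
        case 0x02:
                return allowbbt ? 0 : 1;        /* reserved for the BBT itself */
        }
        return 1;                               /* factory-marked bad */
}
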
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 40ac909150a3..2a163e4084df 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -7,6 +7,8 @@
7 * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com) 7 * Copyright (C) 2000-2004 Steven J. Hill (sjhill@realitydiluted.com)
8 * Toshiba America Electronics Components, Inc. 8 * Toshiba America Electronics Components, Inc.
9 * 9 *
10 * Copyright (C) 2006 Thomas Gleixner <tglx@linutronix.de>
11 *
10 * $Id: nand_ecc.c,v 1.15 2005/11/07 11:14:30 gleixner Exp $ 12 * $Id: nand_ecc.c,v 1.15 2005/11/07 11:14:30 gleixner Exp $
11 * 13 *
12 * This file is free software; you can redistribute it and/or modify it 14 * This file is free software; you can redistribute it and/or modify it
@@ -62,90 +64,76 @@ static const u_char nand_ecc_precalc_table[] = {
62 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00 64 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00
63}; 65};
64 66
65
66/** 67/**
67 * nand_trans_result - [GENERIC] create non-inverted ECC 68 * nand_calculate_ecc - [NAND Interface] Calculate 3 byte ECC code
68 * @reg2: line parity reg 2 69 * for 256 byte block
69 * @reg3: line parity reg 3
70 * @ecc_code: ecc
71 *
72 * Creates non-inverted ECC code from line parity
73 */
74static void nand_trans_result(u_char reg2, u_char reg3,
75 u_char *ecc_code)
76{
77 u_char a, b, i, tmp1, tmp2;
78
79 /* Initialize variables */
80 a = b = 0x80;
81 tmp1 = tmp2 = 0;
82
83 /* Calculate first ECC byte */
84 for (i = 0; i < 4; i++) {
85 if (reg3 & a) /* LP15,13,11,9 --> ecc_code[0] */
86 tmp1 |= b;
87 b >>= 1;
88 if (reg2 & a) /* LP14,12,10,8 --> ecc_code[0] */
89 tmp1 |= b;
90 b >>= 1;
91 a >>= 1;
92 }
93
94 /* Calculate second ECC byte */
95 b = 0x80;
96 for (i = 0; i < 4; i++) {
97 if (reg3 & a) /* LP7,5,3,1 --> ecc_code[1] */
98 tmp2 |= b;
99 b >>= 1;
100 if (reg2 & a) /* LP6,4,2,0 --> ecc_code[1] */
101 tmp2 |= b;
102 b >>= 1;
103 a >>= 1;
104 }
105
106 /* Store two of the ECC bytes */
107 ecc_code[0] = tmp1;
108 ecc_code[1] = tmp2;
109}
110
111/**
112 * nand_calculate_ecc - [NAND Interface] Calculate 3 byte ECC code for 256 byte block
113 * @mtd: MTD block structure 70 * @mtd: MTD block structure
114 * @dat: raw data 71 * @dat: raw data
115 * @ecc_code: buffer for ECC 72 * @ecc_code: buffer for ECC
116 */ 73 */
117int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) 74int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
75 u_char *ecc_code)
118{ 76{
119 u_char idx, reg1, reg2, reg3; 77 uint8_t idx, reg1, reg2, reg3, tmp1, tmp2;
120 int j; 78 int i;
121 79
122 /* Initialize variables */ 80 /* Initialize variables */
123 reg1 = reg2 = reg3 = 0; 81 reg1 = reg2 = reg3 = 0;
124 ecc_code[0] = ecc_code[1] = ecc_code[2] = 0;
125 82
126 /* Build up column parity */ 83 /* Build up column parity */
127 for(j = 0; j < 256; j++) { 84 for(i = 0; i < 256; i++) {
128
129 /* Get CP0 - CP5 from table */ 85 /* Get CP0 - CP5 from table */
130 idx = nand_ecc_precalc_table[dat[j]]; 86 idx = nand_ecc_precalc_table[*dat++];
131 reg1 ^= (idx & 0x3f); 87 reg1 ^= (idx & 0x3f);
132 88
133 /* All bit XOR = 1 ? */ 89 /* All bit XOR = 1 ? */
134 if (idx & 0x40) { 90 if (idx & 0x40) {
135 reg3 ^= (u_char) j; 91 reg3 ^= (uint8_t) i;
136 reg2 ^= ~((u_char) j); 92 reg2 ^= ~((uint8_t) i);
137 } 93 }
138 } 94 }
139 95
140 /* Create non-inverted ECC code from line parity */ 96 /* Create non-inverted ECC code from line parity */
141 nand_trans_result(reg2, reg3, ecc_code); 97 tmp1 = (reg3 & 0x80) >> 0; /* B7 -> B7 */
98 tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */
99 tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */
100 tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */
101 tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */
102 tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */
103 tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */
104 tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */
105
106 tmp2 = (reg3 & 0x08) << 4; /* B3 -> B7 */
107 tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */
108 tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */
109 tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */
110 tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */
111 tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */
112 tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */
113 tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */
142 114
143 /* Calculate final ECC code */ 115 /* Calculate final ECC code */
144 ecc_code[0] = ~ecc_code[0]; 116#ifdef CONFIG_NAND_ECC_SMC
145 ecc_code[1] = ~ecc_code[1]; 117 ecc_code[0] = ~tmp2;
118 ecc_code[1] = ~tmp1;
119#else
120 ecc_code[0] = ~tmp1;
121 ecc_code[1] = ~tmp2;
122#endif
146 ecc_code[2] = ((~reg1) << 2) | 0x03; 123 ecc_code[2] = ((~reg1) << 2) | 0x03;
124
147 return 0; 125 return 0;
148} 126}
127EXPORT_SYMBOL(nand_calculate_ecc);
128
129static inline int countbits(uint32_t byte)
130{
131 int res = 0;
132
133 for (;byte; byte >>= 1)
134 res += byte & 0x01;
135 return res;
136}
149 137
150/** 138/**
151 * nand_correct_data - [NAND Interface] Detect and correct bit error(s) 139 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
@@ -156,93 +144,54 @@ int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code
156 * 144 *
157 * Detect and correct a 1 bit error for 256 byte block 145 * Detect and correct a 1 bit error for 256 byte block
158 */ 146 */
159int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) 147int nand_correct_data(struct mtd_info *mtd, u_char *dat,
148 u_char *read_ecc, u_char *calc_ecc)
160{ 149{
161 u_char a, b, c, d1, d2, d3, add, bit, i; 150 uint8_t s0, s1, s2;
151
152#ifdef CONFIG_NAND_ECC_SMC
153 s0 = calc_ecc[0] ^ read_ecc[0];
154 s1 = calc_ecc[1] ^ read_ecc[1];
155 s2 = calc_ecc[2] ^ read_ecc[2];
156#else
157 s1 = calc_ecc[0] ^ read_ecc[0];
158 s0 = calc_ecc[1] ^ read_ecc[1];
159 s2 = calc_ecc[2] ^ read_ecc[2];
160#endif
161 if ((s0 | s1 | s2) == 0)
162 return 0;
162 163
163 /* Do error detection */ 164 /* Check for a single bit error */
164 d1 = calc_ecc[0] ^ read_ecc[0]; 165 if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
165 d2 = calc_ecc[1] ^ read_ecc[1]; 166 ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
166 d3 = calc_ecc[2] ^ read_ecc[2]; 167 ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
167 168
168 if ((d1 | d2 | d3) == 0) { 169 uint32_t byteoffs, bitnum;
169 /* No errors */ 170
170 return 0; 171 byteoffs = (s1 << 0) & 0x80;
171 } 172 byteoffs |= (s1 << 1) & 0x40;
172 else { 173 byteoffs |= (s1 << 2) & 0x20;
173 a = (d1 ^ (d1 >> 1)) & 0x55; 174 byteoffs |= (s1 << 3) & 0x10;
174 b = (d2 ^ (d2 >> 1)) & 0x55; 175
175 c = (d3 ^ (d3 >> 1)) & 0x54; 176 byteoffs |= (s0 >> 4) & 0x08;
176 177 byteoffs |= (s0 >> 3) & 0x04;
177 /* Found and will correct single bit error in the data */ 178 byteoffs |= (s0 >> 2) & 0x02;
178 if ((a == 0x55) && (b == 0x55) && (c == 0x54)) { 179 byteoffs |= (s0 >> 1) & 0x01;
179 c = 0x80; 180
180 add = 0; 181 bitnum = (s2 >> 5) & 0x04;
181 a = 0x80; 182 bitnum |= (s2 >> 4) & 0x02;
182 for (i=0; i<4; i++) { 183 bitnum |= (s2 >> 3) & 0x01;
183 if (d1 & c) 184
184 add |= a; 185 dat[byteoffs] ^= (1 << bitnum);
185 c >>= 2; 186
186 a >>= 1; 187 return 1;
187 }
188 c = 0x80;
189 for (i=0; i<4; i++) {
190 if (d2 & c)
191 add |= a;
192 c >>= 2;
193 a >>= 1;
194 }
195 bit = 0;
196 b = 0x04;
197 c = 0x80;
198 for (i=0; i<3; i++) {
199 if (d3 & c)
200 bit |= b;
201 c >>= 2;
202 b >>= 1;
203 }
204 b = 0x01;
205 a = dat[add];
206 a ^= (b << bit);
207 dat[add] = a;
208 return 1;
209 }
210 else {
211 i = 0;
212 while (d1) {
213 if (d1 & 0x01)
214 ++i;
215 d1 >>= 1;
216 }
217 while (d2) {
218 if (d2 & 0x01)
219 ++i;
220 d2 >>= 1;
221 }
222 while (d3) {
223 if (d3 & 0x01)
224 ++i;
225 d3 >>= 1;
226 }
227 if (i == 1) {
228 /* ECC Code Error Correction */
229 read_ecc[0] = calc_ecc[0];
230 read_ecc[1] = calc_ecc[1];
231 read_ecc[2] = calc_ecc[2];
232 return 2;
233 }
234 else {
235 /* Uncorrectable Error */
236 return -1;
237 }
238 }
239 } 188 }
240 189
241 /* Should never happen */ 190 if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1)
191 return 1;
192
242 return -1; 193 return -1;
243} 194}
244
245EXPORT_SYMBOL(nand_calculate_ecc);
246EXPORT_SYMBOL(nand_correct_data); 195EXPORT_SYMBOL(nand_correct_data);
247 196
248MODULE_LICENSE("GPL"); 197MODULE_LICENSE("GPL");
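
The rewritten nand_correct_data() above implements the classic 256-byte data / 3-byte ECC Hamming-style scheme: XORing the stored and recomputed ECC gives a syndrome; if every odd/even parity pair in the syndrome disagrees, exactly one data bit flipped and the set half of each pair spells out its byte and bit position; if exactly one syndrome bit is set, the flip was in the ECC bytes themselves. A self-contained restatement of that decision logic (plain C, non-SMC byte order as in the #else branch above, purely illustrative):

#include <stdint.h>

static int ecc_popcount(uint32_t v)
{
        int n = 0;

        for (; v; v >>= 1)
                n += v & 1;
        return n;
}

/* Returns 0: no error, 1: corrected (data or ECC bit), -1: uncorrectable */
static int ecc_correct_256(uint8_t *dat, const uint8_t *read_ecc,
                           const uint8_t *calc_ecc)
{
        uint8_t s1 = calc_ecc[0] ^ read_ecc[0]; /* line parity pairs LP15..LP8 */
        uint8_t s0 = calc_ecc[1] ^ read_ecc[1]; /* line parity pairs LP7..LP0 */
        uint8_t s2 = calc_ecc[2] ^ read_ecc[2]; /* column parity pairs CP5..CP0 */

        if ((s0 | s1 | s2) == 0)
                return 0;

        /* single data-bit error: every parity pair of the syndrome differs */
        if (((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
            ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
            ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
                uint32_t byteoffs, bitnum;

                /* the set bit of each pair contributes one address bit */
                byteoffs  = (s1 << 0) & 0x80;
                byteoffs |= (s1 << 1) & 0x40;
                byteoffs |= (s1 << 2) & 0x20;
                byteoffs |= (s1 << 3) & 0x10;
                byteoffs |= (s0 >> 4) & 0x08;
                byteoffs |= (s0 >> 3) & 0x04;
                byteoffs |= (s0 >> 2) & 0x02;
                byteoffs |= (s0 >> 1) & 0x01;

                bitnum  = (s2 >> 5) & 0x04;
                bitnum |= (s2 >> 4) & 0x02;
                bitnum |= (s2 >> 3) & 0x01;

                dat[byteoffs] ^= 1u << bitnum;
                return 1;
        }

        /* exactly one syndrome bit set: the error hit the stored ECC bytes */
        if (ecc_popcount(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 << 16)) == 1)
                return 1;

        return -1;
}
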
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index dbc7e55a4247..2e2cdf2fc91d 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -18,99 +18,110 @@
18* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size, 18* Name. ID code, pagesize, chipsize in MegaByte, eraseblock size,
19* options 19* options
20* 20*
21* Pagesize; 0, 256, 512 21* Pagesize; 0, 256, 512
22* 0 get this information from the extended chip ID 22* 0 get this information from the extended chip ID
23+ 256 256 Byte page size 23+ 256 256 Byte page size
24* 512 512 Byte page size 24* 512 512 Byte page size
25*/ 25*/
26struct nand_flash_dev nand_flash_ids[] = { 26struct nand_flash_dev nand_flash_ids[] = {
27 {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, 27 {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0},
28 {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, 28 {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0},
29 {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, 29 {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0},
30 {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, 30 {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0},
31 {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, 31 {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0},
32 {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, 32 {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0},
33 {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, 33 {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0},
34 {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, 34 {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0},
35 {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, 35 {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0},
36 {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, 36 {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0},
37 37
38 {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, 38 {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0},
39 {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, 39 {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0},
40 {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 40 {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16},
41 {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, 41 {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16},
42 42
43 {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, 43 {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0},
44 {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, 44 {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0},
45 {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, 45 {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16},
46 {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, 46 {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16},
47 47
48 {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, 48 {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0},
49 {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, 49 {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0},
50 {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, 50 {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16},
51 {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, 51 {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16},
52 52
53 {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, 53 {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0},
54 {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, 54 {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0},
55 {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, 55 {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16},
56 {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, 56 {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16},
57 57
58 {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, 58 {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0},
59 {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, 59 {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0},
60 {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, 60 {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0},
61 {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 61 {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16},
62 {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 62 {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16},
63 {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 63 {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16},
64 {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, 64 {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16},
65 65
66 {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, 66 {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0},
67 67
68 /* These are the new chips with large page size. The pagesize 68 /*
69 * and the erasesize is determined from the extended id bytes 69 * These are the new chips with large page size. The pagesize and the
70 */ 70 * erasesize is determined from the extended id bytes
71 */
72#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
73#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
74
71 /*512 Megabit */ 75 /*512 Megabit */
72 {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 76 {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
73 {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 77 {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
74 {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 78 {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
75 {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 79 {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
76 80
77 /* 1 Gigabit */ 81 /* 1 Gigabit */
78 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 82 {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS},
79 {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 83 {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS},
80 {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 84 {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16},
81 {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 85 {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16},
82 86
83 /* 2 Gigabit */ 87 /* 2 Gigabit */
84 {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 88 {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS},
85 {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 89 {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS},
86 {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 90 {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16},
87 {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 91 {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16},
88 92
89 /* 4 Gigabit */ 93 /* 4 Gigabit */
90 {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 94 {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS},
91 {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 95 {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS},
92 {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 96 {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16},
93 {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 97 {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16},
94 98
95 /* 8 Gigabit */ 99 /* 8 Gigabit */
96 {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 100 {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS},
97 {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 101 {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS},
98 {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 102 {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16},
99 {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 103 {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16},
100 104
101 /* 16 Gigabit */ 105 /* 16 Gigabit */
102 {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 106 {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS},
103 {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, 107 {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS},
104 {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 108 {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16},
105 {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, 109 {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16},
106 110
107 /* Renesas AND 1 Gigabit. Those chips do not support extended id and have a strange page/block layout ! 111 /*
108 * The chosen minimum erasesize is 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page planes 112 * Renesas AND 1 Gigabit. Those chips do not support extended id and
109 * 1 block = 2 pages, but due to plane arrangement the blocks 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 113 * have a strange page/block layout ! The chosen minimum erasesize is
110 * Anyway JFFS2 would increase the eraseblock size so we chose a combined one which can be erased in one go 114 * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page
111 * There are more speed improvements for reads and writes possible, but not implemented now 115 * planes 1 block = 2 pages, but due to plane arrangement the blocks
116 * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would
117 * increase the eraseblock size so we chose a combined one which can be
118 * erased in one go There are more speed improvements for reads and
119 * writes possible, but not implemented now
112 */ 120 */
113 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, NAND_IS_AND | NAND_NO_AUTOINCR | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, 121 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
122 NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
123 BBT_AUTO_REFRESH
124 },
114 125
115 {NULL,} 126 {NULL,}
116}; 127};
@@ -125,13 +136,13 @@ struct nand_manufacturers nand_manuf_ids[] = {
125 {NAND_MFR_NATIONAL, "National"}, 136 {NAND_MFR_NATIONAL, "National"},
126 {NAND_MFR_RENESAS, "Renesas"}, 137 {NAND_MFR_RENESAS, "Renesas"},
127 {NAND_MFR_STMICRO, "ST Micro"}, 138 {NAND_MFR_STMICRO, "ST Micro"},
128 {NAND_MFR_HYNIX, "Hynix"}, 139 {NAND_MFR_HYNIX, "Hynix"},
129 {0x0, "Unknown"} 140 {0x0, "Unknown"}
130}; 141};
131 142
132EXPORT_SYMBOL (nand_manuf_ids); 143EXPORT_SYMBOL(nand_manuf_ids);
133EXPORT_SYMBOL (nand_flash_ids); 144EXPORT_SYMBOL(nand_flash_ids);
134 145
135MODULE_LICENSE ("GPL"); 146MODULE_LICENSE("GPL");
136MODULE_AUTHOR ("Thomas Gleixner <tglx@linutronix.de>"); 147MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
137MODULE_DESCRIPTION ("Nand device & manufacturer ID's"); 148MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index a0af92cc7efd..ebd64abc8be8 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -369,7 +369,7 @@ init_nandsim(struct mtd_info *mtd)
369 /* Initialize the NAND flash parameters */ 369 /* Initialize the NAND flash parameters */
370 ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8; 370 ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
371 ns->geom.totsz = mtd->size; 371 ns->geom.totsz = mtd->size;
372 ns->geom.pgsz = mtd->oobblock; 372 ns->geom.pgsz = mtd->writesize;
373 ns->geom.oobsz = mtd->oobsize; 373 ns->geom.oobsz = mtd->oobsize;
374 ns->geom.secsz = mtd->erasesize; 374 ns->geom.secsz = mtd->erasesize;
375 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz; 375 ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
@@ -1071,68 +1071,6 @@ switch_state(struct nandsim *ns)
1071 } 1071 }
1072} 1072}
1073 1073
1074static void
1075ns_hwcontrol(struct mtd_info *mtd, int cmd)
1076{
1077 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
1078
1079 switch (cmd) {
1080
1081 /* set CLE line high */
1082 case NAND_CTL_SETCLE:
1083 NS_DBG("ns_hwcontrol: start command latch cycles\n");
1084 ns->lines.cle = 1;
1085 break;
1086
1087 /* set CLE line low */
1088 case NAND_CTL_CLRCLE:
1089 NS_DBG("ns_hwcontrol: stop command latch cycles\n");
1090 ns->lines.cle = 0;
1091 break;
1092
1093 /* set ALE line high */
1094 case NAND_CTL_SETALE:
1095 NS_DBG("ns_hwcontrol: start address latch cycles\n");
1096 ns->lines.ale = 1;
1097 break;
1098
1099 /* set ALE line low */
1100 case NAND_CTL_CLRALE:
1101 NS_DBG("ns_hwcontrol: stop address latch cycles\n");
1102 ns->lines.ale = 0;
1103 break;
1104
1105 /* set WP line high */
1106 case NAND_CTL_SETWP:
1107 NS_DBG("ns_hwcontrol: enable write protection\n");
1108 ns->lines.wp = 1;
1109 break;
1110
1111 /* set WP line low */
1112 case NAND_CTL_CLRWP:
1113 NS_DBG("ns_hwcontrol: disable write protection\n");
1114 ns->lines.wp = 0;
1115 break;
1116
1117 /* set CE line low */
1118 case NAND_CTL_SETNCE:
1119 NS_DBG("ns_hwcontrol: enable chip\n");
1120 ns->lines.ce = 1;
1121 break;
1122
1123 /* set CE line high */
1124 case NAND_CTL_CLRNCE:
1125 NS_DBG("ns_hwcontrol: disable chip\n");
1126 ns->lines.ce = 0;
1127 break;
1128
1129 default:
1130 NS_ERR("hwcontrol: unknown command\n");
1131 }
1132
1133 return;
1134}
1135
1136static u_char 1074static u_char
1137ns_nand_read_byte(struct mtd_info *mtd) 1075ns_nand_read_byte(struct mtd_info *mtd)
1138{ 1076{
@@ -1359,6 +1297,18 @@ ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
1359 return; 1297 return;
1360} 1298}
1361 1299
1300static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
1301{
1302 struct nandsim *ns = ((struct nand_chip *)mtd->priv)->priv;
1303
1304 ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
1305 ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
1306 ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
1307
1308 if (cmd != NAND_CMD_NONE)
1309 ns_nand_write_byte(mtd, cmd);
1310}
1311
1362static int 1312static int
1363ns_device_ready(struct mtd_info *mtd) 1313ns_device_ready(struct mtd_info *mtd)
1364{ 1314{
@@ -1377,17 +1327,6 @@ ns_nand_read_word(struct mtd_info *mtd)
1377} 1327}
1378 1328
1379static void 1329static void
1380ns_nand_write_word(struct mtd_info *mtd, uint16_t word)
1381{
1382 struct nand_chip *chip = (struct nand_chip *)mtd->priv;
1383
1384 NS_DBG("write_word\n");
1385
1386 chip->write_byte(mtd, word & 0xFF);
1387 chip->write_byte(mtd, word >> 8);
1388}
1389
1390static void
1391ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 1330ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
1392{ 1331{
1393 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv; 1332 struct nandsim *ns = (struct nandsim *)((struct nand_chip *)mtd->priv)->priv;
@@ -1514,16 +1453,14 @@ static int __init ns_init_module(void)
1514 /* 1453 /*
1515 * Register simulator's callbacks. 1454 * Register simulator's callbacks.
1516 */ 1455 */
1517 chip->hwcontrol = ns_hwcontrol; 1456 chip->cmd_ctrl = ns_hwcontrol;
1518 chip->read_byte = ns_nand_read_byte; 1457 chip->read_byte = ns_nand_read_byte;
1519 chip->dev_ready = ns_device_ready; 1458 chip->dev_ready = ns_device_ready;
1520 chip->write_byte = ns_nand_write_byte;
1521 chip->write_buf = ns_nand_write_buf; 1459 chip->write_buf = ns_nand_write_buf;
1522 chip->read_buf = ns_nand_read_buf; 1460 chip->read_buf = ns_nand_read_buf;
1523 chip->verify_buf = ns_nand_verify_buf; 1461 chip->verify_buf = ns_nand_verify_buf;
1524 chip->write_word = ns_nand_write_word;
1525 chip->read_word = ns_nand_read_word; 1462 chip->read_word = ns_nand_read_word;
1526 chip->eccmode = NAND_ECC_SOFT; 1463 chip->ecc.mode = NAND_ECC_SOFT;
1527 chip->options |= NAND_SKIP_BBTSCAN; 1464 chip->options |= NAND_SKIP_BBTSCAN;
1528 1465
1529 /* 1466 /*
@@ -1546,6 +1483,8 @@ static int __init ns_init_module(void)
1546 chip->options |= NAND_BUSWIDTH_16; 1483 chip->options |= NAND_BUSWIDTH_16;
1547 } 1484 }
1548 1485
1486 nsmtd->owner = THIS_MODULE;
1487
1549 if ((retval = nand_scan(nsmtd, 1)) != 0) { 1488 if ((retval = nand_scan(nsmtd, 1)) != 0) {
1550 NS_ERR("can't register NAND Simulator\n"); 1489 NS_ERR("can't register NAND Simulator\n");
1551 if (retval > 0) 1490 if (retval > 0)
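
The nandsim conversion above is representative of the whole patch: the old hwcontrol()/write_byte() pair is replaced by a single cmd_ctrl(mtd, cmd, ctrl) callback, where ctrl carries the current NAND_NCE/NAND_CLE/NAND_ALE line state (plus NAND_CTRL_CHANGE when the lines changed) and cmd is the byte to latch, or NAND_CMD_NONE when only the control lines moved. A minimal sketch of a typical memory-mapped implementation; the EXAMPLE_* register offsets are placeholders, not from the patch:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <asm/io.h>

#define EXAMPLE_CLE_OFFSET      0x10    /* placeholder, board specific */
#define EXAMPLE_ALE_OFFSET      0x20    /* placeholder, board specific */

static void example_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
        struct nand_chip *chip = mtd->priv;

        if (cmd == NAND_CMD_NONE)
                return;         /* only nCE/CLE/ALE changed, nothing to write */

        if (ctrl & NAND_CLE)
                writeb(cmd, chip->IO_ADDR_W + EXAMPLE_CLE_OFFSET);
        else if (ctrl & NAND_ALE)
                writeb(cmd, chip->IO_ADDR_W + EXAMPLE_ALE_OFFSET);
        else
                writeb(cmd, chip->IO_ADDR_W);   /* plain data cycle */
}
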
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
new file mode 100644
index 000000000000..fe8d38514ba6
--- /dev/null
+++ b/drivers/mtd/nand/ndfc.c
@@ -0,0 +1,311 @@
1/*
2 * drivers/mtd/ndfc.c
3 *
4 * Overview:
  5 *  Platform independent driver for NDFC (NanD Flash Controller)
6 * integrated into EP440 cores
7 *
8 * Author: Thomas Gleixner
9 *
10 * Copyright 2006 IBM
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 */
18#include <linux/module.h>
19#include <linux/mtd/nand.h>
20#include <linux/mtd/nand_ecc.h>
21#include <linux/mtd/partitions.h>
22#include <linux/mtd/ndfc.h>
23#include <linux/mtd/mtd.h>
24#include <linux/platform_device.h>
25
26#include <asm/io.h>
27#include <asm/ibm44x.h>
28
29struct ndfc_nand_mtd {
30 struct mtd_info mtd;
31 struct nand_chip chip;
32 struct platform_nand_chip *pl_chip;
33};
34
35static struct ndfc_nand_mtd ndfc_mtd[NDFC_MAX_BANKS];
36
37struct ndfc_controller {
38 void __iomem *ndfcbase;
39 struct nand_hw_control ndfc_control;
40 atomic_t childs_active;
41};
42
43static struct ndfc_controller ndfc_ctrl;
44
45static void ndfc_select_chip(struct mtd_info *mtd, int chip)
46{
47 uint32_t ccr;
48 struct ndfc_controller *ndfc = &ndfc_ctrl;
49 struct nand_chip *nandchip = mtd->priv;
50 struct ndfc_nand_mtd *nandmtd = nandchip->priv;
51 struct platform_nand_chip *pchip = nandmtd->pl_chip;
52
53 ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR);
54 if (chip >= 0) {
55 ccr &= ~NDFC_CCR_BS_MASK;
56 ccr |= NDFC_CCR_BS(chip + pchip->chip_offset);
57 } else
58 ccr |= NDFC_CCR_RESET_CE;
59 writel(ccr, ndfc->ndfcbase + NDFC_CCR);
60}
61
62static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
63{
64 struct nand_chip *chip = mtd->priv;
65
66 if (cmd == NAND_CMD_NONE)
67 return;
68
69 if (ctrl & NAND_CLE)
70 writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_CMD);
71 else
72 writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_ALE);
73}
74
75static int ndfc_ready(struct mtd_info *mtd)
76{
77 struct ndfc_controller *ndfc = &ndfc_ctrl;
78
79 return __raw_readl(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
80}
81
82static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
83{
84 uint32_t ccr;
85 struct ndfc_controller *ndfc = &ndfc_ctrl;
86
87 ccr = __raw_readl(ndfc->ndfcbase + NDFC_CCR);
88 ccr |= NDFC_CCR_RESET_ECC;
89 __raw_writel(ccr, ndfc->ndfcbase + NDFC_CCR);
90 wmb();
91}
92
93static int ndfc_calculate_ecc(struct mtd_info *mtd,
94 const u_char *dat, u_char *ecc_code)
95{
96 struct ndfc_controller *ndfc = &ndfc_ctrl;
97 uint32_t ecc;
98 uint8_t *p = (uint8_t *)&ecc;
99
100 wmb();
101 ecc = __raw_readl(ndfc->ndfcbase + NDFC_ECC);
102 ecc_code[0] = p[1];
103 ecc_code[1] = p[2];
104 ecc_code[2] = p[3];
105
106 return 0;
107}
108
109/*
110 * Speedups for buffer read/write/verify
111 *
112 * NDFC allows 32bit read/write of data. So we can speed up the buffer
113 * functions. No further checking, as nand_base will always read/write
114 * page aligned.
115 */
116static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
117{
118 struct ndfc_controller *ndfc = &ndfc_ctrl;
119 uint32_t *p = (uint32_t *) buf;
120
121 for(;len > 0; len -= 4)
122 *p++ = __raw_readl(ndfc->ndfcbase + NDFC_DATA);
123}
124
125static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
126{
127 struct ndfc_controller *ndfc = &ndfc_ctrl;
128 uint32_t *p = (uint32_t *) buf;
129
130 for(;len > 0; len -= 4)
131 __raw_writel(*p++, ndfc->ndfcbase + NDFC_DATA);
132}
133
134static int ndfc_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
135{
136 struct ndfc_controller *ndfc = &ndfc_ctrl;
137 uint32_t *p = (uint32_t *) buf;
138
139 for(;len > 0; len -= 4)
140 if (*p++ != __raw_readl(ndfc->ndfcbase + NDFC_DATA))
141 return -EFAULT;
142 return 0;
143}
144
145/*
146 * Initialize chip structure
147 */
148static void ndfc_chip_init(struct ndfc_nand_mtd *mtd)
149{
150 struct ndfc_controller *ndfc = &ndfc_ctrl;
151 struct nand_chip *chip = &mtd->chip;
152
153 chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
154 chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
155 chip->cmd_ctrl = ndfc_hwcontrol;
156 chip->dev_ready = ndfc_ready;
157 chip->select_chip = ndfc_select_chip;
158 chip->chip_delay = 50;
159 chip->priv = mtd;
160 chip->options = mtd->pl_chip->options;
161 chip->controller = &ndfc->ndfc_control;
162 chip->read_buf = ndfc_read_buf;
163 chip->write_buf = ndfc_write_buf;
164 chip->verify_buf = ndfc_verify_buf;
165 chip->ecc.correct = nand_correct_data;
166 chip->ecc.hwctl = ndfc_enable_hwecc;
167 chip->ecc.calculate = ndfc_calculate_ecc;
168 chip->ecc.mode = NAND_ECC_HW;
169 chip->ecc.size = 256;
170 chip->ecc.bytes = 3;
171 chip->ecclayout = mtd->pl_chip->ecclayout;
172 mtd->mtd.priv = chip;
173 mtd->mtd.owner = THIS_MODULE;
174}
175
176static int ndfc_chip_probe(struct platform_device *pdev)
177{
178 struct platform_nand_chip *nc = pdev->dev.platform_data;
179 struct ndfc_chip_settings *settings = nc->priv;
180 struct ndfc_controller *ndfc = &ndfc_ctrl;
181 struct ndfc_nand_mtd *nandmtd;
182
183 if (nc->chip_offset >= NDFC_MAX_BANKS || nc->nr_chips > NDFC_MAX_BANKS)
184 return -EINVAL;
185
186 /* Set the bank settings */
187 __raw_writel(settings->bank_settings,
188 ndfc->ndfcbase + NDFC_BCFG0 + (nc->chip_offset << 2));
189
190 nandmtd = &ndfc_mtd[pdev->id];
191 if (nandmtd->pl_chip)
192 return -EBUSY;
193
194 nandmtd->pl_chip = nc;
195 ndfc_chip_init(nandmtd);
196
197 /* Scan for chips */
198 if (nand_scan(&nandmtd->mtd, nc->nr_chips)) {
199 nandmtd->pl_chip = NULL;
200 return -ENODEV;
201 }
202
203#ifdef CONFIG_MTD_PARTITIONS
204 printk("Number of partitions %d\n", nc->nr_partitions);
205 if (nc->nr_partitions) {
206 /* Add the full device, so complete dumps can be made */
207 add_mtd_device(&nandmtd->mtd);
208 add_mtd_partitions(&nandmtd->mtd, nc->partitions,
209 nc->nr_partitions);
210
211 } else
212#else
213 add_mtd_device(&nandmtd->mtd);
214#endif
215
216 atomic_inc(&ndfc->childs_active);
217 return 0;
218}
219
220static int ndfc_chip_remove(struct platform_device *pdev)
221{
222 return 0;
223}
224
225static int ndfc_nand_probe(struct platform_device *pdev)
226{
227 struct platform_nand_ctrl *nc = pdev->dev.platform_data;
228 struct ndfc_controller_settings *settings = nc->priv;
229 struct resource *res = pdev->resource;
230 struct ndfc_controller *ndfc = &ndfc_ctrl;
231 unsigned long long phys = settings->ndfc_erpn | res->start;
232
233 ndfc->ndfcbase = ioremap64(phys, res->end - res->start + 1);
234 if (!ndfc->ndfcbase) {
235 printk(KERN_ERR "NDFC: ioremap failed\n");
236 return -EIO;
237 }
238
239 __raw_writel(settings->ccr_settings, ndfc->ndfcbase + NDFC_CCR);
240
241 spin_lock_init(&ndfc->ndfc_control.lock);
242 init_waitqueue_head(&ndfc->ndfc_control.wq);
243
244 platform_set_drvdata(pdev, ndfc);
245
246 printk("NDFC NAND Driver initialized. Chip-Rev: 0x%08x\n",
247 __raw_readl(ndfc->ndfcbase + NDFC_REVID));
248
249 return 0;
250}
251
252static int ndfc_nand_remove(struct platform_device *pdev)
253{
254 struct ndfc_controller *ndfc = platform_get_drvdata(pdev);
255
256 if (atomic_read(&ndfc->childs_active))
257 return -EBUSY;
258
259 if (ndfc) {
260 platform_set_drvdata(pdev, NULL);
261 iounmap(ndfc_ctrl.ndfcbase);
262 ndfc_ctrl.ndfcbase = NULL;
263 }
264 return 0;
265}
266
267/* driver device registration */
268
269static struct platform_driver ndfc_chip_driver = {
270 .probe = ndfc_chip_probe,
271 .remove = ndfc_chip_remove,
272 .driver = {
273 .name = "ndfc-chip",
274 .owner = THIS_MODULE,
275 },
276};
277
278static struct platform_driver ndfc_nand_driver = {
279 .probe = ndfc_nand_probe,
280 .remove = ndfc_nand_remove,
281 .driver = {
282 .name = "ndfc-nand",
283 .owner = THIS_MODULE,
284 },
285};
286
287static int __init ndfc_nand_init(void)
288{
289 int ret;
290
291 spin_lock_init(&ndfc_ctrl.ndfc_control.lock);
292 init_waitqueue_head(&ndfc_ctrl.ndfc_control.wq);
293
294 ret = platform_driver_register(&ndfc_nand_driver);
295 if (!ret)
296 ret = platform_driver_register(&ndfc_chip_driver);
297 return ret;
298}
299
300static void __exit ndfc_nand_exit(void)
301{
302 platform_driver_unregister(&ndfc_chip_driver);
303 platform_driver_unregister(&ndfc_nand_driver);
304}
305
306module_init(ndfc_nand_init);
307module_exit(ndfc_nand_exit);
308
309MODULE_LICENSE("GPL");
310MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
311MODULE_DESCRIPTION("Platform driver for NDFC");
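
A second theme visible in ndfc_chip_init() above is the move from the old flat eccmode field to the embedded chip->ecc structure. A sketch of the hardware-ECC wiring this driver uses, with placeholder callback bodies (the real ones are the ndfc_* functions above):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>

static void example_enable_hwecc(struct mtd_info *mtd, int mode)
{
        /* arm / reset the controller's ECC engine before the data transfer */
}

static int example_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
                                 u_char *ecc_code)
{
        /* read back the 3 ECC bytes the controller accumulated in hardware */
        return 0;
}

static void example_ecc_setup(struct nand_chip *chip)
{
        chip->ecc.mode      = NAND_ECC_HW;
        chip->ecc.size      = 256;      /* data bytes covered per ECC step */
        chip->ecc.bytes     = 3;        /* ECC bytes produced per step */
        chip->ecc.hwctl     = example_enable_hwecc;
        chip->ecc.calculate = example_calculate_ecc;
        chip->ecc.correct   = nand_correct_data; /* generic software correction */
}
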
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 91a95f34a6ee..22fa65c12ab9 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -58,21 +58,21 @@
58/* 58/*
59 * MTD structure for PPChameleonEVB board 59 * MTD structure for PPChameleonEVB board
60 */ 60 */
61static struct mtd_info *ppchameleon_mtd = NULL; 61static struct mtd_info *ppchameleon_mtd = NULL;
62static struct mtd_info *ppchameleonevb_mtd = NULL; 62static struct mtd_info *ppchameleonevb_mtd = NULL;
63 63
64/* 64/*
65 * Module stuff 65 * Module stuff
66 */ 66 */
67static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR; 67static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR;
68static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR; 68static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR;
69 69
70#ifdef MODULE 70#ifdef MODULE
71module_param(ppchameleon_fio_pbase, ulong, 0); 71module_param(ppchameleon_fio_pbase, ulong, 0);
72module_param(ppchameleonevb_fio_pbase, ulong, 0); 72module_param(ppchameleonevb_fio_pbase, ulong, 0);
73#else 73#else
74__setup("ppchameleon_fio_pbase=",ppchameleon_fio_pbase); 74__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase);
75__setup("ppchameleonevb_fio_pbase=",ppchameleonevb_fio_pbase); 75__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase);
76#endif 76#endif
77 77
78#ifdef CONFIG_MTD_PARTITIONS 78#ifdef CONFIG_MTD_PARTITIONS
@@ -80,82 +80,96 @@ __setup("ppchameleonevb_fio_pbase=",ppchameleonevb_fio_pbase);
80 * Define static partitions for flash devices 80 * Define static partitions for flash devices
81 */ 81 */
82static struct mtd_partition partition_info_hi[] = { 82static struct mtd_partition partition_info_hi[] = {
83 { name: "PPChameleon HI Nand Flash", 83 { .name = "PPChameleon HI Nand Flash",
 84   offset: 0,                                     84   .offset = 0,
85 size: 128*1024*1024 } 85 .size = 128 * 1024 * 1024
86 }
86}; 87};
87 88
88static struct mtd_partition partition_info_me[] = { 89static struct mtd_partition partition_info_me[] = {
89 { name: "PPChameleon ME Nand Flash", 90 { .name = "PPChameleon ME Nand Flash",
90 offset: 0, 91 .offset = 0,
91 size: 32*1024*1024 } 92 .size = 32 * 1024 * 1024
93 }
92}; 94};
93 95
94static struct mtd_partition partition_info_evb[] = { 96static struct mtd_partition partition_info_evb[] = {
95 { name: "PPChameleonEVB Nand Flash", 97 { .name = "PPChameleonEVB Nand Flash",
96 offset: 0, 98 .offset = 0,
97 size: 32*1024*1024 } 99 .size = 32 * 1024 * 1024
100 }
98}; 101};
99 102
100#define NUM_PARTITIONS 1 103#define NUM_PARTITIONS 1
101 104
102extern int parse_cmdline_partitions(struct mtd_info *master, 105extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
103 struct mtd_partition **pparts,
104 const char *mtd_id);
105#endif 106#endif
106 107
107
108/* 108/*
109 * hardware specific access to control-lines 109 * hardware specific access to control-lines
110 */ 110 */
111static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd) 111static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd,
112 unsigned int ctrl)
112{ 113{
113 switch(cmd) { 114 struct nand_chip *chip = mtd->priv;
114 115
115 case NAND_CTL_SETCLE: 116 if (ctrl & NAND_CTRL_CHANGE) {
116 MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR); 117#error Missing headerfiles. No way to fix this. -tglx
117 break; 118 switch (cmd) {
118 case NAND_CTL_CLRCLE: 119 case NAND_CTL_SETCLE:
119 MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR); 120 MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR);
120 break; 121 break;
121 case NAND_CTL_SETALE: 122 case NAND_CTL_CLRCLE:
122 MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR); 123 MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR);
123 break; 124 break;
124 case NAND_CTL_CLRALE: 125 case NAND_CTL_SETALE:
125 MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR); 126 MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR);
126 break; 127 break;
127 case NAND_CTL_SETNCE: 128 case NAND_CTL_CLRALE:
129 MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR);
130 break;
131 case NAND_CTL_SETNCE:
128 MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR); 132 MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR);
129 break; 133 break;
130 case NAND_CTL_CLRNCE: 134 case NAND_CTL_CLRNCE:
131 MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR); 135 MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR);
132 break; 136 break;
137 }
133 } 138 }
139 if (cmd != NAND_CMD_NONE)
140 writeb(cmd, chip->IO_ADDR_W);
134} 141}
135 142
136static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd) 143static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd,
144 unsigned int ctrl)
137{ 145{
138 switch(cmd) { 146 struct nand_chip *chip = mtd->priv;
139 147
140 case NAND_CTL_SETCLE: 148 if (ctrl & NAND_CTRL_CHANGE) {
141 MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR); 149#error Missing headerfiles. No way to fix this. -tglx
142 break; 150 switch (cmd) {
143 case NAND_CTL_CLRCLE: 151 case NAND_CTL_SETCLE:
144 MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR); 152 MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR);
145 break; 153 break;
146 case NAND_CTL_SETALE: 154 case NAND_CTL_CLRCLE:
147 MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR); 155 MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR);
148 break; 156 break;
149 case NAND_CTL_CLRALE: 157 case NAND_CTL_SETALE:
150 MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR); 158 MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR);
151 break; 159 break;
152 case NAND_CTL_SETNCE: 160 case NAND_CTL_CLRALE:
153 MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR); 161 MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR);
154 break; 162 break;
155 case NAND_CTL_CLRNCE: 163 case NAND_CTL_SETNCE:
156 MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR); 164 MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR);
157 break; 165 break;
166 case NAND_CTL_CLRNCE:
167 MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR);
168 break;
169 }
158 } 170 }
171 if (cmd != NAND_CMD_NONE)
172 writeb(cmd, chip->IO_ADDR_W);
159} 173}
160 174
161#ifdef USE_READY_BUSY_PIN 175#ifdef USE_READY_BUSY_PIN
@@ -164,15 +178,15 @@ static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd)
164 */ 178 */
165static int ppchameleon_device_ready(struct mtd_info *minfo) 179static int ppchameleon_device_ready(struct mtd_info *minfo)
166{ 180{
167 if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_RB_GPIO_PIN) 181 if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN)
168 return 1; 182 return 1;
169 return 0; 183 return 0;
170} 184}
171 185
172static int ppchameleonevb_device_ready(struct mtd_info *minfo) 186static int ppchameleonevb_device_ready(struct mtd_info *minfo)
173{ 187{
174 if (in_be32((volatile unsigned*)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN) 188 if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN)
175 return 1; 189 return 1;
176 return 0; 190 return 0;
177} 191}
178#endif 192#endif
@@ -185,7 +199,7 @@ const char *part_probes_evb[] = { "cmdlinepart", NULL };
185/* 199/*
186 * Main initialization routine 200 * Main initialization routine
187 */ 201 */
188static int __init ppchameleonevb_init (void) 202static int __init ppchameleonevb_init(void)
189{ 203{
190 struct nand_chip *this; 204 struct nand_chip *this;
191 const char *part_type = 0; 205 const char *part_type = 0;
@@ -194,13 +208,11 @@ static int __init ppchameleonevb_init (void)
194 void __iomem *ppchameleon_fio_base; 208 void __iomem *ppchameleon_fio_base;
195 void __iomem *ppchameleonevb_fio_base; 209 void __iomem *ppchameleonevb_fio_base;
196 210
197
198 /********************************* 211 /*********************************
199 * Processor module NAND (if any) * 212 * Processor module NAND (if any) *
200 *********************************/ 213 *********************************/
201 /* Allocate memory for MTD device structure and private data */ 214 /* Allocate memory for MTD device structure and private data */
202 ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + 215 ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
203 sizeof(struct nand_chip), GFP_KERNEL);
204 if (!ppchameleon_mtd) { 216 if (!ppchameleon_mtd) {
205 printk("Unable to allocate PPChameleon NAND MTD device structure.\n"); 217 printk("Unable to allocate PPChameleon NAND MTD device structure.\n");
206 return -ENOMEM; 218 return -ENOMEM;
@@ -208,63 +220,65 @@ static int __init ppchameleonevb_init (void)
208 220
209 /* map physical address */ 221 /* map physical address */
210 ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M); 222 ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M);
211 if(!ppchameleon_fio_base) { 223 if (!ppchameleon_fio_base) {
212 printk("ioremap PPChameleon NAND flash failed\n"); 224 printk("ioremap PPChameleon NAND flash failed\n");
213 kfree(ppchameleon_mtd); 225 kfree(ppchameleon_mtd);
214 return -EIO; 226 return -EIO;
215 } 227 }
216 228
217 /* Get pointer to private data */ 229 /* Get pointer to private data */
218 this = (struct nand_chip *) (&ppchameleon_mtd[1]); 230 this = (struct nand_chip *)(&ppchameleon_mtd[1]);
219 231
220 /* Initialize structures */ 232 /* Initialize structures */
221 memset((char *) ppchameleon_mtd, 0, sizeof(struct mtd_info)); 233 memset(ppchameleon_mtd, 0, sizeof(struct mtd_info));
222 memset((char *) this, 0, sizeof(struct nand_chip)); 234 memset(this, 0, sizeof(struct nand_chip));
223 235
224 /* Link the private data with the MTD structure */ 236 /* Link the private data with the MTD structure */
225 ppchameleon_mtd->priv = this; 237 ppchameleon_mtd->priv = this;
238 ppchameleon_mtd->owner = THIS_MODULE;
226 239
227 /* Initialize GPIOs */ 240 /* Initialize GPIOs */
228 /* Pin mapping for NAND chip */ 241 /* Pin mapping for NAND chip */
229 /* 242 /*
230 CE GPIO_01 243 CE GPIO_01
231 CLE GPIO_02 244 CLE GPIO_02
232 ALE GPIO_03 245 ALE GPIO_03
233 R/B GPIO_04 246 R/B GPIO_04
234 */ 247 */
235 /* output select */ 248 /* output select */
236 out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xC0FFFFFF); 249 out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF);
237 /* three-state select */ 250 /* three-state select */
238 out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xC0FFFFFF); 251 out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF);
239 /* enable output driver */ 252 /* enable output driver */
240 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN); 253 out_be32((volatile unsigned *)GPIO0_TCR,
254 in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN);
241#ifdef USE_READY_BUSY_PIN 255#ifdef USE_READY_BUSY_PIN
242 /* three-state select */ 256 /* three-state select */
243 out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFF3FFFFF); 257 out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF);
244 /* high-impedecence */ 258 /* high-impedecence */
245 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_RB_GPIO_PIN)); 259 out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN));
246 /* input select */ 260 /* input select */
247 out_be32((volatile unsigned*)GPIO0_ISR1H, (in_be32((volatile unsigned*)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000); 261 out_be32((volatile unsigned *)GPIO0_ISR1H,
262 (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000);
248#endif 263#endif
249 264
250 /* insert callbacks */ 265 /* insert callbacks */
251 this->IO_ADDR_R = ppchameleon_fio_base; 266 this->IO_ADDR_R = ppchameleon_fio_base;
252 this->IO_ADDR_W = ppchameleon_fio_base; 267 this->IO_ADDR_W = ppchameleon_fio_base;
253 this->hwcontrol = ppchameleon_hwcontrol; 268 this->cmd_ctrl = ppchameleon_hwcontrol;
254#ifdef USE_READY_BUSY_PIN 269#ifdef USE_READY_BUSY_PIN
255 this->dev_ready = ppchameleon_device_ready; 270 this->dev_ready = ppchameleon_device_ready;
256#endif 271#endif
257 this->chip_delay = NAND_BIG_DELAY_US; 272 this->chip_delay = NAND_BIG_DELAY_US;
258 /* ECC mode */ 273 /* ECC mode */
259 this->eccmode = NAND_ECC_SOFT; 274 this->ecc.mode = NAND_ECC_SOFT;
260 275
261 /* Scan to find existence of the device (it could not be mounted) */ 276 /* Scan to find existence of the device (it could not be mounted) */
262 if (nand_scan (ppchameleon_mtd, 1)) { 277 if (nand_scan(ppchameleon_mtd, 1)) {
263 iounmap((void *)ppchameleon_fio_base); 278 iounmap((void *)ppchameleon_fio_base);
264 kfree (ppchameleon_mtd); 279 kfree(ppchameleon_mtd);
265 goto nand_evb_init; 280 goto nand_evb_init;
266 } 281 }
267
268#ifndef USE_READY_BUSY_PIN 282#ifndef USE_READY_BUSY_PIN
269 /* Adjust delay if necessary */ 283 /* Adjust delay if necessary */
270 if (ppchameleon_mtd->size == NAND_SMALL_SIZE) 284 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
@@ -275,12 +289,11 @@ static int __init ppchameleonevb_init (void)
275 ppchameleon_mtd->name = "ppchameleon-nand"; 289 ppchameleon_mtd->name = "ppchameleon-nand";
276 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0); 290 mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
277 if (mtd_parts_nb > 0) 291 if (mtd_parts_nb > 0)
278 part_type = "command line"; 292 part_type = "command line";
279 else 293 else
280 mtd_parts_nb = 0; 294 mtd_parts_nb = 0;
281#endif 295#endif
282 if (mtd_parts_nb == 0) 296 if (mtd_parts_nb == 0) {
283 {
284 if (ppchameleon_mtd->size == NAND_SMALL_SIZE) 297 if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
285 mtd_parts = partition_info_me; 298 mtd_parts = partition_info_me;
286 else 299 else
@@ -293,13 +306,12 @@ static int __init ppchameleonevb_init (void)
293 printk(KERN_NOTICE "Using %s partition definition\n", part_type); 306 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
294 add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb); 307 add_mtd_partitions(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
295 308
296nand_evb_init: 309 nand_evb_init:
297 /**************************** 310 /****************************
298 * EVB NAND (always present) * 311 * EVB NAND (always present) *
299 ****************************/ 312 ****************************/
300 /* Allocate memory for MTD device structure and private data */ 313 /* Allocate memory for MTD device structure and private data */
301 ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + 314 ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
302 sizeof(struct nand_chip), GFP_KERNEL);
303 if (!ppchameleonevb_mtd) { 315 if (!ppchameleonevb_mtd) {
304 printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n"); 316 printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n");
305 return -ENOMEM; 317 return -ENOMEM;
@@ -307,77 +319,76 @@ nand_evb_init:
307 319
308 /* map physical address */ 320 /* map physical address */
309 ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M); 321 ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M);
310 if(!ppchameleonevb_fio_base) { 322 if (!ppchameleonevb_fio_base) {
311 printk("ioremap PPChameleonEVB NAND flash failed\n"); 323 printk("ioremap PPChameleonEVB NAND flash failed\n");
312 kfree(ppchameleonevb_mtd); 324 kfree(ppchameleonevb_mtd);
313 return -EIO; 325 return -EIO;
314 } 326 }
315 327
316 /* Get pointer to private data */ 328 /* Get pointer to private data */
317 this = (struct nand_chip *) (&ppchameleonevb_mtd[1]); 329 this = (struct nand_chip *)(&ppchameleonevb_mtd[1]);
318 330
319 /* Initialize structures */ 331 /* Initialize structures */
320 memset((char *) ppchameleonevb_mtd, 0, sizeof(struct mtd_info)); 332 memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info));
321 memset((char *) this, 0, sizeof(struct nand_chip)); 333 memset(this, 0, sizeof(struct nand_chip));
322 334
323 /* Link the private data with the MTD structure */ 335 /* Link the private data with the MTD structure */
324 ppchameleonevb_mtd->priv = this; 336 ppchameleonevb_mtd->priv = this;
325 337
326 /* Initialize GPIOs */ 338 /* Initialize GPIOs */
327 /* Pin mapping for NAND chip */ 339 /* Pin mapping for NAND chip */
328 /* 340 /*
329 CE GPIO_14 341 CE GPIO_14
330 CLE GPIO_15 342 CLE GPIO_15
331 ALE GPIO_16 343 ALE GPIO_16
332 R/B GPIO_31 344 R/B GPIO_31
333 */ 345 */
334 /* output select */ 346 /* output select */
335 out_be32((volatile unsigned*)GPIO0_OSRH, in_be32((volatile unsigned*)GPIO0_OSRH) & 0xFFFFFFF0); 347 out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0);
336 out_be32((volatile unsigned*)GPIO0_OSRL, in_be32((volatile unsigned*)GPIO0_OSRL) & 0x3FFFFFFF); 348 out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF);
337 /* three-state select */ 349 /* three-state select */
338 out_be32((volatile unsigned*)GPIO0_TSRH, in_be32((volatile unsigned*)GPIO0_TSRH) & 0xFFFFFFF0); 350 out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0);
339 out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0x3FFFFFFF); 351 out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF);
340 /* enable output driver */ 352 /* enable output driver */
341 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN | 353 out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN |
342 NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN); 354 NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN);
343#ifdef USE_READY_BUSY_PIN 355#ifdef USE_READY_BUSY_PIN
344 /* three-state select */ 356 /* three-state select */
345 out_be32((volatile unsigned*)GPIO0_TSRL, in_be32((volatile unsigned*)GPIO0_TSRL) & 0xFFFFFFFC); 357 out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC);
346 /* high-impedance */ 358 /* high-impedance */
347 out_be32((volatile unsigned*)GPIO0_TCR, in_be32((volatile unsigned*)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN)); 359 out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_EVB_RB_GPIO_PIN));
348 /* input select */ 360 /* input select */
349 out_be32((volatile unsigned*)GPIO0_ISR1L, (in_be32((volatile unsigned*)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001); 361 out_be32((volatile unsigned *)GPIO0_ISR1L,
362 (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001);
350#endif 363#endif
351 364
352 /* insert callbacks */ 365 /* insert callbacks */
353 this->IO_ADDR_R = ppchameleonevb_fio_base; 366 this->IO_ADDR_R = ppchameleonevb_fio_base;
354 this->IO_ADDR_W = ppchameleonevb_fio_base; 367 this->IO_ADDR_W = ppchameleonevb_fio_base;
355 this->hwcontrol = ppchameleonevb_hwcontrol; 368 this->cmd_ctrl = ppchameleonevb_hwcontrol;
356#ifdef USE_READY_BUSY_PIN 369#ifdef USE_READY_BUSY_PIN
357 this->dev_ready = ppchameleonevb_device_ready; 370 this->dev_ready = ppchameleonevb_device_ready;
358#endif 371#endif
359 this->chip_delay = NAND_SMALL_DELAY_US; 372 this->chip_delay = NAND_SMALL_DELAY_US;
360 373
361 /* ECC mode */ 374 /* ECC mode */
362 this->eccmode = NAND_ECC_SOFT; 375 this->ecc.mode = NAND_ECC_SOFT;
363 376
364 /* Scan to find existence of the device */ 377 /* Scan to find existence of the device */
365 if (nand_scan (ppchameleonevb_mtd, 1)) { 378 if (nand_scan(ppchameleonevb_mtd, 1)) {
366 iounmap((void *)ppchameleonevb_fio_base); 379 iounmap((void *)ppchameleonevb_fio_base);
367 kfree (ppchameleonevb_mtd); 380 kfree(ppchameleonevb_mtd);
368 return -ENXIO; 381 return -ENXIO;
369 } 382 }
370
371#ifdef CONFIG_MTD_PARTITIONS 383#ifdef CONFIG_MTD_PARTITIONS
372 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; 384 ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
373 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0); 385 mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
374 if (mtd_parts_nb > 0) 386 if (mtd_parts_nb > 0)
375 part_type = "command line"; 387 part_type = "command line";
376 else 388 else
377 mtd_parts_nb = 0; 389 mtd_parts_nb = 0;
378#endif 390#endif
379 if (mtd_parts_nb == 0) 391 if (mtd_parts_nb == 0) {
380 {
381 mtd_parts = partition_info_evb; 392 mtd_parts = partition_info_evb;
382 mtd_parts_nb = NUM_PARTITIONS; 393 mtd_parts_nb = NUM_PARTITIONS;
383 part_type = "static"; 394 part_type = "static";
@@ -390,18 +401,19 @@ nand_evb_init:
390 /* Return happy */ 401 /* Return happy */
391 return 0; 402 return 0;
392} 403}
404
393module_init(ppchameleonevb_init); 405module_init(ppchameleonevb_init);
394 406
395/* 407/*
396 * Clean up routine 408 * Clean up routine
397 */ 409 */
398static void __exit ppchameleonevb_cleanup (void) 410static void __exit ppchameleonevb_cleanup(void)
399{ 411{
400 struct nand_chip *this; 412 struct nand_chip *this;
401 413
402 /* Release resources, unregister device(s) */ 414 /* Release resources, unregister device(s) */
403 nand_release (ppchameleon_mtd); 415 nand_release(ppchameleon_mtd);
404 nand_release (ppchameleonevb_mtd); 416 nand_release(ppchameleonevb_mtd);
405 417
406 /* Release iomaps */ 418 /* Release iomaps */
407 this = (struct nand_chip *) &ppchameleon_mtd[1]; 419 this = (struct nand_chip *) &ppchameleon_mtd[1];
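For reference, the PPChameleon hunks above are a mechanical port from the old nand_chip hooks (hwcontrol, eccmode) to the consolidated ones (cmd_ctrl, ecc.mode), plus CodingStyle cleanups. A minimal board driver in the new style looks roughly like the sketch below; the my_board_* names, base address, mapping size and chip_delay are illustrative assumptions, not taken from this patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <asm/io.h>

static struct mtd_info *my_board_mtd;
static void __iomem *my_board_io;

/* Placeholder latch: a real board must steer command vs. address cycles;
 * see the cmd_ctrl sketch after the rtc_from4 hunks below. */
static void my_board_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (cmd != NAND_CMD_NONE)
		writeb(cmd, chip->IO_ADDR_W);
}

static int __init my_board_nand_init(void)
{
	struct nand_chip *this;

	/* One allocation carries both the mtd_info and the nand_chip */
	my_board_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip),
			       GFP_KERNEL);
	if (!my_board_mtd)
		return -ENOMEM;
	memset(my_board_mtd, 0, sizeof(struct mtd_info) + sizeof(struct nand_chip));

	this = (struct nand_chip *)&my_board_mtd[1];
	my_board_mtd->priv = this;
	my_board_mtd->owner = THIS_MODULE;

	my_board_io = ioremap(0x10000000, 0x1000);	/* assumed base/size */
	if (!my_board_io) {
		kfree(my_board_mtd);
		return -EIO;
	}

	this->IO_ADDR_R = my_board_io;
	this->IO_ADDR_W = my_board_io;
	this->cmd_ctrl = my_board_cmd_ctrl;	/* was this->hwcontrol */
	this->chip_delay = 25;			/* tR in us, board dependent */
	this->ecc.mode = NAND_ECC_SOFT;		/* was this->eccmode */

	if (nand_scan(my_board_mtd, 1)) {
		iounmap(my_board_io);
		kfree(my_board_mtd);
		return -ENXIO;
	}

	add_mtd_device(my_board_mtd);
	return 0;
}
module_init(my_board_nand_init);

static void __exit my_board_nand_exit(void)
{
	nand_release(my_board_mtd);
	iounmap(my_board_io);
	kfree(my_board_mtd);
}
module_exit(my_board_nand_exit);

MODULE_LICENSE("GPL");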
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index 4129c03dfd90..f8c49645324d 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -97,12 +97,12 @@ static struct mtd_info *rtc_from4_mtd = NULL;
97static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE); 97static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE);
98 98
99static const struct mtd_partition partition_info[] = { 99static const struct mtd_partition partition_info[] = {
100 { 100 {
101 .name = "Renesas flash partition 1", 101 .name = "Renesas flash partition 1",
102 .offset = 0, 102 .offset = 0,
103 .size = MTDPART_SIZ_FULL 103 .size = MTDPART_SIZ_FULL},
104 },
105}; 104};
105
106#define NUM_PARTITIONS 1 106#define NUM_PARTITIONS 1
107 107
108/* 108/*
@@ -111,8 +111,8 @@ static const struct mtd_partition partition_info[] = {
111 * NAND_BBT_CREATE and/or NAND_BBT_WRITE 111 * NAND_BBT_CREATE and/or NAND_BBT_WRITE
112 * 112 *
113 */ 113 */
114static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; 114static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
115static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; 115static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
116 116
117static struct nand_bbt_descr rtc_from4_bbt_main_descr = { 117static struct nand_bbt_descr rtc_from4_bbt_main_descr = {
118 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE 118 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
@@ -134,8 +134,6 @@ static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = {
134 .pattern = mirror_pattern 134 .pattern = mirror_pattern
135}; 135};
136 136
137
138
139#ifdef RTC_FROM4_HWECC 137#ifdef RTC_FROM4_HWECC
140 138
141/* the Reed Solomon control structure */ 139/* the Reed Solomon control structure */
@@ -144,15 +142,14 @@ static struct rs_control *rs_decoder;
144/* 142/*
145 * hardware specific Out Of Band information 143 * hardware specific Out Of Band information
146 */ 144 */
147static struct nand_oobinfo rtc_from4_nand_oobinfo = { 145static struct nand_ecclayout rtc_from4_nand_oobinfo = {
148 .useecc = MTD_NANDECC_AUTOPLACE,
149 .eccbytes = 32, 146 .eccbytes = 32,
150 .eccpos = { 147 .eccpos = {
151 0, 1, 2, 3, 4, 5, 6, 7, 148 0, 1, 2, 3, 4, 5, 6, 7,
152 8, 9, 10, 11, 12, 13, 14, 15, 149 8, 9, 10, 11, 12, 13, 14, 15,
153 16, 17, 18, 19, 20, 21, 22, 23, 150 16, 17, 18, 19, 20, 21, 22, 23,
154 24, 25, 26, 27, 28, 29, 30, 31}, 151 24, 25, 26, 27, 28, 29, 30, 31},
155 .oobfree = { {32, 32} } 152 .oobfree = {{32, 32}}
156}; 153};
157 154
158/* Aargh. I missed the reversed bit order, when I 155/* Aargh. I missed the reversed bit order, when I
@@ -162,44 +159,42 @@ static struct nand_oobinfo rtc_from4_nand_oobinfo = {
162 * of the ecc byte which we get from the FPGA 159 * of the ecc byte which we get from the FPGA
163 */ 160 */
164static uint8_t revbits[256] = { 161static uint8_t revbits[256] = {
165 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 162 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
166 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, 163 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
167 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 164 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
168 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 165 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
169 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 166 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
170 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, 167 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
171 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 168 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
172 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, 169 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
173 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 170 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
174 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 171 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
175 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 172 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
176 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, 173 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
177 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 174 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
178 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, 175 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
179 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 176 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
180 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 177 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
181 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 178 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
182 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, 179 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
183 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 180 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
184 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, 181 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
185 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 182 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
186 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 183 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
187 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 184 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
188 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, 185 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
189 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 186 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
190 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, 187 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
191 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 188 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
192 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 189 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
193 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 190 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
194 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, 191 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
195 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 192 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
196 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, 193 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
197}; 194};
198 195
199#endif 196#endif
200 197
201
202
203/* 198/*
204 * rtc_from4_hwcontrol - hardware specific access to control-lines 199 * rtc_from4_hwcontrol - hardware specific access to control-lines
205 * @mtd: MTD device structure 200 * @mtd: MTD device structure
@@ -212,35 +207,20 @@ static uint8_t revbits[256] = {
212 * Address lines (A24-A22), so no action is required here. 207 * Address lines (A24-A22), so no action is required here.
213 * 208 *
214 */ 209 */
215static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd) 210static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd,
211 unsigned int ctrl)
216{ 212{
217 struct nand_chip* this = (struct nand_chip *) (mtd->priv); 213 struct nand_chip *chip = (mtd->priv);
218
219 switch(cmd) {
220 214
221 case NAND_CTL_SETCLE: 215 if (cmd == NAND_CMD_NONE)
222 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_CLE); 216 return;
223 break;
224 case NAND_CTL_CLRCLE:
225 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_CLE);
226 break;
227
228 case NAND_CTL_SETALE:
229 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_ALE);
230 break;
231 case NAND_CTL_CLRALE:
232 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_ALE);
233 break;
234 217
235 case NAND_CTL_SETNCE: 218 if (ctrl & NAND_CLE)
236 break; 219 writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE);
237 case NAND_CTL_CLRNCE: 220 else
238 break; 221 writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE);
239
240 }
241} 222}
242 223
243
244/* 224/*
245 * rtc_from4_nand_select_chip - hardware specific chip select 225 * rtc_from4_nand_select_chip - hardware specific chip select
246 * @mtd: MTD device structure 226 * @mtd: MTD device structure
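The hunk above captures the essential contract change: the old hwcontrol callback only toggled CLE/ALE/nCE state in response to NAND_CTL_SET*/CLR* requests, while the new cmd_ctrl() is handed the actual command or address byte (or NAND_CMD_NONE when only the control lines change) together with the NAND_CLE/NAND_ALE/NAND_NCE state in ctrl, and performs the bus write itself. A generic sketch for a controller that decodes CLE/ALE from address lines; the BOARD_* offsets are assumptions, not this board's values:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <asm/io.h>

#define BOARD_CLE_OFFSET	0x01000000	/* e.g. A24 wired to CLE */
#define BOARD_ALE_OFFSET	0x02000000	/* e.g. A25 wired to ALE */

static void board_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;
	void __iomem *addr = chip->IO_ADDR_W;

	/* Only the control-line state changed; nothing to put on the bus */
	if (cmd == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)		/* command latch cycle */
		addr += BOARD_CLE_OFFSET;
	else if (ctrl & NAND_ALE)	/* address latch cycle */
		addr += BOARD_ALE_OFFSET;

	writeb(cmd, addr);
}

The NAND_CMD_NONE check is not optional: the core still calls cmd_ctrl() at the end of a command sequence purely to drop the lines, and there is nothing to write in that case.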
@@ -252,26 +232,25 @@ static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd)
252 */ 232 */
253static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip) 233static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip)
254{ 234{
255 struct nand_chip *this = mtd->priv; 235 struct nand_chip *this = mtd->priv;
256 236
257 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK); 237 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK);
258 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK); 238 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK);
259 239
260 switch(chip) { 240 switch (chip) {
261 241
262 case 0: /* select slot 3 chip */ 242 case 0: /* select slot 3 chip */
263 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3); 243 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3);
264 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3); 244 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3);
265 break; 245 break;
266 case 1: /* select slot 4 chip */ 246 case 1: /* select slot 4 chip */
267 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4); 247 this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4);
268 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4); 248 this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4);
269 break; 249 break;
270 250
271 } 251 }
272} 252}
273 253
274
275/* 254/*
276 * rtc_from4_nand_device_ready - hardware specific ready/busy check 255 * rtc_from4_nand_device_ready - hardware specific ready/busy check
277 * @mtd: MTD device structure 256 * @mtd: MTD device structure
@@ -290,7 +269,6 @@ static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
290 269
291} 270}
292 271
293
294/* 272/*
295 * deplete - code to perform device recovery in case there was a power loss 273 * deplete - code to perform device recovery in case there was a power loss
296 * @mtd: MTD device structure 274 * @mtd: MTD device structure
@@ -306,24 +284,23 @@ static int rtc_from4_nand_device_ready(struct mtd_info *mtd)
306 */ 284 */
307static void deplete(struct mtd_info *mtd, int chip) 285static void deplete(struct mtd_info *mtd, int chip)
308{ 286{
309 struct nand_chip *this = mtd->priv; 287 struct nand_chip *this = mtd->priv;
310 288
311 /* wait until device is ready */ 289 /* wait until device is ready */
312 while (!this->dev_ready(mtd)); 290 while (!this->dev_ready(mtd)) ;
313 291
314 this->select_chip(mtd, chip); 292 this->select_chip(mtd, chip);
315 293
316 /* Send the commands for device recovery, phase 1 */ 294 /* Send the commands for device recovery, phase 1 */
317 this->cmdfunc (mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000); 295 this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000);
318 this->cmdfunc (mtd, NAND_CMD_DEPLETE2, -1, -1); 296 this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
319 297
320 /* Send the commands for device recovery, phase 2 */ 298 /* Send the commands for device recovery, phase 2 */
321 this->cmdfunc (mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004); 299 this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004);
322 this->cmdfunc (mtd, NAND_CMD_DEPLETE2, -1, -1); 300 this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1);
323 301
324} 302}
325 303
326
327#ifdef RTC_FROM4_HWECC 304#ifdef RTC_FROM4_HWECC
328/* 305/*
329 * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function 306 * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function
@@ -335,39 +312,35 @@ static void deplete(struct mtd_info *mtd, int chip)
335 */ 312 */
336static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode) 313static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
337{ 314{
338 volatile unsigned short * rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL); 315 volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL);
339 unsigned short status; 316 unsigned short status;
340 317
341 switch (mode) { 318 switch (mode) {
342 case NAND_ECC_READ : 319 case NAND_ECC_READ:
343 status = RTC_FROM4_RS_ECC_CTL_CLR 320 status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E;
344 | RTC_FROM4_RS_ECC_CTL_FD_E;
345 321
346 *rs_ecc_ctl = status; 322 *rs_ecc_ctl = status;
347 break; 323 break;
348 324
349 case NAND_ECC_READSYN : 325 case NAND_ECC_READSYN:
350 status = 0x00; 326 status = 0x00;
351 327
352 *rs_ecc_ctl = status; 328 *rs_ecc_ctl = status;
353 break; 329 break;
354 330
355 case NAND_ECC_WRITE : 331 case NAND_ECC_WRITE:
356 status = RTC_FROM4_RS_ECC_CTL_CLR 332 status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E;
357 | RTC_FROM4_RS_ECC_CTL_GEN
358 | RTC_FROM4_RS_ECC_CTL_FD_E;
359 333
360 *rs_ecc_ctl = status; 334 *rs_ecc_ctl = status;
361 break; 335 break;
362 336
363 default: 337 default:
364 BUG(); 338 BUG();
365 break; 339 break;
366 } 340 }
367 341
368} 342}
369 343
370
371/* 344/*
372 * rtc_from4_calculate_ecc - hardware specific code to read ECC code 345 * rtc_from4_calculate_ecc - hardware specific code to read ECC code
373 * @mtd: MTD device structure 346 * @mtd: MTD device structure
@@ -383,7 +356,7 @@ static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode)
383 */ 356 */
384static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) 357static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
385{ 358{
386 volatile unsigned short * rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN); 359 volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN);
387 unsigned short value; 360 unsigned short value;
388 int i; 361 int i;
389 362
@@ -395,7 +368,6 @@ static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_c
395 ecc_code[7] |= 0x0f; /* set the last four bits (not used) */ 368 ecc_code[7] |= 0x0f; /* set the last four bits (not used) */
396} 369}
397 370
398
399/* 371/*
400 * rtc_from4_correct_data - hardware specific code to correct data using ECC code 372 * rtc_from4_correct_data - hardware specific code to correct data using ECC code
401 * @mtd: MTD device structure 373 * @mtd: MTD device structure
@@ -414,7 +386,7 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
414 unsigned short status; 386 unsigned short status;
415 uint16_t par[6], syn[6]; 387 uint16_t par[6], syn[6];
416 uint8_t ecc[8]; 388 uint8_t ecc[8];
417 volatile unsigned short *rs_ecc; 389 volatile unsigned short *rs_ecc;
418 390
419 status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK)); 391 status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK));
420 392
@@ -424,23 +396,18 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
424 396
425 /* Read the syndrome pattern from the FPGA and correct the bit order */ 397 /* Read the syndrome pattern from the FPGA and correct the bit order */
426 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); 398 rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
427 for (i = 0; i < 8; i++) { 399 for (i = 0; i < 8; i++) {
428 ecc[i] = revbits[(*rs_ecc) & 0xFF]; 400 ecc[i] = revbits[(*rs_ecc) & 0xFF];
429 rs_ecc++; 401 rs_ecc++;
430 } 402 }
431 403
432 /* convert into 6 10bit syndrome fields */ 404 /* convert into 6 10bit syndrome fields */
433 par[5] = rs_decoder->index_of[(((uint16_t)ecc[0] >> 0) & 0x0ff) | 405 par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)];
434 (((uint16_t)ecc[1] << 8) & 0x300)]; 406 par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)];
435 par[4] = rs_decoder->index_of[(((uint16_t)ecc[1] >> 2) & 0x03f) | 407 par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)];
436 (((uint16_t)ecc[2] << 6) & 0x3c0)]; 408 par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)];
437 par[3] = rs_decoder->index_of[(((uint16_t)ecc[2] >> 4) & 0x00f) | 409 par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)];
438 (((uint16_t)ecc[3] << 4) & 0x3f0)]; 410 par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0);
439 par[2] = rs_decoder->index_of[(((uint16_t)ecc[3] >> 6) & 0x003) |
440 (((uint16_t)ecc[4] << 2) & 0x3fc)];
441 par[1] = rs_decoder->index_of[(((uint16_t)ecc[5] >> 0) & 0x0ff) |
442 (((uint16_t)ecc[6] << 8) & 0x300)];
443 par[0] = (((uint16_t)ecc[6] >> 2) & 0x03f) | (((uint16_t)ecc[7] << 6) & 0x3c0);
444 411
445 /* Convert to computable syndrome */ 412 /* Convert to computable syndrome */
446 for (i = 0; i < 6; i++) { 413 for (i = 0; i < 6; i++) {
@@ -453,16 +420,14 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
453 syn[i] = rs_decoder->index_of[syn[i]]; 420 syn[i] = rs_decoder->index_of[syn[i]];
454 } 421 }
455 422
456 /* Let the library code do its magic.*/ 423 /* Let the library code do its magic. */
457 res = decode_rs8(rs_decoder, (uint8_t *)buf, par, 512, syn, 0, NULL, 0xff, NULL); 424 res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
458 if (res > 0) { 425 if (res > 0) {
459 DEBUG (MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " 426 DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
460 "ECC corrected %d errors on read\n", res);
461 } 427 }
462 return res; 428 return res;
463} 429}
464 430
465
466/** 431/**
467 * rtc_from4_errstat - perform additional error status checks 432 * rtc_from4_errstat - perform additional error status checks
468 * @mtd: MTD device structure 433 * @mtd: MTD device structure
@@ -478,54 +443,66 @@ static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_cha
478 * note: see pages 34..37 of data sheet for details. 443 * note: see pages 34..37 of data sheet for details.
479 * 444 *
480 */ 445 */
481static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page) 446static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this,
447 int state, int status, int page)
482{ 448{
483 int er_stat=0; 449 int er_stat = 0;
484 int rtn, retlen; 450 int rtn, retlen;
485 size_t len; 451 size_t len;
486 uint8_t *buf; 452 uint8_t *buf;
487 int i; 453 int i;
488 454
489 this->cmdfunc (mtd, NAND_CMD_STATUS_CLEAR, -1, -1); 455 this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1);
490 456
491 if (state == FL_ERASING) { 457 if (state == FL_ERASING) {
492 for (i=0; i<4; i++) { 458
493 if (status & 1<<(i+1)) { 459 for (i = 0; i < 4; i++) {
494 this->cmdfunc (mtd, (NAND_CMD_STATUS_ERROR + i + 1), -1, -1); 460 if (!(status & 1 << (i + 1)))
495 rtn = this->read_byte(mtd); 461 continue;
496 this->cmdfunc (mtd, NAND_CMD_STATUS_RESET, -1, -1); 462 this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1),
497 if (!(rtn & ERR_STAT_ECC_AVAILABLE)) { 463 -1, -1);
498 er_stat |= 1<<(i+1); /* err_ecc_not_avail */ 464 rtn = this->read_byte(mtd);
499 } 465 this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
500 } 466
467 /* err_ecc_not_avail */
468 if (!(rtn & ERR_STAT_ECC_AVAILABLE))
469 er_stat |= 1 << (i + 1);
501 } 470 }
471
502 } else if (state == FL_WRITING) { 472 } else if (state == FL_WRITING) {
473
474 unsigned long corrected = mtd->ecc_stats.corrected;
475
503 /* single bank write logic */ 476 /* single bank write logic */
504 this->cmdfunc (mtd, NAND_CMD_STATUS_ERROR, -1, -1); 477 this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1);
505 rtn = this->read_byte(mtd); 478 rtn = this->read_byte(mtd);
506 this->cmdfunc (mtd, NAND_CMD_STATUS_RESET, -1, -1); 479 this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1);
480
507 if (!(rtn & ERR_STAT_ECC_AVAILABLE)) { 481 if (!(rtn & ERR_STAT_ECC_AVAILABLE)) {
508 er_stat |= 1<<1; /* err_ecc_not_avail */ 482 /* err_ecc_not_avail */
509 } else { 483 er_stat |= 1 << 1;
510 len = mtd->oobblock; 484 goto out;
511 buf = kmalloc (len, GFP_KERNEL); 485 }
512 if (!buf) { 486
513 printk (KERN_ERR "rtc_from4_errstat: Out of memory!\n"); 487 len = mtd->writesize;
514 er_stat = 1; /* if we can't check, assume failed */ 488 buf = kmalloc(len, GFP_KERNEL);
515 } else { 489 if (!buf) {
516 /* recovery read */ 490 printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n");
517 /* page read */ 491 er_stat = 1;
518 rtn = nand_do_read_ecc (mtd, page, len, &retlen, buf, NULL, this->autooob, 1); 492 goto out;
519 if (rtn) { /* if read failed or > 1-bit error corrected */
520 er_stat |= 1<<1; /* ECC read failed */
521 }
522 kfree(buf);
523 }
524 } 493 }
494
495 /* recovery read */
496 rtn = nand_do_read(mtd, page, len, &retlen, buf);
497
498 /* if read failed or > 1-bit error corrected */
499 if (rtn || (mtd->ecc_stats.corrected - corrected) > 1) {
500 er_stat |= 1 << 1;
501 kfree(buf);
525 } 502 }
526 503
527 rtn = status; 504 rtn = status;
528 if (er_stat == 0) { /* if ECC is available */ 505 if (er_stat == 0) { /* if ECC is available */
529 rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */ 506 rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */
530 } 507 }
531 508
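The rewritten write branch above drops nand_do_read_ecc() in favour of a plain page read bracketed by samples of mtd->ecc_stats.corrected, so "more than one bit had to be corrected" can be detected without a dedicated entry point. The same idiom, pulled out of the errstat context as a hedged sketch (buffer size, threshold and the helper name are illustrative):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>

/*
 * Read back one page through the normal MTD path and report whether ECC
 * had to correct more than max_bitflips bits while doing so. Sketch only.
 */
static int check_page_bitflips(struct mtd_info *mtd, loff_t ofs, int max_bitflips)
{
	unsigned long before = mtd->ecc_stats.corrected;
	size_t retlen;
	u_char *buf;
	int ret;

	buf = kmalloc(mtd->writesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = mtd->read(mtd, ofs, mtd->writesize, &retlen, buf);
	kfree(buf);
	if (ret)
		return ret;	/* hard read error */

	/* corrected is cumulative, so the delta is what this read cost */
	return (mtd->ecc_stats.corrected - before) > max_bitflips;
}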
@@ -533,33 +510,32 @@ static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, int s
533} 510}
534#endif 511#endif
535 512
536
537/* 513/*
538 * Main initialization routine 514 * Main initialization routine
539 */ 515 */
540int __init rtc_from4_init (void) 516static int __init rtc_from4_init(void)
541{ 517{
542 struct nand_chip *this; 518 struct nand_chip *this;
543 unsigned short bcr1, bcr2, wcr2; 519 unsigned short bcr1, bcr2, wcr2;
544 int i; 520 int i;
545 521
546 /* Allocate memory for MTD device structure and private data */ 522 /* Allocate memory for MTD device structure and private data */
547 rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof (struct nand_chip), 523 rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
548 GFP_KERNEL);
549 if (!rtc_from4_mtd) { 524 if (!rtc_from4_mtd) {
550 printk ("Unable to allocate Renesas NAND MTD device structure.\n"); 525 printk("Unable to allocate Renesas NAND MTD device structure.\n");
551 return -ENOMEM; 526 return -ENOMEM;
552 } 527 }
553 528
554 /* Get pointer to private data */ 529 /* Get pointer to private data */
555 this = (struct nand_chip *) (&rtc_from4_mtd[1]); 530 this = (struct nand_chip *)(&rtc_from4_mtd[1]);
556 531
557 /* Initialize structures */ 532 /* Initialize structures */
558 memset((char *) rtc_from4_mtd, 0, sizeof(struct mtd_info)); 533 memset(rtc_from4_mtd, 0, sizeof(struct mtd_info));
559 memset((char *) this, 0, sizeof(struct nand_chip)); 534 memset(this, 0, sizeof(struct nand_chip));
560 535
561 /* Link the private data with the MTD structure */ 536 /* Link the private data with the MTD structure */
562 rtc_from4_mtd->priv = this; 537 rtc_from4_mtd->priv = this;
538 rtc_from4_mtd->owner = THIS_MODULE;
563 539
564 /* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */ 540 /* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */
565 bcr1 = *SH77X9_BCR1 & ~0x0002; 541 bcr1 = *SH77X9_BCR1 & ~0x0002;
@@ -580,9 +556,9 @@ int __init rtc_from4_init (void)
580 this->IO_ADDR_R = rtc_from4_fio_base; 556 this->IO_ADDR_R = rtc_from4_fio_base;
581 this->IO_ADDR_W = rtc_from4_fio_base; 557 this->IO_ADDR_W = rtc_from4_fio_base;
582 /* Set address of hardware control function */ 558 /* Set address of hardware control function */
583 this->hwcontrol = rtc_from4_hwcontrol; 559 this->cmd_ctrl = rtc_from4_hwcontrol;
584 /* Set address of chip select function */ 560 /* Set address of chip select function */
585 this->select_chip = rtc_from4_nand_select_chip; 561 this->select_chip = rtc_from4_nand_select_chip;
586 /* command delay time (in us) */ 562 /* command delay time (in us) */
587 this->chip_delay = 100; 563 this->chip_delay = 100;
588 /* return the status of the Ready/Busy line */ 564 /* return the status of the Ready/Busy line */
@@ -591,19 +567,20 @@ int __init rtc_from4_init (void)
591#ifdef RTC_FROM4_HWECC 567#ifdef RTC_FROM4_HWECC
592 printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n"); 568 printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n");
593 569
594 this->eccmode = NAND_ECC_HW8_512; 570 this->ecc.mode = NAND_ECC_HW_SYNDROME;
595 this->options |= NAND_HWECC_SYNDROME; 571 this->ecc.size = 512;
572 this->ecc.bytes = 8;
596 /* return the status of extra status and ECC checks */ 573 /* return the status of extra status and ECC checks */
597 this->errstat = rtc_from4_errstat; 574 this->errstat = rtc_from4_errstat;
598 /* set the nand_oobinfo to support FPGA H/W error detection */ 575 /* set the nand_oobinfo to support FPGA H/W error detection */
599 this->autooob = &rtc_from4_nand_oobinfo; 576 this->ecc.layout = &rtc_from4_nand_oobinfo;
600 this->enable_hwecc = rtc_from4_enable_hwecc; 577 this->ecc.hwctl = rtc_from4_enable_hwecc;
601 this->calculate_ecc = rtc_from4_calculate_ecc; 578 this->ecc.calculate = rtc_from4_calculate_ecc;
602 this->correct_data = rtc_from4_correct_data; 579 this->ecc.correct = rtc_from4_correct_data;
603#else 580#else
604 printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n"); 581 printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n");
605 582
606 this->eccmode = NAND_ECC_SOFT; 583 this->ecc.mode = NAND_ECC_SOFT;
607#endif 584#endif
608 585
609 /* set the bad block tables to support debugging */ 586 /* set the bad block tables to support debugging */
@@ -617,7 +594,7 @@ int __init rtc_from4_init (void)
617 } 594 }
618 595
619 /* Perform 'device recovery' for each chip in case there was a power loss. */ 596 /* Perform 'device recovery' for each chip in case there was a power loss. */
620 for (i=0; i < this->numchips; i++) { 597 for (i = 0; i < this->numchips; i++) {
621 deplete(rtc_from4_mtd, i); 598 deplete(rtc_from4_mtd, i);
622 } 599 }
623 600
@@ -643,7 +620,7 @@ int __init rtc_from4_init (void)
643 */ 620 */
644 rs_decoder = init_rs(10, 0x409, 0, 1, 6); 621 rs_decoder = init_rs(10, 0x409, 0, 1, 6);
645 if (!rs_decoder) { 622 if (!rs_decoder) {
646 printk (KERN_ERR "Could not create a RS decoder\n"); 623 printk(KERN_ERR "Could not create a RS decoder\n");
647 nand_release(rtc_from4_mtd); 624 nand_release(rtc_from4_mtd);
648 kfree(rtc_from4_mtd); 625 kfree(rtc_from4_mtd);
649 return -ENOMEM; 626 return -ENOMEM;
@@ -652,20 +629,19 @@ int __init rtc_from4_init (void)
652 /* Return happy */ 629 /* Return happy */
653 return 0; 630 return 0;
654} 631}
655module_init(rtc_from4_init);
656 632
633module_init(rtc_from4_init);
657 634
658/* 635/*
659 * Clean up routine 636 * Clean up routine
660 */ 637 */
661#ifdef MODULE 638static void __exit rtc_from4_cleanup(void)
662static void __exit rtc_from4_cleanup (void)
663{ 639{
664 /* Release resource, unregister partitions */ 640 /* Release resource, unregister partitions */
665 nand_release(rtc_from4_mtd); 641 nand_release(rtc_from4_mtd);
666 642
667 /* Free the MTD device structure */ 643 /* Free the MTD device structure */
668 kfree (rtc_from4_mtd); 644 kfree(rtc_from4_mtd);
669 645
670#ifdef RTC_FROM4_HWECC 646#ifdef RTC_FROM4_HWECC
671 /* Free the reed solomon resources */ 647 /* Free the reed solomon resources */
@@ -674,10 +650,9 @@ static void __exit rtc_from4_cleanup (void)
674 } 650 }
675#endif 651#endif
676} 652}
653
677module_exit(rtc_from4_cleanup); 654module_exit(rtc_from4_cleanup);
678#endif
679 655
680MODULE_LICENSE("GPL"); 656MODULE_LICENSE("GPL");
681MODULE_AUTHOR("d.marlin <dmarlin@redhat.com"); 657MODULE_AUTHOR("d.marlin <dmarlin@redhat.com");
682MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4"); 658MODULE_DESCRIPTION("Board-specific glue layer for AG-AND flash on Renesas FROM_BOARD4");
683
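One point worth spelling out for the FROM_BOARD4 conversion as a whole: the Reed-Solomon setup itself is unchanged. init_rs(10, 0x409, 0, 1, 6) asks lib/reed_solomon for a code over GF(2^10) with field polynomial 0x409, first consecutive root 0, primitive element 1 and 6 parity symbols, i.e. up to 6/2 = 3 correctable symbol errors per 512-byte block, which is why ecc.size/ecc.bytes become 512/8 above. A hedged usage sketch of the two library calls the driver relies on (helper names are illustrative):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/rslib.h>

static struct rs_control *rs;

static int example_rs_setup(void)
{
	/* GF(2^10), field polynomial 0x409, fcr 0, prim 1, 6 parity symbols */
	rs = init_rs(10, 0x409, 0, 1, 6);
	return rs ? 0 : -ENOMEM;
}

static int example_rs_correct(uint8_t *data, uint16_t *par, uint16_t *syn)
{
	/*
	 * 512 data bytes, no known erasures, 0xff inversion mask (the
	 * convention this driver uses), corrections applied in place.
	 * Returns the number of corrected symbols, negative if the block
	 * is uncorrectable.
	 */
	return decode_rs8(rs, data, par, 512, syn, 0, NULL, 0xff, NULL);
}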
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 5b55599739f3..2c262fe03d8a 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -18,8 +18,9 @@
18 * 20-Jun-2005 BJD Updated s3c2440 support, fixed timing bug 18 * 20-Jun-2005 BJD Updated s3c2440 support, fixed timing bug
19 * 08-Jul-2005 BJD Fix OOPS when no platform data supplied 19 * 08-Jul-2005 BJD Fix OOPS when no platform data supplied
20 * 20-Oct-2005 BJD Fix timing calculation bug 20 * 20-Oct-2005 BJD Fix timing calculation bug
21 * 14-Jan-2006 BJD Allow clock to be stopped when idle
21 * 22 *
22 * $Id: s3c2410.c,v 1.20 2005/11/07 11:14:31 gleixner Exp $ 23 * $Id: s3c2410.c,v 1.23 2006/04/01 18:06:29 bjd Exp $
23 * 24 *
24 * This program is free software; you can redistribute it and/or modify 25 * This program is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU General Public License as published by 26 * it under the terms of the GNU General Public License as published by
@@ -36,9 +37,6 @@
36 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 37 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
37*/ 38*/
38 39
39#include <config/mtd/nand/s3c2410/hwecc.h>
40#include <config/mtd/nand/s3c2410/debug.h>
41
42#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG 40#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
43#define DEBUG 41#define DEBUG
44#endif 42#endif
@@ -73,14 +71,20 @@ static int hardware_ecc = 1;
73static int hardware_ecc = 0; 71static int hardware_ecc = 0;
74#endif 72#endif
75 73
74#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
75static int clock_stop = 1;
76#else
77static const int clock_stop = 0;
78#endif
79
80
76/* new oob placement block for use with hardware ecc generation 81/* new oob placement block for use with hardware ecc generation
77 */ 82 */
78 83
79static struct nand_oobinfo nand_hw_eccoob = { 84static struct nand_ecclayout nand_hw_eccoob = {
80 .useecc = MTD_NANDECC_AUTOPLACE, 85 .eccbytes = 3,
81 .eccbytes = 3, 86 .eccpos = {0, 1, 2},
82 .eccpos = {0, 1, 2 }, 87 .oobfree = {{8, 8}}
83 .oobfree = { {8, 8} }
84}; 88};
85 89
86/* controller and mtd information */ 90/* controller and mtd information */
@@ -135,6 +139,11 @@ static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
135 return dev->dev.platform_data; 139 return dev->dev.platform_data;
136} 140}
137 141
142static inline int allow_clk_stop(struct s3c2410_nand_info *info)
143{
144 return clock_stop;
145}
146
138/* timing calculations */ 147/* timing calculations */
139 148
140#define NS_IN_KHZ 1000000 149#define NS_IN_KHZ 1000000
@@ -149,8 +158,7 @@ static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
149 pr_debug("result %d from %ld, %d\n", result, clk, wanted); 158 pr_debug("result %d from %ld, %d\n", result, clk, wanted);
150 159
151 if (result > max) { 160 if (result > max) {
152 printk("%d ns is too big for current clock rate %ld\n", 161 printk("%d ns is too big for current clock rate %ld\n", wanted, clk);
153 wanted, clk);
154 return -1; 162 return -1;
155 } 163 }
156 164
@@ -164,8 +172,7 @@ static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
164 172
165/* controller setup */ 173/* controller setup */
166 174
167static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, 175static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, struct platform_device *pdev)
168 struct platform_device *pdev)
169{ 176{
170 struct s3c2410_platform_nand *plat = to_nand_plat(pdev); 177 struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
171 unsigned long clkrate = clk_get_rate(info->clk); 178 unsigned long clkrate = clk_get_rate(info->clk);
@@ -177,7 +184,7 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
177 clkrate /= 1000; /* turn clock into kHz for ease of use */ 184 clkrate /= 1000; /* turn clock into kHz for ease of use */
178 185
179 if (plat != NULL) { 186 if (plat != NULL) {
180 tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 4); 187 tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 4);
181 twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8); 188 twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8);
182 twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8); 189 twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8);
183 } else { 190 } else {
@@ -193,19 +200,22 @@ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
193 } 200 }
194 201
195 printk(KERN_INFO PFX "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", 202 printk(KERN_INFO PFX "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
196 tacls, to_ns(tacls, clkrate), 203 tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
197 twrph0, to_ns(twrph0, clkrate),
198 twrph1, to_ns(twrph1, clkrate));
199 204
200 if (!info->is_s3c2440) { 205 if (!info->is_s3c2440) {
201 cfg = S3C2410_NFCONF_EN; 206 cfg = S3C2410_NFCONF_EN;
202 cfg |= S3C2410_NFCONF_TACLS(tacls-1); 207 cfg |= S3C2410_NFCONF_TACLS(tacls - 1);
203 cfg |= S3C2410_NFCONF_TWRPH0(twrph0-1); 208 cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
204 cfg |= S3C2410_NFCONF_TWRPH1(twrph1-1); 209 cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
205 } else { 210 } else {
206 cfg = S3C2440_NFCONF_TACLS(tacls-1); 211 cfg = S3C2440_NFCONF_TACLS(tacls - 1);
207 cfg |= S3C2440_NFCONF_TWRPH0(twrph0-1); 212 cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
208 cfg |= S3C2440_NFCONF_TWRPH1(twrph1-1); 213 cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
214
215 /* enable the controller and de-assert nFCE */
216
217 writel(S3C2440_NFCONT_ENABLE | S3C2440_NFCONT_ENABLE,
218 info->regs + S3C2440_NFCONT);
209 } 219 }
210 220
211 pr_debug(PFX "NF_CONF is 0x%lx\n", cfg); 221 pr_debug(PFX "NF_CONF is 0x%lx\n", cfg);
@@ -229,7 +239,10 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
229 info = nmtd->info; 239 info = nmtd->info;
230 240
231 bit = (info->is_s3c2440) ? S3C2440_NFCONT_nFCE : S3C2410_NFCONF_nFCE; 241 bit = (info->is_s3c2440) ? S3C2440_NFCONT_nFCE : S3C2410_NFCONF_nFCE;
232 reg = info->regs+((info->is_s3c2440) ? S3C2440_NFCONT:S3C2410_NFCONF); 242 reg = info->regs + ((info->is_s3c2440) ? S3C2440_NFCONT : S3C2410_NFCONF);
243
244 if (chip != -1 && allow_clk_stop(info))
245 clk_enable(info->clk);
233 246
234 cur = readl(reg); 247 cur = readl(reg);
235 248
@@ -243,77 +256,51 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
243 256
244 if (info->platform != NULL) { 257 if (info->platform != NULL) {
245 if (info->platform->select_chip != NULL) 258 if (info->platform->select_chip != NULL)
246 (info->platform->select_chip)(nmtd->set, chip); 259 (info->platform->select_chip) (nmtd->set, chip);
247 } 260 }
248 261
249 cur &= ~bit; 262 cur &= ~bit;
250 } 263 }
251 264
252 writel(cur, reg); 265 writel(cur, reg);
266
267 if (chip == -1 && allow_clk_stop(info))
268 clk_disable(info->clk);
253} 269}
254 270
255/* command and control functions 271/* s3c2410_nand_hwcontrol
256 *
257 * Note, these all use tglx's method of changing the IO_ADDR_W field
258 * to make the code simpler, and use the nand layer's code to issue the
259 * command and address sequences via the proper IO ports.
260 * 272 *
273 * Issue command and address cycles to the chip
261*/ 274*/
262 275
263static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd) 276static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
277 unsigned int ctrl)
264{ 278{
265 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 279 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
266 struct nand_chip *chip = mtd->priv; 280
267 281 if (cmd == NAND_CMD_NONE)
268 switch (cmd) { 282 return;
269 case NAND_CTL_SETNCE: 283
270 case NAND_CTL_CLRNCE: 284 if (ctrl & NAND_CLE)
271 printk(KERN_ERR "%s: called for NCE\n", __FUNCTION__); 285 writeb(cmd, info->regs + S3C2410_NFCMD);
272 break; 286 else
273 287 writeb(cmd, info->regs + S3C2410_NFADDR);
274 case NAND_CTL_SETCLE:
275 chip->IO_ADDR_W = info->regs + S3C2410_NFCMD;
276 break;
277
278 case NAND_CTL_SETALE:
279 chip->IO_ADDR_W = info->regs + S3C2410_NFADDR;
280 break;
281
282 /* NAND_CTL_CLRCLE: */
283 /* NAND_CTL_CLRALE: */
284 default:
285 chip->IO_ADDR_W = info->regs + S3C2410_NFDATA;
286 break;
287 }
288} 288}
289 289
290/* command and control functions */ 290/* command and control functions */
291 291
292static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd) 292static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
293 unsigned int ctrl)
293{ 294{
294 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 295 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
295 struct nand_chip *chip = mtd->priv; 296
296 297 if (cmd == NAND_CMD_NONE)
297 switch (cmd) { 298 return;
298 case NAND_CTL_SETNCE: 299
299 case NAND_CTL_CLRNCE: 300 if (ctrl & NAND_CLE)
300 printk(KERN_ERR "%s: called for NCE\n", __FUNCTION__); 301 writeb(cmd, info->regs + S3C2440_NFCMD);
301 break; 302 else
302 303 writeb(cmd, info->regs + S3C2440_NFADDR);
303 case NAND_CTL_SETCLE:
304 chip->IO_ADDR_W = info->regs + S3C2440_NFCMD;
305 break;
306
307 case NAND_CTL_SETALE:
308 chip->IO_ADDR_W = info->regs + S3C2440_NFADDR;
309 break;
310
311 /* NAND_CTL_CLRCLE: */
312 /* NAND_CTL_CLRALE: */
313 default:
314 chip->IO_ADDR_W = info->regs + S3C2440_NFDATA;
315 break;
316 }
317} 304}
318 305
319/* s3c2410_nand_devready() 306/* s3c2410_nand_devready()
@@ -330,22 +317,16 @@ static int s3c2410_nand_devready(struct mtd_info *mtd)
330 return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; 317 return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
331} 318}
332 319
333
334/* ECC handling functions */ 320/* ECC handling functions */
335 321
336static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, 322static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
337 u_char *read_ecc, u_char *calc_ecc)
338{ 323{
339 pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", 324 pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", mtd, dat, read_ecc, calc_ecc);
340 mtd, dat, read_ecc, calc_ecc);
341 325
342 pr_debug("eccs: read %02x,%02x,%02x vs calc %02x,%02x,%02x\n", 326 pr_debug("eccs: read %02x,%02x,%02x vs calc %02x,%02x,%02x\n",
343 read_ecc[0], read_ecc[1], read_ecc[2], 327 read_ecc[0], read_ecc[1], read_ecc[2], calc_ecc[0], calc_ecc[1], calc_ecc[2]);
344 calc_ecc[0], calc_ecc[1], calc_ecc[2]);
345 328
346 if (read_ecc[0] == calc_ecc[0] && 329 if (read_ecc[0] == calc_ecc[0] && read_ecc[1] == calc_ecc[1] && read_ecc[2] == calc_ecc[2])
347 read_ecc[1] == calc_ecc[1] &&
348 read_ecc[2] == calc_ecc[2])
349 return 0; 330 return 0;
350 331
351 /* we currently have no method for correcting the error */ 332 /* we currently have no method for correcting the error */
@@ -378,8 +359,7 @@ static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
378 writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); 359 writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
379} 360}
380 361
381static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, 362static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
382 const u_char *dat, u_char *ecc_code)
383{ 363{
384 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 364 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
385 365
@@ -387,15 +367,12 @@ static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd,
387 ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); 367 ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
388 ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); 368 ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
389 369
390 pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", 370 pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]);
391 ecc_code[0], ecc_code[1], ecc_code[2]);
392 371
393 return 0; 372 return 0;
394} 373}
395 374
396 375static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
397static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd,
398 const u_char *dat, u_char *ecc_code)
399{ 376{
400 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); 377 struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
401 unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); 378 unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
@@ -404,13 +381,11 @@ static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd,
404 ecc_code[1] = ecc >> 8; 381 ecc_code[1] = ecc >> 8;
405 ecc_code[2] = ecc >> 16; 382 ecc_code[2] = ecc >> 16;
406 383
407 pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", 384 pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]);
408 ecc_code[0], ecc_code[1], ecc_code[2]);
409 385
410 return 0; 386 return 0;
411} 387}
412 388
413
414/* over-ride the standard functions for a little more speed. We can 389/* over-ride the standard functions for a little more speed. We can
415 * use read/write block to move the data buffers to/from the controller 390 * use read/write block to move the data buffers to/from the controller
416*/ 391*/
@@ -421,8 +396,7 @@ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
421 readsb(this->IO_ADDR_R, buf, len); 396 readsb(this->IO_ADDR_R, buf, len);
422} 397}
423 398
424static void s3c2410_nand_write_buf(struct mtd_info *mtd, 399static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
425 const u_char *buf, int len)
426{ 400{
427 struct nand_chip *this = mtd->priv; 401 struct nand_chip *this = mtd->priv;
428 writesb(this->IO_ADDR_W, buf, len); 402 writesb(this->IO_ADDR_W, buf, len);
@@ -459,7 +433,8 @@ static int s3c2410_nand_remove(struct platform_device *pdev)
459 /* free the common resources */ 433 /* free the common resources */
460 434
461 if (info->clk != NULL && !IS_ERR(info->clk)) { 435 if (info->clk != NULL && !IS_ERR(info->clk)) {
462 clk_disable(info->clk); 436 if (!allow_clk_stop(info))
437 clk_disable(info->clk);
463 clk_put(info->clk); 438 clk_put(info->clk);
464 } 439 }
465 440
@@ -488,9 +463,7 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
488 return add_mtd_device(&mtd->mtd); 463 return add_mtd_device(&mtd->mtd);
489 464
490 if (set->nr_partitions > 0 && set->partitions != NULL) { 465 if (set->nr_partitions > 0 && set->partitions != NULL) {
491 return add_mtd_partitions(&mtd->mtd, 466 return add_mtd_partitions(&mtd->mtd, set->partitions, set->nr_partitions);
492 set->partitions,
493 set->nr_partitions);
494 } 467 }
495 468
496 return add_mtd_device(&mtd->mtd); 469 return add_mtd_device(&mtd->mtd);
@@ -517,7 +490,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
517 490
518 chip->IO_ADDR_R = info->regs + S3C2410_NFDATA; 491 chip->IO_ADDR_R = info->regs + S3C2410_NFDATA;
519 chip->IO_ADDR_W = info->regs + S3C2410_NFDATA; 492 chip->IO_ADDR_W = info->regs + S3C2410_NFDATA;
520 chip->hwcontrol = s3c2410_nand_hwcontrol; 493 chip->cmd_ctrl = s3c2410_nand_hwcontrol;
521 chip->dev_ready = s3c2410_nand_devready; 494 chip->dev_ready = s3c2410_nand_devready;
522 chip->write_buf = s3c2410_nand_write_buf; 495 chip->write_buf = s3c2410_nand_write_buf;
523 chip->read_buf = s3c2410_nand_read_buf; 496 chip->read_buf = s3c2410_nand_read_buf;
@@ -530,26 +503,29 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
530 if (info->is_s3c2440) { 503 if (info->is_s3c2440) {
531 chip->IO_ADDR_R = info->regs + S3C2440_NFDATA; 504 chip->IO_ADDR_R = info->regs + S3C2440_NFDATA;
532 chip->IO_ADDR_W = info->regs + S3C2440_NFDATA; 505 chip->IO_ADDR_W = info->regs + S3C2440_NFDATA;
533 chip->hwcontrol = s3c2440_nand_hwcontrol; 506 chip->cmd_ctrl = s3c2440_nand_hwcontrol;
534 } 507 }
535 508
536 nmtd->info = info; 509 nmtd->info = info;
537 nmtd->mtd.priv = chip; 510 nmtd->mtd.priv = chip;
511 nmtd->mtd.owner = THIS_MODULE;
538 nmtd->set = set; 512 nmtd->set = set;
539 513
540 if (hardware_ecc) { 514 if (hardware_ecc) {
541 chip->correct_data = s3c2410_nand_correct_data; 515 chip->ecc.correct = s3c2410_nand_correct_data;
542 chip->enable_hwecc = s3c2410_nand_enable_hwecc; 516 chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
543 chip->calculate_ecc = s3c2410_nand_calculate_ecc; 517 chip->ecc.calculate = s3c2410_nand_calculate_ecc;
544 chip->eccmode = NAND_ECC_HW3_512; 518 chip->ecc.mode = NAND_ECC_HW;
545 chip->autooob = &nand_hw_eccoob; 519 chip->ecc.size = 512;
520 chip->ecc.bytes = 3;
521 chip->ecc.layout = &nand_hw_eccoob;
546 522
547 if (info->is_s3c2440) { 523 if (info->is_s3c2440) {
548 chip->enable_hwecc = s3c2440_nand_enable_hwecc; 524 chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
549 chip->calculate_ecc = s3c2440_nand_calculate_ecc; 525 chip->ecc.calculate = s3c2440_nand_calculate_ecc;
550 } 526 }
551 } else { 527 } else {
552 chip->eccmode = NAND_ECC_SOFT; 528 chip->ecc.mode = NAND_ECC_SOFT;
553 } 529 }
554} 530}
555 531
@@ -654,13 +630,11 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
654 nmtd = info->mtds; 630 nmtd = info->mtds;
655 631
656 for (setno = 0; setno < nr_sets; setno++, nmtd++) { 632 for (setno = 0; setno < nr_sets; setno++, nmtd++) {
657 pr_debug("initialising set %d (%p, info %p)\n", 633 pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info);
658 setno, nmtd, info);
659 634
660 s3c2410_nand_init_chip(info, nmtd, sets); 635 s3c2410_nand_init_chip(info, nmtd, sets);
661 636
662 nmtd->scan_res = nand_scan(&nmtd->mtd, 637 nmtd->scan_res = nand_scan(&nmtd->mtd, (sets) ? sets->nr_chips : 1);
663 (sets) ? sets->nr_chips : 1);
664 638
665 if (nmtd->scan_res == 0) { 639 if (nmtd->scan_res == 0) {
666 s3c2410_nand_add_partition(info, nmtd, sets); 640 s3c2410_nand_add_partition(info, nmtd, sets);
@@ -670,6 +644,11 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
670 sets++; 644 sets++;
671 } 645 }
672 646
647 if (allow_clk_stop(info)) {
648 dev_info(&pdev->dev, "clock idle support enabled\n");
649 clk_disable(info->clk);
650 }
651
673 pr_debug("initialised ok\n"); 652 pr_debug("initialised ok\n");
674 return 0; 653 return 0;
675 654
@@ -681,6 +660,41 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
681 return err; 660 return err;
682} 661}
683 662
663/* PM Support */
664#ifdef CONFIG_PM
665
666static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
667{
668 struct s3c2410_nand_info *info = platform_get_drvdata(dev);
669
670 if (info) {
671 if (!allow_clk_stop(info))
672 clk_disable(info->clk);
673 }
674
675 return 0;
676}
677
678static int s3c24xx_nand_resume(struct platform_device *dev)
679{
680 struct s3c2410_nand_info *info = platform_get_drvdata(dev);
681
682 if (info) {
683 clk_enable(info->clk);
684 s3c2410_nand_inithw(info, dev);
685
686 if (allow_clk_stop(info))
687 clk_disable(info->clk);
688 }
689
690 return 0;
691}
692
693#else
694#define s3c24xx_nand_suspend NULL
695#define s3c24xx_nand_resume NULL
696#endif
697
684/* driver device registration */ 698/* driver device registration */
685 699
686static int s3c2410_nand_probe(struct platform_device *dev) 700static int s3c2410_nand_probe(struct platform_device *dev)
@@ -696,6 +710,8 @@ static int s3c2440_nand_probe(struct platform_device *dev)
696static struct platform_driver s3c2410_nand_driver = { 710static struct platform_driver s3c2410_nand_driver = {
697 .probe = s3c2410_nand_probe, 711 .probe = s3c2410_nand_probe,
698 .remove = s3c2410_nand_remove, 712 .remove = s3c2410_nand_remove,
713 .suspend = s3c24xx_nand_suspend,
714 .resume = s3c24xx_nand_resume,
699 .driver = { 715 .driver = {
700 .name = "s3c2410-nand", 716 .name = "s3c2410-nand",
701 .owner = THIS_MODULE, 717 .owner = THIS_MODULE,
@@ -705,6 +721,8 @@ static struct platform_driver s3c2410_nand_driver = {
705static struct platform_driver s3c2440_nand_driver = { 721static struct platform_driver s3c2440_nand_driver = {
706 .probe = s3c2440_nand_probe, 722 .probe = s3c2440_nand_probe,
707 .remove = s3c2410_nand_remove, 723 .remove = s3c2410_nand_remove,
724 .suspend = s3c24xx_nand_suspend,
725 .resume = s3c24xx_nand_resume,
708 .driver = { 726 .driver = {
709 .name = "s3c2440-nand", 727 .name = "s3c2440-nand",
710 .owner = THIS_MODULE, 728 .owner = THIS_MODULE,
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 1924a4f137c7..21743658d150 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -46,7 +46,6 @@ static int sharpsl_phys_base = 0x0C000000;
46#define FLCLE (1 << 1) 46#define FLCLE (1 << 1)
47#define FLCE0 (1 << 0) 47#define FLCE0 (1 << 0)
48 48
49
50/* 49/*
51 * MTD structure for SharpSL 50 * MTD structure for SharpSL
52 */ 51 */
@@ -60,50 +59,44 @@ static struct mtd_info *sharpsl_mtd = NULL;
60static int nr_partitions; 59static int nr_partitions;
61static struct mtd_partition sharpsl_nand_default_partition_info[] = { 60static struct mtd_partition sharpsl_nand_default_partition_info[] = {
62 { 61 {
63 .name = "System Area", 62 .name = "System Area",
64 .offset = 0, 63 .offset = 0,
65 .size = 7 * 1024 * 1024, 64 .size = 7 * 1024 * 1024,
66 }, 65 },
67 { 66 {
68 .name = "Root Filesystem", 67 .name = "Root Filesystem",
69 .offset = 7 * 1024 * 1024, 68 .offset = 7 * 1024 * 1024,
70 .size = 30 * 1024 * 1024, 69 .size = 30 * 1024 * 1024,
71 }, 70 },
72 { 71 {
73 .name = "Home Filesystem", 72 .name = "Home Filesystem",
74 .offset = MTDPART_OFS_APPEND , 73 .offset = MTDPART_OFS_APPEND,
75 .size = MTDPART_SIZ_FULL , 74 .size = MTDPART_SIZ_FULL,
76 }, 75 },
77}; 76};
78 77
79/* 78/*
80 * hardware specific access to control-lines 79 * hardware specific access to control-lines
80 * ctrl:
81 * NAND_CNE: bit 0 -> bit 0 & 4
82 * NAND_CLE: bit 1 -> bit 1
83 * NAND_ALE: bit 2 -> bit 2
84 *
81 */ 85 */
82static void 86static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
83sharpsl_nand_hwcontrol(struct mtd_info* mtd, int cmd) 87 unsigned int ctrl)
84{ 88{
85 switch (cmd) { 89 struct nand_chip *chip = mtd->priv;
86 case NAND_CTL_SETCLE: 90
87 writeb(readb(FLASHCTL) | FLCLE, FLASHCTL); 91 if (ctrl & NAND_CTRL_CHANGE) {
88 break; 92 unsigned char bits = ctrl & 0x07;
89 case NAND_CTL_CLRCLE: 93
90 writeb(readb(FLASHCTL) & ~FLCLE, FLASHCTL); 94 bits |= (ctrl & 0x01) << 4;
91 break; 95 writeb((readb(FLASHCTL) & 0x17) | bits, FLASHCTL);
92
93 case NAND_CTL_SETALE:
94 writeb(readb(FLASHCTL) | FLALE, FLASHCTL);
95 break;
96 case NAND_CTL_CLRALE:
97 writeb(readb(FLASHCTL) & ~FLALE, FLASHCTL);
98 break;
99
100 case NAND_CTL_SETNCE:
101 writeb(readb(FLASHCTL) & ~(FLCE0|FLCE1), FLASHCTL);
102 break;
103 case NAND_CTL_CLRNCE:
104 writeb(readb(FLASHCTL) | (FLCE0|FLCE1), FLASHCTL);
105 break;
106 } 96 }
97
98 if (cmd != NAND_CMD_NONE)
99 writeb(cmd, chip->IO_ADDR_W);
107} 100}
108 101
109static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; 102static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
@@ -122,31 +115,26 @@ static struct nand_bbt_descr sharpsl_akita_bbt = {
122 .pattern = scan_ff_pattern 115 .pattern = scan_ff_pattern
123}; 116};
124 117
125static struct nand_oobinfo akita_oobinfo = { 118static struct nand_ecclayout akita_oobinfo = {
126 .useecc = MTD_NANDECC_AUTOPLACE,
127 .eccbytes = 24, 119 .eccbytes = 24,
128 .eccpos = { 120 .eccpos = {
129 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11, 121 0x5, 0x1, 0x2, 0x3, 0x6, 0x7, 0x15, 0x11,
130 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23, 122 0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
131 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37}, 123 0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37},
132 .oobfree = { {0x08, 0x09} } 124 .oobfree = {{0x08, 0x09}}
133}; 125};
134 126
135static int 127static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
136sharpsl_nand_dev_ready(struct mtd_info* mtd)
137{ 128{
138 return !((readb(FLASHCTL) & FLRYBY) == 0); 129 return !((readb(FLASHCTL) & FLRYBY) == 0);
139} 130}
140 131
141static void 132static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
142sharpsl_nand_enable_hwecc(struct mtd_info* mtd, int mode)
143{ 133{
144 writeb(0 ,ECCCLRR); 134 writeb(0, ECCCLRR);
145} 135}
146 136
147static int 137static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
148sharpsl_nand_calculate_ecc(struct mtd_info* mtd, const u_char* dat,
149 u_char* ecc_code)
150{ 138{
151 ecc_code[0] = ~readb(ECCLPUB); 139 ecc_code[0] = ~readb(ECCLPUB);
152 ecc_code[1] = ~readb(ECCLPLB); 140 ecc_code[1] = ~readb(ECCLPLB);
@@ -154,47 +142,44 @@ sharpsl_nand_calculate_ecc(struct mtd_info* mtd, const u_char* dat,
154 return readb(ECCCNTR) != 0; 142 return readb(ECCCNTR) != 0;
155} 143}
156 144
157
158#ifdef CONFIG_MTD_PARTITIONS 145#ifdef CONFIG_MTD_PARTITIONS
159const char *part_probes[] = { "cmdlinepart", NULL }; 146const char *part_probes[] = { "cmdlinepart", NULL };
160#endif 147#endif
161 148
162
163/* 149/*
164 * Main initialization routine 150 * Main initialization routine
165 */ 151 */
166int __init 152static int __init sharpsl_nand_init(void)
167sharpsl_nand_init(void)
168{ 153{
169 struct nand_chip *this; 154 struct nand_chip *this;
170 struct mtd_partition* sharpsl_partition_info; 155 struct mtd_partition *sharpsl_partition_info;
171 int err = 0; 156 int err = 0;
172 157
173 /* Allocate memory for MTD device structure and private data */ 158 /* Allocate memory for MTD device structure and private data */
174 sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), 159 sharpsl_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
175 GFP_KERNEL);
176 if (!sharpsl_mtd) { 160 if (!sharpsl_mtd) {
177 printk ("Unable to allocate SharpSL NAND MTD device structure.\n"); 161 printk("Unable to allocate SharpSL NAND MTD device structure.\n");
178 return -ENOMEM; 162 return -ENOMEM;
179 } 163 }
180 164
181 /* map physical address */ 165 /* map physical address */
182 sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000); 166 sharpsl_io_base = ioremap(sharpsl_phys_base, 0x1000);
183 if(!sharpsl_io_base){ 167 if (!sharpsl_io_base) {
184 printk("ioremap to access Sharp SL NAND chip failed\n"); 168 printk("ioremap to access Sharp SL NAND chip failed\n");
185 kfree(sharpsl_mtd); 169 kfree(sharpsl_mtd);
186 return -EIO; 170 return -EIO;
187 } 171 }
188 172
189 /* Get pointer to private data */ 173 /* Get pointer to private data */
190 this = (struct nand_chip *) (&sharpsl_mtd[1]); 174 this = (struct nand_chip *)(&sharpsl_mtd[1]);
191 175
192 /* Initialize structures */ 176 /* Initialize structures */
193 memset((char *) sharpsl_mtd, 0, sizeof(struct mtd_info)); 177 memset(sharpsl_mtd, 0, sizeof(struct mtd_info));
194 memset((char *) this, 0, sizeof(struct nand_chip)); 178 memset(this, 0, sizeof(struct nand_chip));
195 179
196 /* Link the private data with the MTD structure */ 180 /* Link the private data with the MTD structure */
197 sharpsl_mtd->priv = this; 181 sharpsl_mtd->priv = this;
182 sharpsl_mtd->owner = THIS_MODULE;
198 183
199 /* 184 /*
200 * PXA initialize 185 * PXA initialize
@@ -205,23 +190,25 @@ sharpsl_nand_init(void)
205 this->IO_ADDR_R = FLASHIO; 190 this->IO_ADDR_R = FLASHIO;
206 this->IO_ADDR_W = FLASHIO; 191 this->IO_ADDR_W = FLASHIO;
207 /* Set address of hardware control function */ 192 /* Set address of hardware control function */
208 this->hwcontrol = sharpsl_nand_hwcontrol; 193 this->cmd_ctrl = sharpsl_nand_hwcontrol;
209 this->dev_ready = sharpsl_nand_dev_ready; 194 this->dev_ready = sharpsl_nand_dev_ready;
210 /* 15 us command delay time */ 195 /* 15 us command delay time */
211 this->chip_delay = 15; 196 this->chip_delay = 15;
212 /* set eccmode using hardware ECC */ 197 /* set eccmode using hardware ECC */
213 this->eccmode = NAND_ECC_HW3_256; 198 this->ecc.mode = NAND_ECC_HW;
199 this->ecc.size = 256;
200 this->ecc.bytes = 3;
214 this->badblock_pattern = &sharpsl_bbt; 201 this->badblock_pattern = &sharpsl_bbt;
215 if (machine_is_akita() || machine_is_borzoi()) { 202 if (machine_is_akita() || machine_is_borzoi()) {
216 this->badblock_pattern = &sharpsl_akita_bbt; 203 this->badblock_pattern = &sharpsl_akita_bbt;
217 this->autooob = &akita_oobinfo; 204 this->ecc.layout = &akita_oobinfo;
218 } 205 }
219 this->enable_hwecc = sharpsl_nand_enable_hwecc; 206 this->ecc.hwctl = sharpsl_nand_enable_hwecc;
220 this->calculate_ecc = sharpsl_nand_calculate_ecc; 207 this->ecc.calculate = sharpsl_nand_calculate_ecc;
221 this->correct_data = nand_correct_data; 208 this->ecc.correct = nand_correct_data;
222 209
223 /* Scan to find existence of the device */ 210 /* Scan to find existence of the device */
224 err=nand_scan(sharpsl_mtd,1); 211 err = nand_scan(sharpsl_mtd, 1);
225 if (err) { 212 if (err) {
226 iounmap(sharpsl_io_base); 213 iounmap(sharpsl_io_base);
227 kfree(sharpsl_mtd); 214 kfree(sharpsl_mtd);
@@ -230,24 +217,23 @@ sharpsl_nand_init(void)
230 217
231 /* Register the partitions */ 218 /* Register the partitions */
232 sharpsl_mtd->name = "sharpsl-nand"; 219 sharpsl_mtd->name = "sharpsl-nand";
233 nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes, 220 nr_partitions = parse_mtd_partitions(sharpsl_mtd, part_probes, &sharpsl_partition_info, 0);
234 &sharpsl_partition_info, 0);
235 221
236 if (nr_partitions <= 0) { 222 if (nr_partitions <= 0) {
237 nr_partitions = DEFAULT_NUM_PARTITIONS; 223 nr_partitions = DEFAULT_NUM_PARTITIONS;
238 sharpsl_partition_info = sharpsl_nand_default_partition_info; 224 sharpsl_partition_info = sharpsl_nand_default_partition_info;
239 if (machine_is_poodle()) { 225 if (machine_is_poodle()) {
240 sharpsl_partition_info[1].size=30 * 1024 * 1024; 226 sharpsl_partition_info[1].size = 22 * 1024 * 1024;
241 } else if (machine_is_corgi() || machine_is_shepherd()) { 227 } else if (machine_is_corgi() || machine_is_shepherd()) {
242 sharpsl_partition_info[1].size=25 * 1024 * 1024; 228 sharpsl_partition_info[1].size = 25 * 1024 * 1024;
243 } else if (machine_is_husky()) { 229 } else if (machine_is_husky()) {
244 sharpsl_partition_info[1].size=53 * 1024 * 1024; 230 sharpsl_partition_info[1].size = 53 * 1024 * 1024;
245 } else if (machine_is_spitz()) { 231 } else if (machine_is_spitz()) {
246 sharpsl_partition_info[1].size=5 * 1024 * 1024; 232 sharpsl_partition_info[1].size = 5 * 1024 * 1024;
247 } else if (machine_is_akita()) { 233 } else if (machine_is_akita()) {
248 sharpsl_partition_info[1].size=58 * 1024 * 1024; 234 sharpsl_partition_info[1].size = 58 * 1024 * 1024;
249 } else if (machine_is_borzoi()) { 235 } else if (machine_is_borzoi()) {
250 sharpsl_partition_info[1].size=32 * 1024 * 1024; 236 sharpsl_partition_info[1].size = 32 * 1024 * 1024;
251 } 237 }
252 } 238 }
253 239
@@ -261,15 +247,15 @@ sharpsl_nand_init(void)
261 /* Return happy */ 247 /* Return happy */
262 return 0; 248 return 0;
263} 249}
250
264module_init(sharpsl_nand_init); 251module_init(sharpsl_nand_init);
265 252
266/* 253/*
267 * Clean up routine 254 * Clean up routine
268 */ 255 */
269#ifdef MODULE
270static void __exit sharpsl_nand_cleanup(void) 256static void __exit sharpsl_nand_cleanup(void)
271{ 257{
272 struct nand_chip *this = (struct nand_chip *) &sharpsl_mtd[1]; 258 struct nand_chip *this = (struct nand_chip *)&sharpsl_mtd[1];
273 259
274 /* Release resources, unregister device */ 260 /* Release resources, unregister device */
275 nand_release(sharpsl_mtd); 261 nand_release(sharpsl_mtd);
@@ -279,8 +265,8 @@ static void __exit sharpsl_nand_cleanup(void)
279 /* Free the MTD device structure */ 265 /* Free the MTD device structure */
280 kfree(sharpsl_mtd); 266 kfree(sharpsl_mtd);
281} 267}
268
282module_exit(sharpsl_nand_cleanup); 269module_exit(sharpsl_nand_cleanup);
283#endif
284 270
285MODULE_LICENSE("GPL"); 271MODULE_LICENSE("GPL");
286MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); 272MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
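For reference, the per-driver ECC conversion seen in the sharpsl hunk above follows one common shape: the old eccmode/autooob/enable_hwecc/calculate_ecc/correct_data fields all move into the nand_chip ecc sub-structure. A minimal sketch (the my_* names are placeholders for board-specific code, not part of this patch):

	/* sketch only: hardware-ECC setup under the reworked nand_chip */
	this->ecc.mode      = NAND_ECC_HW;       /* was: this->eccmode = NAND_ECC_HW3_256 */
	this->ecc.size      = 256;               /* data bytes covered per ECC step */
	this->ecc.bytes     = 3;                 /* ECC bytes generated per step */
	this->ecc.layout    = &my_ecclayout;     /* was: this->autooob (struct nand_oobinfo) */
	this->ecc.hwctl     = my_enable_hwecc;   /* was: this->enable_hwecc */
	this->ecc.calculate = my_calculate_ecc;  /* was: this->calculate_ecc */
	this->ecc.correct   = nand_correct_data; /* generic helper, unchanged */

Boards relying on software ECC only need this->ecc.mode = NAND_ECC_SOFT, as the toto and ts7250 hunks below show.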
diff --git a/drivers/mtd/nand/spia.c b/drivers/mtd/nand/spia.c
index 9cf1ce718ec1..1f6d429b1583 100644
--- a/drivers/mtd/nand/spia.c
+++ b/drivers/mtd/nand/spia.c
@@ -39,16 +39,16 @@ static struct mtd_info *spia_mtd = NULL;
39 */ 39 */
40#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */ 40#define SPIA_IO_BASE 0xd0000000 /* Start of EP7212 IO address space */
41#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */ 41#define SPIA_FIO_BASE 0xf0000000 /* Address where flash is mapped */
42#define SPIA_PEDR 0x0080 /* 42#define SPIA_PEDR 0x0080 /*
43 * IO offset to Port E data register 43 * IO offset to Port E data register
44 * where the CLE, ALE and NCE pins 44 * where the CLE, ALE and NCE pins
45 * are wired to. 45 * are wired to.
46 */ 46 */
47#define SPIA_PEDDR 0x00c0 /* 47#define SPIA_PEDDR 0x00c0 /*
48 * IO offset to Port E data direction 48 * IO offset to Port E data direction
49 * register so we can control the IO 49 * register so we can control the IO
50 * lines. 50 * lines.
51 */ 51 */
52 52
53/* 53/*
54 * Module stuff 54 * Module stuff
@@ -69,79 +69,84 @@ module_param(spia_peddr, int, 0);
69 */ 69 */
70static const struct mtd_partition partition_info[] = { 70static const struct mtd_partition partition_info[] = {
71 { 71 {
72 .name = "SPIA flash partition 1", 72 .name = "SPIA flash partition 1",
73 .offset = 0, 73 .offset = 0,
74 .size = 2*1024*1024 74 .size = 2 * 1024 * 1024},
75 },
76 { 75 {
77 .name = "SPIA flash partition 2", 76 .name = "SPIA flash partition 2",
78 .offset = 2*1024*1024, 77 .offset = 2 * 1024 * 1024,
79 .size = 6*1024*1024 78 .size = 6 * 1024 * 1024}
80 }
81}; 79};
82#define NUM_PARTITIONS 2
83 80
81#define NUM_PARTITIONS 2
84 82
85/* 83/*
86 * hardware specific access to control-lines 84 * hardware specific access to control-lines
87*/ 85 *
88static void spia_hwcontrol(struct mtd_info *mtd, int cmd){ 86 * ctrl:
89 87 * NAND_CNE: bit 0 -> bit 2
90 switch(cmd){ 88 * NAND_CLE: bit 1 -> bit 0
89 * NAND_ALE: bit 2 -> bit 1
90 */
91static void spia_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
92{
93 struct nand_chip *chip = mtd->priv;
91 94
92 case NAND_CTL_SETCLE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x01; break; 95 if (ctrl & NAND_CTRL_CHANGE) {
93 case NAND_CTL_CLRCLE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x01; break; 96 void __iomem *addr = spia_io_base + spia_pedr;
97 unsigned char bits;
94 98
95 case NAND_CTL_SETALE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x02; break; 99 bits = (ctrl & NAND_NCE) << 2;
96 case NAND_CTL_CLRALE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x02; break; 100 bits |= (ctrl & (NAND_CLE | NAND_ALE)) >> 1;
101 writeb((readb(addr) & ~0x7) | bits, addr);
102 }
97 103
98 case NAND_CTL_SETNCE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) &= ~0x04; break; 104 if (cmd != NAND_CMD_NONE)
99 case NAND_CTL_CLRNCE: (*(volatile unsigned char *) (spia_io_base + spia_pedr)) |= 0x04; break; 105 writeb(cmd, chip->IO_ADDR_W);
100 }
101} 106}
102 107
103/* 108/*
104 * Main initialization routine 109 * Main initialization routine
105 */ 110 */
106int __init spia_init (void) 111static int __init spia_init(void)
107{ 112{
108 struct nand_chip *this; 113 struct nand_chip *this;
109 114
110 /* Allocate memory for MTD device structure and private data */ 115 /* Allocate memory for MTD device structure and private data */
111 spia_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), 116 spia_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
112 GFP_KERNEL);
113 if (!spia_mtd) { 117 if (!spia_mtd) {
114 printk ("Unable to allocate SPIA NAND MTD device structure.\n"); 118 printk("Unable to allocate SPIA NAND MTD device structure.\n");
115 return -ENOMEM; 119 return -ENOMEM;
116 } 120 }
117 121
118 /* Get pointer to private data */ 122 /* Get pointer to private data */
119 this = (struct nand_chip *) (&spia_mtd[1]); 123 this = (struct nand_chip *)(&spia_mtd[1]);
120 124
121 /* Initialize structures */ 125 /* Initialize structures */
122 memset((char *) spia_mtd, 0, sizeof(struct mtd_info)); 126 memset(spia_mtd, 0, sizeof(struct mtd_info));
123 memset((char *) this, 0, sizeof(struct nand_chip)); 127 memset(this, 0, sizeof(struct nand_chip));
124 128
125 /* Link the private data with the MTD structure */ 129 /* Link the private data with the MTD structure */
126 spia_mtd->priv = this; 130 spia_mtd->priv = this;
131 spia_mtd->owner = THIS_MODULE;
127 132
128 /* 133 /*
129 * Set GPIO Port E control register so that the pins are configured 134 * Set GPIO Port E control register so that the pins are configured
130 * to be outputs for controlling the NAND flash. 135 * to be outputs for controlling the NAND flash.
131 */ 136 */
132 (*(volatile unsigned char *) (spia_io_base + spia_peddr)) = 0x07; 137 (*(volatile unsigned char *)(spia_io_base + spia_peddr)) = 0x07;
133 138
134 /* Set address of NAND IO lines */ 139 /* Set address of NAND IO lines */
135 this->IO_ADDR_R = (void __iomem *) spia_fio_base; 140 this->IO_ADDR_R = (void __iomem *)spia_fio_base;
136 this->IO_ADDR_W = (void __iomem *) spia_fio_base; 141 this->IO_ADDR_W = (void __iomem *)spia_fio_base;
137 /* Set address of hardware control function */ 142 /* Set address of hardware control function */
138 this->hwcontrol = spia_hwcontrol; 143 this->cmd_ctrl = spia_hwcontrol;
139 /* 15 us command delay time */ 144 /* 15 us command delay time */
140 this->chip_delay = 15; 145 this->chip_delay = 15;
141 146
142 /* Scan to find existence of the device */ 147 /* Scan to find existence of the device */
143 if (nand_scan (spia_mtd, 1)) { 148 if (nand_scan(spia_mtd, 1)) {
144 kfree (spia_mtd); 149 kfree(spia_mtd);
145 return -ENXIO; 150 return -ENXIO;
146 } 151 }
147 152
@@ -151,22 +156,22 @@ int __init spia_init (void)
151 /* Return happy */ 156 /* Return happy */
152 return 0; 157 return 0;
153} 158}
159
154module_init(spia_init); 160module_init(spia_init);
155 161
156/* 162/*
157 * Clean up routine 163 * Clean up routine
158 */ 164 */
159#ifdef MODULE 165static void __exit spia_cleanup(void)
160static void __exit spia_cleanup (void)
161{ 166{
162 /* Release resources, unregister device */ 167 /* Release resources, unregister device */
163 nand_release (spia_mtd); 168 nand_release(spia_mtd);
164 169
165 /* Free the MTD device structure */ 170 /* Free the MTD device structure */
166 kfree (spia_mtd); 171 kfree(spia_mtd);
167} 172}
173
168module_exit(spia_cleanup); 174module_exit(spia_cleanup);
169#endif
170 175
171MODULE_LICENSE("GPL"); 176MODULE_LICENSE("GPL");
172MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>"); 177MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
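Every cmd_ctrl implementation in these hunks has the same two-part shape: decode the control lines only when NAND_CTRL_CHANGE is set, then push the command byte out unless it is NAND_CMD_NONE. A generic sketch (my_set_control_lines() stands in for the board-specific register write and is not part of this patch):

	static void my_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
	{
		struct nand_chip *chip = mtd->priv;

		if (ctrl & NAND_CTRL_CHANGE) {
			/* bits 0..2 of ctrl carry NAND_NCE, NAND_CLE and NAND_ALE */
			my_set_control_lines(ctrl & (NAND_NCE | NAND_CLE | NAND_ALE));
		}

		if (cmd != NAND_CMD_NONE)
			writeb(cmd, chip->IO_ADDR_W);
	}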
diff --git a/drivers/mtd/nand/toto.c b/drivers/mtd/nand/toto.c
index 7609c43cb3ec..f9e2d4a0ab8c 100644
--- a/drivers/mtd/nand/toto.c
+++ b/drivers/mtd/nand/toto.c
@@ -32,6 +32,8 @@
32#include <asm/arch-omap1510/hardware.h> 32#include <asm/arch-omap1510/hardware.h>
33#include <asm/arch/gpio.h> 33#include <asm/arch/gpio.h>
34 34
35#define CONFIG_NAND_WORKAROUND 1
36
35/* 37/*
36 * MTD structure for TOTO board 38 * MTD structure for TOTO board
37 */ 39 */
@@ -39,25 +41,6 @@ static struct mtd_info *toto_mtd = NULL;
39 41
40static unsigned long toto_io_base = OMAP_FLASH_1_BASE; 42static unsigned long toto_io_base = OMAP_FLASH_1_BASE;
41 43
42#define CONFIG_NAND_WORKAROUND 1
43
44#define NAND_NCE 0x4000
45#define NAND_CLE 0x1000
46#define NAND_ALE 0x0002
47#define NAND_MASK (NAND_CLE | NAND_ALE | NAND_NCE)
48
49#define T_NAND_CTL_CLRALE(iob) gpiosetout(NAND_ALE, 0)
50#define T_NAND_CTL_SETALE(iob) gpiosetout(NAND_ALE, NAND_ALE)
51#ifdef CONFIG_NAND_WORKAROUND /* "some" dev boards busted, blue wired to rts2 :( */
52#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0); rts2setout(2, 2)
53#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE); rts2setout(2, 0)
54#else
55#define T_NAND_CTL_CLRCLE(iob) gpiosetout(NAND_CLE, 0)
56#define T_NAND_CTL_SETCLE(iob) gpiosetout(NAND_CLE, NAND_CLE)
57#endif
58#define T_NAND_CTL_SETNCE(iob) gpiosetout(NAND_NCE, 0)
59#define T_NAND_CTL_CLRNCE(iob) gpiosetout(NAND_NCE, NAND_NCE)
60
61/* 44/*
62 * Define partitions for flash devices 45 * Define partitions for flash devices
63 */ 46 */
@@ -91,91 +74,110 @@ static struct mtd_partition partition_info32M[] = {
91 74
92#define NUM_PARTITIONS32M 3 75#define NUM_PARTITIONS32M 3
93#define NUM_PARTITIONS64M 4 76#define NUM_PARTITIONS64M 4
77
94/* 78/*
95 * hardware specific access to control-lines 79 * hardware specific access to control-lines
96*/ 80 *
97 81 * ctrl:
98static void toto_hwcontrol(struct mtd_info *mtd, int cmd) 82 * NAND_NCE: bit 0 -> bit 14 (0x4000)
83 * NAND_CLE: bit 1 -> bit 12 (0x1000)
84 * NAND_ALE: bit 2 -> bit 1 (0x0002)
85 */
86static void toto_hwcontrol(struct mtd_info *mtd, int cmd,
87 unsigned int ctrl)
99{ 88{
89 struct nand_chip *chip = mtd->priv;
90
91 if (ctrl & NAND_CTRL_CHANGE) {
92 unsigned long bits;
100 93
101 udelay(1); /* hopefully enough time for the preceding write to clear */ 94 /* hopefully enough time for the preceding write to clear */
102 switch(cmd){ 95 udelay(1);
103 96
104 case NAND_CTL_SETCLE: T_NAND_CTL_SETCLE(cmd); break; 97 bits = (~ctrl & NAND_NCE) << 14;
105 case NAND_CTL_CLRCLE: T_NAND_CTL_CLRCLE(cmd); break; 98 bits |= (ctrl & NAND_CLE) << 12;
99 bits |= (ctrl & NAND_ALE) >> 1;
106 100
107 case NAND_CTL_SETALE: T_NAND_CTL_SETALE(cmd); break; 101#warning Wild guess as gpiosetout() is nowhere defined in the kernel source - tglx
108 case NAND_CTL_CLRALE: T_NAND_CTL_CLRALE(cmd); break; 102 gpiosetout(0x5002, bits);
109 103
110 case NAND_CTL_SETNCE: T_NAND_CTL_SETNCE(cmd); break; 104#ifdef CONFIG_NAND_WORKAROUND
111 case NAND_CTL_CLRNCE: T_NAND_CTL_CLRNCE(cmd); break; 105 /* "some" dev boards busted, blue wired to rts2 :( */
106 rts2setout(2, (ctrl & NAND_CLE) << 1);
107#endif
108 /* allow time for the gpio state to overtake the memory write */
109 udelay(1);
112 } 110 }
113 udelay(1); /* allow time for the gpio state to overtake the memory write */ 111
112 if (cmd != NAND_CMD_NONE)
113 writeb(cmd, chip->IO_ADDR_W);
114} 114}
115 115
116/* 116/*
117 * Main initialization routine 117 * Main initialization routine
118 */ 118 */
119int __init toto_init (void) 119static int __init toto_init(void)
120{ 120{
121 struct nand_chip *this; 121 struct nand_chip *this;
122 int err = 0; 122 int err = 0;
123 123
124 /* Allocate memory for MTD device structure and private data */ 124 /* Allocate memory for MTD device structure and private data */
125 toto_mtd = kmalloc (sizeof(struct mtd_info) + sizeof (struct nand_chip), 125 toto_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
126 GFP_KERNEL);
127 if (!toto_mtd) { 126 if (!toto_mtd) {
128 printk (KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n"); 127 printk(KERN_WARNING "Unable to allocate toto NAND MTD device structure.\n");
129 err = -ENOMEM; 128 err = -ENOMEM;
130 goto out; 129 goto out;
131 } 130 }
132 131
133 /* Get pointer to private data */ 132 /* Get pointer to private data */
134 this = (struct nand_chip *) (&toto_mtd[1]); 133 this = (struct nand_chip *)(&toto_mtd[1]);
135 134
136 /* Initialize structures */ 135 /* Initialize structures */
137 memset((char *) toto_mtd, 0, sizeof(struct mtd_info)); 136 memset(toto_mtd, 0, sizeof(struct mtd_info));
138 memset((char *) this, 0, sizeof(struct nand_chip)); 137 memset(this, 0, sizeof(struct nand_chip));
139 138
140 /* Link the private data with the MTD structure */ 139 /* Link the private data with the MTD structure */
141 toto_mtd->priv = this; 140 toto_mtd->priv = this;
141 toto_mtd->owner = THIS_MODULE;
142 142
143 /* Set address of NAND IO lines */ 143 /* Set address of NAND IO lines */
144 this->IO_ADDR_R = toto_io_base; 144 this->IO_ADDR_R = toto_io_base;
145 this->IO_ADDR_W = toto_io_base; 145 this->IO_ADDR_W = toto_io_base;
146 this->hwcontrol = toto_hwcontrol; 146 this->cmd_ctrl = toto_hwcontrol;
147 this->dev_ready = NULL; 147 this->dev_ready = NULL;
148 /* 25 us command delay time */ 148 /* 25 us command delay time */
149 this->chip_delay = 30; 149 this->chip_delay = 30;
150 this->eccmode = NAND_ECC_SOFT; 150 this->ecc.mode = NAND_ECC_SOFT;
151 151
152 /* Scan to find existence of the device */ 152 /* Scan to find existence of the device */
153 if (nand_scan (toto_mtd, 1)) { 153 if (nand_scan(toto_mtd, 1)) {
154 err = -ENXIO; 154 err = -ENXIO;
155 goto out_mtd; 155 goto out_mtd;
156 } 156 }
157 157
158 /* Register the partitions */ 158 /* Register the partitions */
159 switch(toto_mtd->size){ 159 switch (toto_mtd->size) {
160 case SZ_64M: add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M); break; 160 case SZ_64M:
161 case SZ_32M: add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M); break; 161 add_mtd_partitions(toto_mtd, partition_info64M, NUM_PARTITIONS64M);
162 default: { 162 break;
163 printk (KERN_WARNING "Unsupported Nand device\n"); 163 case SZ_32M:
164 add_mtd_partitions(toto_mtd, partition_info32M, NUM_PARTITIONS32M);
165 break;
166 default:{
167 printk(KERN_WARNING "Unsupported Nand device\n");
164 err = -ENXIO; 168 err = -ENXIO;
165 goto out_buf; 169 goto out_buf;
166 } 170 }
167 } 171 }
168 172
169 gpioreserve(NAND_MASK); /* claim our gpios */ 173 gpioreserve(NAND_MASK); /* claim our gpios */
170 archflashwp(0,0); /* open up flash for writing */ 174 archflashwp(0, 0); /* open up flash for writing */
171 175
172 goto out; 176 goto out;
173 177
174out_buf: 178 out_mtd:
175 kfree (this->data_buf); 179 kfree(toto_mtd);
176out_mtd: 180 out:
177 kfree (toto_mtd);
178out:
179 return err; 181 return err;
180} 182}
181 183
@@ -184,20 +186,21 @@ module_init(toto_init);
184/* 186/*
185 * Clean up routine 187 * Clean up routine
186 */ 188 */
187static void __exit toto_cleanup (void) 189static void __exit toto_cleanup(void)
188{ 190{
189 /* Release resources, unregister device */ 191 /* Release resources, unregister device */
190 nand_release (toto_mtd); 192 nand_release(toto_mtd);
191 193
192 /* Free the MTD device structure */ 194 /* Free the MTD device structure */
193 kfree (toto_mtd); 195 kfree(toto_mtd);
194 196
195 /* stop flash writes */ 197 /* stop flash writes */
196 archflashwp(0,1); 198 archflashwp(0, 1);
197 199
198 /* release gpios to system */ 200 /* release gpios to system */
199 gpiorelease(NAND_MASK); 201 gpiorelease(NAND_MASK);
200} 202}
203
201module_exit(toto_cleanup); 204module_exit(toto_cleanup);
202 205
203MODULE_LICENSE("GPL"); 206MODULE_LICENSE("GPL");
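The magic constant in gpiosetout(0x5002, bits) above is simply the union of the three control-line GPIO bits that the removed defines used to name; written with those old values it would read (illustrative only, since the defines are gone after this patch):

	/* 0x4000 (NCE) | 0x1000 (CLE) | 0x0002 (ALE) == 0x5002 */
	gpiosetout(0x4000 | 0x1000 | 0x0002, bits);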
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
new file mode 100644
index 000000000000..a0b4b1edcb0d
--- /dev/null
+++ b/drivers/mtd/nand/ts7250.c
@@ -0,0 +1,206 @@
1/*
2 * drivers/mtd/nand/ts7250.c
3 *
4 * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
5 *
6 * Derived from drivers/mtd/nand/edb7312.c
7 * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
8 *
9 * Derived from drivers/mtd/nand/autcpu12.c
10 * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
11 *
12 * $Id: ts7250.c,v 1.4 2004/12/30 22:02:07 joff Exp $
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 *
18 * Overview:
19 * This is a device driver for the NAND flash device found on the
20 * TS-7250 board which utilizes a Samsung 32 Mbyte part.
21 */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/init.h>
26#include <linux/mtd/mtd.h>
27#include <linux/mtd/nand.h>
28#include <linux/mtd/partitions.h>
29#include <asm/io.h>
30#include <asm/arch/hardware.h>
31#include <asm/sizes.h>
32#include <asm/mach-types.h>
33
34/*
35 * MTD structure for TS7250 board
36 */
37static struct mtd_info *ts7250_mtd = NULL;
38
39#ifdef CONFIG_MTD_PARTITIONS
40static const char *part_probes[] = { "cmdlinepart", NULL };
41
42#define NUM_PARTITIONS 3
43
44/*
45 * Define static partitions for flash device
46 */
47static struct mtd_partition partition_info32[] = {
48 {
49 .name = "TS-BOOTROM",
50 .offset = 0x00000000,
51 .size = 0x00004000,
52 }, {
53 .name = "Linux",
54 .offset = 0x00004000,
55 .size = 0x01d00000,
56 }, {
57 .name = "RedBoot",
58 .offset = 0x01d04000,
59 .size = 0x002fc000,
60 },
61};
62
63/*
64 * Define static partitions for flash device
65 */
66static struct mtd_partition partition_info128[] = {
67 {
68 .name = "TS-BOOTROM",
69 .offset = 0x00000000,
70 .size = 0x00004000,
71 }, {
72 .name = "Linux",
73 .offset = 0x00004000,
74 .size = 0x07d00000,
75 }, {
76 .name = "RedBoot",
77 .offset = 0x07d04000,
78 .size = 0x002fc000,
79 },
80};
81#endif
82
83
84/*
85 * hardware specific access to control-lines
86 *
87 * ctrl:
88 * NAND_NCE: bit 0 -> bit 2
89 * NAND_CLE: bit 1 -> bit 1
90 * NAND_ALE: bit 2 -> bit 0
91 */
92static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
93{
94 struct nand_chip *chip = mtd->priv;
95
96 if (ctrl & NAND_CTRL_CHANGE) {
97 unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
98 unsigned char bits;
99
100 bits = (ctrl & NAND_NCE) << 2;
101 bits |= ctrl & NAND_CLE;
102 bits |= (ctrl & NAND_ALE) >> 2;
103
104 __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
105 }
106
107 if (cmd != NAND_CMD_NONE)
108 writeb(cmd, chip->IO_ADDR_W);
109}
110
111/*
112 * read device ready pin
113 */
114static int ts7250_device_ready(struct mtd_info *mtd)
115{
116 return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
117}
118
119/*
120 * Main initialization routine
121 */
122static int __init ts7250_init(void)
123{
124 struct nand_chip *this;
125 const char *part_type = 0;
126 int mtd_parts_nb = 0;
127 struct mtd_partition *mtd_parts = 0;
128
129 if (!machine_is_ts72xx() || board_is_ts7200())
130 return -ENXIO;
131
132 /* Allocate memory for MTD device structure and private data */
133 ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
134 if (!ts7250_mtd) {
135 printk("Unable to allocate TS7250 NAND MTD device structure.\n");
136 return -ENOMEM;
137 }
138
139 /* Get pointer to private data */
140 this = (struct nand_chip *)(&ts7250_mtd[1]);
141
142 /* Initialize structures */
143 memset(ts7250_mtd, 0, sizeof(struct mtd_info));
144 memset(this, 0, sizeof(struct nand_chip));
145
146 /* Link the private data with the MTD structure */
147 ts7250_mtd->priv = this;
148 ts7250_mtd->owner = THIS_MODULE;
149
150 /* insert callbacks */
151 this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
152 this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
153 this->cmd_ctrl = ts7250_hwcontrol;
154 this->dev_ready = ts7250_device_ready;
155 this->chip_delay = 15;
156 this->ecc.mode = NAND_ECC_SOFT;
157
158 printk("Searching for NAND flash...\n");
159 /* Scan to find existence of the device */
160 if (nand_scan(ts7250_mtd, 1)) {
161 kfree(ts7250_mtd);
162 return -ENXIO;
163 }
164#ifdef CONFIG_MTD_PARTITIONS
165 ts7250_mtd->name = "ts7250-nand";
166 mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
167 if (mtd_parts_nb > 0)
168 part_type = "command line";
169 else
170 mtd_parts_nb = 0;
171#endif
172 if (mtd_parts_nb == 0) {
173 mtd_parts = partition_info32;
174 if (ts7250_mtd->size >= (128 * 0x100000))
175 mtd_parts = partition_info128;
176 mtd_parts_nb = NUM_PARTITIONS;
177 part_type = "static";
178 }
179
180 /* Register the partitions */
181 printk(KERN_NOTICE "Using %s partition definition\n", part_type);
182 add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
183
184 /* Return happy */
185 return 0;
186}
187
188module_init(ts7250_init);
189
190/*
191 * Clean up routine
192 */
193static void __exit ts7250_cleanup(void)
194{
195 /* Unregister the device */
196 del_mtd_device(ts7250_mtd);
197
198 /* Free the MTD device structure */
199 kfree(ts7250_mtd);
200}
201
202module_exit(ts7250_cleanup);
203
204MODULE_LICENSE("GPL");
205MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
206MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index d7cd5fa16ba4..dc7573501d8c 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -70,8 +70,6 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
70 nftl->mbd.devnum = -1; 70 nftl->mbd.devnum = -1;
71 nftl->mbd.blksize = 512; 71 nftl->mbd.blksize = 512;
72 nftl->mbd.tr = tr; 72 nftl->mbd.tr = tr;
73 memcpy(&nftl->oobinfo, &mtd->oobinfo, sizeof(struct nand_oobinfo));
74 nftl->oobinfo.useecc = MTD_NANDECC_PLACEONLY;
75 73
76 if (NFTL_mount(nftl) < 0) { 74 if (NFTL_mount(nftl) < 0) {
77 printk(KERN_WARNING "NFTL: could not mount device\n"); 75 printk(KERN_WARNING "NFTL: could not mount device\n");
@@ -136,6 +134,69 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev)
136 kfree(nftl); 134 kfree(nftl);
137} 135}
138 136
137/*
138 * Read oob data from flash
139 */
140int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
141 size_t *retlen, uint8_t *buf)
142{
143 struct mtd_oob_ops ops;
144 int res;
145
146 ops.mode = MTD_OOB_PLACE;
147 ops.ooboffs = offs & (mtd->writesize - 1);
148 ops.ooblen = len;
149 ops.oobbuf = buf;
150 ops.datbuf = NULL;
151 ops.len = len;
152
153 res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
154 *retlen = ops.retlen;
155 return res;
156}
157
158/*
159 * Write oob data to flash
160 */
161int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
162 size_t *retlen, uint8_t *buf)
163{
164 struct mtd_oob_ops ops;
165 int res;
166
167 ops.mode = MTD_OOB_PLACE;
168 ops.ooboffs = offs & (mtd->writesize - 1);
169 ops.ooblen = len;
170 ops.oobbuf = buf;
171 ops.datbuf = NULL;
172 ops.len = len;
173
174 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
175 *retlen = ops.retlen;
176 return res;
177}
178
179/*
180 * Write data and oob to flash
181 */
182static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
183 size_t *retlen, uint8_t *buf, uint8_t *oob)
184{
185 struct mtd_oob_ops ops;
186 int res;
187
188 ops.mode = MTD_OOB_PLACE;
189 ops.ooboffs = offs;
190 ops.ooblen = mtd->oobsize;
191 ops.oobbuf = oob;
192 ops.datbuf = buf;
193 ops.len = len;
194
195 res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
196 *retlen = ops.retlen;
197 return res;
198}
199
139#ifdef CONFIG_NFTL_RW 200#ifdef CONFIG_NFTL_RW
140 201
141/* Actual NFTL access routines */ 202/* Actual NFTL access routines */
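The helpers above split a raw flash offset into the page address handed to read_oob()/write_oob() and the ooboffs within that page. With a 512-byte writesize, for example, nftl_read_oob(mtd, 0x21008, 8, &retlen, buf) issues mtd->read_oob() at page address 0x21000 with ops.ooboffs = 8, i.e. it returns the 8 OOB bytes of that sector starting at OOB byte 8.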
@@ -185,6 +246,7 @@ static u16 NFTL_findfreeblock(struct NFTLrecord *nftl, int desperate )
185 246
186static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock ) 247static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned pendingblock )
187{ 248{
249 struct mtd_info *mtd = nftl->mbd.mtd;
188 u16 BlockMap[MAX_SECTORS_PER_UNIT]; 250 u16 BlockMap[MAX_SECTORS_PER_UNIT];
189 unsigned char BlockLastState[MAX_SECTORS_PER_UNIT]; 251 unsigned char BlockLastState[MAX_SECTORS_PER_UNIT];
190 unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT]; 252 unsigned char BlockFreeFound[MAX_SECTORS_PER_UNIT];
@@ -194,7 +256,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
194 unsigned int targetEUN; 256 unsigned int targetEUN;
195 struct nftl_oob oob; 257 struct nftl_oob oob;
196 int inplace = 1; 258 int inplace = 1;
197 size_t retlen; 259 size_t retlen;
198 260
199 memset(BlockMap, 0xff, sizeof(BlockMap)); 261 memset(BlockMap, 0xff, sizeof(BlockMap));
200 memset(BlockFreeFound, 0, sizeof(BlockFreeFound)); 262 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
@@ -210,21 +272,21 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
210 /* Scan to find the Erase Unit which holds the actual data for each 272 /* Scan to find the Erase Unit which holds the actual data for each
211 512-byte block within the Chain. 273 512-byte block within the Chain.
212 */ 274 */
213 silly = MAX_LOOPS; 275 silly = MAX_LOOPS;
214 targetEUN = BLOCK_NIL; 276 targetEUN = BLOCK_NIL;
215 while (thisEUN <= nftl->lastEUN ) { 277 while (thisEUN <= nftl->lastEUN ) {
216 unsigned int status, foldmark; 278 unsigned int status, foldmark;
217 279
218 targetEUN = thisEUN; 280 targetEUN = thisEUN;
219 for (block = 0; block < nftl->EraseSize / 512; block ++) { 281 for (block = 0; block < nftl->EraseSize / 512; block ++) {
220 MTD_READOOB(nftl->mbd.mtd, 282 nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
221 (thisEUN * nftl->EraseSize) + (block * 512), 283 (block * 512), 16 , &retlen,
222 16 , &retlen, (char *)&oob); 284 (char *)&oob);
223 if (block == 2) { 285 if (block == 2) {
224 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1; 286 foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
225 if (foldmark == FOLD_MARK_IN_PROGRESS) { 287 if (foldmark == FOLD_MARK_IN_PROGRESS) {
226 DEBUG(MTD_DEBUG_LEVEL1, 288 DEBUG(MTD_DEBUG_LEVEL1,
227 "Write Inhibited on EUN %d\n", thisEUN); 289 "Write Inhibited on EUN %d\n", thisEUN);
228 inplace = 0; 290 inplace = 0;
229 } else { 291 } else {
230 /* There's no other reason not to do inplace, 292 /* There's no other reason not to do inplace,
@@ -233,7 +295,7 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
233 inplace = 1; 295 inplace = 1;
234 } 296 }
235 } 297 }
236 status = oob.b.Status | oob.b.Status1; 298 status = oob.b.Status | oob.b.Status1;
237 BlockLastState[block] = status; 299 BlockLastState[block] = status;
238 300
239 switch(status) { 301 switch(status) {
@@ -328,15 +390,15 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
328 return BLOCK_NIL; 390 return BLOCK_NIL;
329 } 391 }
330 } else { 392 } else {
331 /* We put a fold mark in the chain we are folding only if 393 /* We put a fold mark in the chain we are folding only if we
332 we fold in place to help the mount check code. If we do 394 fold in place to help the mount check code. If we do not fold in
333 not fold in place, it is possible to find the valid 395 place, it is possible to find the valid chain by selecting the
334 chain by selecting the longer one */ 396 longer one */
335 oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS); 397 oob.u.c.FoldMark = oob.u.c.FoldMark1 = cpu_to_le16(FOLD_MARK_IN_PROGRESS);
336 oob.u.c.unused = 0xffffffff; 398 oob.u.c.unused = 0xffffffff;
337 MTD_WRITEOOB(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8, 399 nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 2 * 512 + 8,
338 8, &retlen, (char *)&oob.u); 400 8, &retlen, (char *)&oob.u);
339 } 401 }
340 402
341 /* OK. We now know the location of every block in the Virtual Unit Chain, 403 /* OK. We now know the location of every block in the Virtual Unit Chain,
342 and the Erase Unit into which we are supposed to be copying. 404 and the Erase Unit into which we are supposed to be copying.
@@ -353,33 +415,33 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
353 continue; 415 continue;
354 } 416 }
355 417
356 /* copy only in non free block (free blocks can only 418 /* copy only in non free block (free blocks can only
357 happen in case of media errors or deleted blocks) */ 419 happen in case of media errors or deleted blocks) */
358 if (BlockMap[block] == BLOCK_NIL) 420 if (BlockMap[block] == BLOCK_NIL)
359 continue; 421 continue;
360 422
361 ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512), 423 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
362 512, &retlen, movebuf); 424 512, &retlen, movebuf);
363 if (ret < 0) { 425 if (ret < 0 && ret != -EUCLEAN) {
364 ret = MTD_READ(nftl->mbd.mtd, (nftl->EraseSize * BlockMap[block]) 426 ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
365 + (block * 512), 512, &retlen, 427 + (block * 512), 512, &retlen,
366 movebuf); 428 movebuf);
367 if (ret != -EIO) 429 if (ret != -EIO)
368 printk("Error went away on retry.\n"); 430 printk("Error went away on retry.\n");
369 } 431 }
370 memset(&oob, 0xff, sizeof(struct nftl_oob)); 432 memset(&oob, 0xff, sizeof(struct nftl_oob));
371 oob.b.Status = oob.b.Status1 = SECTOR_USED; 433 oob.b.Status = oob.b.Status1 = SECTOR_USED;
372 MTD_WRITEECC(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + (block * 512), 434
373 512, &retlen, movebuf, (char *)&oob, &nftl->oobinfo); 435 nftl_write(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) +
436 (block * 512), 512, &retlen, movebuf, (char *)&oob);
374 } 437 }
375 438
376 /* add the header so that it is now a valid chain */ 439 /* add the header so that it is now a valid chain */
377 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum 440 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
378 = cpu_to_le16(thisVUC); 441 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
379 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum = 0xffff;
380 442
381 MTD_WRITEOOB(nftl->mbd.mtd, (nftl->EraseSize * targetEUN) + 8, 443 nftl_write_oob(mtd, (nftl->EraseSize * targetEUN) + 8,
382 8, &retlen, (char *)&oob.u); 444 8, &retlen, (char *)&oob.u);
383 445
384 /* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */ 446 /* OK. We've moved the whole lot into the new block. Now we have to free the original blocks. */
385 447
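A read that returns -EUCLEAN has already been corrected by ECC, so the buffer contents are valid and the call must not be treated as failed; only other negative codes are real errors. The checks added above amount to this pattern (sketch only):

	ret = mtd->read(mtd, from, 512, &retlen, movebuf);
	if (ret == -EUCLEAN)
		ret = 0;	/* bitflip corrected by ECC: data is good */
	if (ret < 0)
		return -EIO;	/* uncorrectable: give up */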
@@ -396,18 +458,18 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
396 while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) { 458 while (thisEUN <= nftl->lastEUN && thisEUN != targetEUN) {
397 unsigned int EUNtmp; 459 unsigned int EUNtmp;
398 460
399 EUNtmp = nftl->ReplUnitTable[thisEUN]; 461 EUNtmp = nftl->ReplUnitTable[thisEUN];
400 462
401 if (NFTL_formatblock(nftl, thisEUN) < 0) { 463 if (NFTL_formatblock(nftl, thisEUN) < 0) {
402 /* could not erase : mark block as reserved 464 /* could not erase : mark block as reserved
403 */ 465 */
404 nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED; 466 nftl->ReplUnitTable[thisEUN] = BLOCK_RESERVED;
405 } else { 467 } else {
406 /* correctly erased : mark it as free */ 468 /* correctly erased : mark it as free */
407 nftl->ReplUnitTable[thisEUN] = BLOCK_FREE; 469 nftl->ReplUnitTable[thisEUN] = BLOCK_FREE;
408 nftl->numfreeEUNs++; 470 nftl->numfreeEUNs++;
409 } 471 }
410 thisEUN = EUNtmp; 472 thisEUN = EUNtmp;
411 } 473 }
412 474
413 /* Make this the new start of chain for thisVUC */ 475 /* Make this the new start of chain for thisVUC */
@@ -473,6 +535,7 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
473{ 535{
474 u16 lastEUN; 536 u16 lastEUN;
475 u16 thisVUC = block / (nftl->EraseSize / 512); 537 u16 thisVUC = block / (nftl->EraseSize / 512);
538 struct mtd_info *mtd = nftl->mbd.mtd;
476 unsigned int writeEUN; 539 unsigned int writeEUN;
477 unsigned long blockofs = (block * 512) & (nftl->EraseSize -1); 540 unsigned long blockofs = (block * 512) & (nftl->EraseSize -1);
478 size_t retlen; 541 size_t retlen;
@@ -489,21 +552,22 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
489 */ 552 */
490 lastEUN = BLOCK_NIL; 553 lastEUN = BLOCK_NIL;
491 writeEUN = nftl->EUNtable[thisVUC]; 554 writeEUN = nftl->EUNtable[thisVUC];
492 silly = MAX_LOOPS; 555 silly = MAX_LOOPS;
493 while (writeEUN <= nftl->lastEUN) { 556 while (writeEUN <= nftl->lastEUN) {
494 struct nftl_bci bci; 557 struct nftl_bci bci;
495 size_t retlen; 558 size_t retlen;
496 unsigned int status; 559 unsigned int status;
497 560
498 lastEUN = writeEUN; 561 lastEUN = writeEUN;
499 562
500 MTD_READOOB(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs, 563 nftl_read_oob(mtd,
501 8, &retlen, (char *)&bci); 564 (writeEUN * nftl->EraseSize) + blockofs,
565 8, &retlen, (char *)&bci);
502 566
503 DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n", 567 DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n",
504 block , writeEUN, le16_to_cpu(bci.Status)); 568 block , writeEUN, le16_to_cpu(bci.Status));
505 569
506 status = bci.Status | bci.Status1; 570 status = bci.Status | bci.Status1;
507 switch(status) { 571 switch(status) {
508 case SECTOR_FREE: 572 case SECTOR_FREE:
509 return writeEUN; 573 return writeEUN;
@@ -574,10 +638,10 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
574 /* We've found a free block. Insert it into the chain. */ 638 /* We've found a free block. Insert it into the chain. */
575 639
576 if (lastEUN != BLOCK_NIL) { 640 if (lastEUN != BLOCK_NIL) {
577 thisVUC |= 0x8000; /* It's a replacement block */ 641 thisVUC |= 0x8000; /* It's a replacement block */
578 } else { 642 } else {
579 /* The first block in a new chain */ 643 /* The first block in a new chain */
580 nftl->EUNtable[thisVUC] = writeEUN; 644 nftl->EUNtable[thisVUC] = writeEUN;
581 } 645 }
582 646
583 /* set up the actual EUN we're writing into */ 647 /* set up the actual EUN we're writing into */
@@ -585,29 +649,29 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block)
585 nftl->ReplUnitTable[writeEUN] = BLOCK_NIL; 649 nftl->ReplUnitTable[writeEUN] = BLOCK_NIL;
586 650
587 /* ... and on the flash itself */ 651 /* ... and on the flash itself */
588 MTD_READOOB(nftl->mbd.mtd, writeEUN * nftl->EraseSize + 8, 8, 652 nftl_read_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
589 &retlen, (char *)&oob.u); 653 &retlen, (char *)&oob.u);
590 654
591 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC); 655 oob.u.a.VirtUnitNum = oob.u.a.SpareVirtUnitNum = cpu_to_le16(thisVUC);
592 656
593 MTD_WRITEOOB(nftl->mbd.mtd, writeEUN * nftl->EraseSize + 8, 8, 657 nftl_write_oob(mtd, writeEUN * nftl->EraseSize + 8, 8,
594 &retlen, (char *)&oob.u); 658 &retlen, (char *)&oob.u);
595 659
596 /* we link the new block to the chain only after the 660 /* we link the new block to the chain only after the
597 block is ready. It avoids the case where the chain 661 block is ready. It avoids the case where the chain
598 could point to a free block */ 662 could point to a free block */
599 if (lastEUN != BLOCK_NIL) { 663 if (lastEUN != BLOCK_NIL) {
600 /* Both in our cache... */ 664 /* Both in our cache... */
601 nftl->ReplUnitTable[lastEUN] = writeEUN; 665 nftl->ReplUnitTable[lastEUN] = writeEUN;
602 /* ... and on the flash itself */ 666 /* ... and on the flash itself */
603 MTD_READOOB(nftl->mbd.mtd, (lastEUN * nftl->EraseSize) + 8, 667 nftl_read_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
604 8, &retlen, (char *)&oob.u); 668 8, &retlen, (char *)&oob.u);
605 669
606 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum 670 oob.u.a.ReplUnitNum = oob.u.a.SpareReplUnitNum
607 = cpu_to_le16(writeEUN); 671 = cpu_to_le16(writeEUN);
608 672
609 MTD_WRITEOOB(nftl->mbd.mtd, (lastEUN * nftl->EraseSize) + 8, 673 nftl_write_oob(mtd, (lastEUN * nftl->EraseSize) + 8,
610 8, &retlen, (char *)&oob.u); 674 8, &retlen, (char *)&oob.u);
611 } 675 }
612 676
613 return writeEUN; 677 return writeEUN;
@@ -639,10 +703,9 @@ static int nftl_writeblock(struct mtd_blktrans_dev *mbd, unsigned long block,
639 703
640 memset(&oob, 0xff, sizeof(struct nftl_oob)); 704 memset(&oob, 0xff, sizeof(struct nftl_oob));
641 oob.b.Status = oob.b.Status1 = SECTOR_USED; 705 oob.b.Status = oob.b.Status1 = SECTOR_USED;
642 MTD_WRITEECC(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
643 512, &retlen, (char *)buffer, (char *)&oob, &nftl->oobinfo);
644 /* need to write SECTOR_USED flags since they are not written in mtd_writeecc */
645 706
707 nftl_write(nftl->mbd.mtd, (writeEUN * nftl->EraseSize) + blockofs,
708 512, &retlen, (char *)buffer, (char *)&oob);
646 return 0; 709 return 0;
647} 710}
648#endif /* CONFIG_NFTL_RW */ 711#endif /* CONFIG_NFTL_RW */
@@ -651,20 +714,22 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
651 char *buffer) 714 char *buffer)
652{ 715{
653 struct NFTLrecord *nftl = (void *)mbd; 716 struct NFTLrecord *nftl = (void *)mbd;
717 struct mtd_info *mtd = nftl->mbd.mtd;
654 u16 lastgoodEUN; 718 u16 lastgoodEUN;
655 u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)]; 719 u16 thisEUN = nftl->EUNtable[block / (nftl->EraseSize / 512)];
656 unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1); 720 unsigned long blockofs = (block * 512) & (nftl->EraseSize - 1);
657 unsigned int status; 721 unsigned int status;
658 int silly = MAX_LOOPS; 722 int silly = MAX_LOOPS;
659 size_t retlen; 723 size_t retlen;
660 struct nftl_bci bci; 724 struct nftl_bci bci;
661 725
662 lastgoodEUN = BLOCK_NIL; 726 lastgoodEUN = BLOCK_NIL;
663 727
664 if (thisEUN != BLOCK_NIL) { 728 if (thisEUN != BLOCK_NIL) {
665 while (thisEUN < nftl->nb_blocks) { 729 while (thisEUN < nftl->nb_blocks) {
666 if (MTD_READOOB(nftl->mbd.mtd, (thisEUN * nftl->EraseSize) + blockofs, 730 if (nftl_read_oob(mtd, (thisEUN * nftl->EraseSize) +
667 8, &retlen, (char *)&bci) < 0) 731 blockofs, 8, &retlen,
732 (char *)&bci) < 0)
668 status = SECTOR_IGNORE; 733 status = SECTOR_IGNORE;
669 else 734 else
670 status = bci.Status | bci.Status1; 735 status = bci.Status | bci.Status1;
@@ -694,7 +759,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
694 } 759 }
695 thisEUN = nftl->ReplUnitTable[thisEUN]; 760 thisEUN = nftl->ReplUnitTable[thisEUN];
696 } 761 }
697 } 762 }
698 763
699 the_end: 764 the_end:
700 if (lastgoodEUN == BLOCK_NIL) { 765 if (lastgoodEUN == BLOCK_NIL) {
@@ -703,7 +768,9 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
703 } else { 768 } else {
704 loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs; 769 loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
705 size_t retlen; 770 size_t retlen;
706 if (MTD_READ(nftl->mbd.mtd, ptr, 512, &retlen, buffer)) 771 int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
772
773 if (res < 0 && res != -EUCLEAN)
707 return -EIO; 774 return -EIO;
708 } 775 }
709 return 0; 776 return 0;
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 3b104ebb219a..067262ee8df0 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -33,6 +33,11 @@
33 33
34char nftlmountrev[]="$Revision: 1.41 $"; 34char nftlmountrev[]="$Revision: 1.41 $";
35 35
36extern int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
37 size_t *retlen, uint8_t *buf);
38extern int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
39 size_t *retlen, uint8_t *buf);
40
36/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the 41/* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
37 * various device information of the NFTL partition and Bad Unit Table. Update 42 * various device information of the NFTL partition and Bad Unit Table. Update
38 * the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[] 43
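The two externs mirror the helpers added to nftlcore.c earlier in this patch; if they grow any further users, the prototypes would more naturally live in a shared NFTL header instead of being re-declared here, e.g. (illustrative, not part of this patch):

	/* shared NFTL header, included by nftlcore.c and nftlmount.c */
	int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
			  size_t *retlen, uint8_t *buf);
	int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
			   size_t *retlen, uint8_t *buf);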
@@ -45,6 +50,7 @@ static int find_boot_record(struct NFTLrecord *nftl)
45 size_t retlen; 50 size_t retlen;
46 u8 buf[SECTORSIZE]; 51 u8 buf[SECTORSIZE];
47 struct NFTLMediaHeader *mh = &nftl->MediaHdr; 52 struct NFTLMediaHeader *mh = &nftl->MediaHdr;
53 struct mtd_info *mtd = nftl->mbd.mtd;
48 unsigned int i; 54 unsigned int i;
49 55
50 /* Assume logical EraseSize == physical erasesize for starting the scan. 56 /* Assume logical EraseSize == physical erasesize for starting the scan.
@@ -65,7 +71,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
65 71
66 /* Check for ANAND header first. Then can whinge if it's found but later 72 /* Check for ANAND header first. Then can whinge if it's found but later
67 checks fail */ 73 checks fail */
68 ret = MTD_READ(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE, &retlen, buf); 74 ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
75 &retlen, buf);
69 /* We ignore ret in case the ECC of the MediaHeader is invalid 76 /* We ignore ret in case the ECC of the MediaHeader is invalid
70 (which is apparently acceptable) */ 77 (which is apparently acceptable) */
71 if (retlen != SECTORSIZE) { 78 if (retlen != SECTORSIZE) {
@@ -90,8 +97,9 @@ static int find_boot_record(struct NFTLrecord *nftl)
90 } 97 }
91 98
92 /* To be safer with BIOS, also use erase mark as discriminant */ 99 /* To be safer with BIOS, also use erase mark as discriminant */
93 if ((ret = MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 100 if ((ret = nftl_read_oob(mtd, block * nftl->EraseSize +
94 8, &retlen, (char *)&h1) < 0)) { 101 SECTORSIZE + 8, 8, &retlen,
102 (char *)&h1) < 0)) {
95 printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n", 103 printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n",
96 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); 104 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
97 continue; 105 continue;
@@ -109,8 +117,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
109 } 117 }
110 118
111 /* Finally reread to check ECC */ 119 /* Finally reread to check ECC */
112 if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize, SECTORSIZE, 120 if ((ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
113 &retlen, buf, (char *)&oob, NULL) < 0)) { 121 &retlen, buf) < 0)) {
114 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n", 122 printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n",
115 block * nftl->EraseSize, nftl->mbd.mtd->index, ret); 123 block * nftl->EraseSize, nftl->mbd.mtd->index, ret);
116 continue; 124 continue;
@@ -228,9 +236,9 @@ device is already correct.
228The new DiskOnChip driver already scanned the bad block table. Just query it. 236The new DiskOnChip driver already scanned the bad block table. Just query it.
229 if ((i & (SECTORSIZE - 1)) == 0) { 237 if ((i & (SECTORSIZE - 1)) == 0) {
230 /* read one sector for every SECTORSIZE of blocks */ 238 /* read one sector for every SECTORSIZE of blocks */
231 if ((ret = MTD_READECC(nftl->mbd.mtd, block * nftl->EraseSize + 239 if ((ret = mtd->read(nftl->mbd.mtd, block * nftl->EraseSize +
232 i + SECTORSIZE, SECTORSIZE, &retlen, buf, 240 i + SECTORSIZE, SECTORSIZE, &retlen,
233 (char *)&oob, NULL)) < 0) { 241 buf)) < 0) {
234 printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n", 242 printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n",
235 ret); 243 ret);
236 kfree(nftl->ReplUnitTable); 244 kfree(nftl->ReplUnitTable);
@@ -268,18 +276,22 @@ static int memcmpb(void *a, int c, int n)
268static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len, 276static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int len,
269 int check_oob) 277 int check_oob)
270{ 278{
271 int i;
272 size_t retlen;
273 u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize]; 279 u8 buf[SECTORSIZE + nftl->mbd.mtd->oobsize];
280 struct mtd_info *mtd = nftl->mbd.mtd;
281 size_t retlen;
282 int i;
274 283
275 for (i = 0; i < len; i += SECTORSIZE) { 284 for (i = 0; i < len; i += SECTORSIZE) {
276 if (MTD_READECC(nftl->mbd.mtd, address, SECTORSIZE, &retlen, buf, &buf[SECTORSIZE], &nftl->oobinfo) < 0) 285 if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
277 return -1; 286 return -1;
278 if (memcmpb(buf, 0xff, SECTORSIZE) != 0) 287 if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
279 return -1; 288 return -1;
280 289
281 if (check_oob) { 290 if (check_oob) {
282 if (memcmpb(buf + SECTORSIZE, 0xff, nftl->mbd.mtd->oobsize) != 0) 291 if(nftl_read_oob(mtd, address, mtd->oobsize,
292 &retlen, &buf[SECTORSIZE]) < 0)
293 return -1;
294 if (memcmpb(buf + SECTORSIZE, 0xff, mtd->oobsize) != 0)
283 return -1; 295 return -1;
284 } 296 }
285 address += SECTORSIZE; 297 address += SECTORSIZE;
@@ -301,10 +313,11 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
301 unsigned int nb_erases, erase_mark; 313 unsigned int nb_erases, erase_mark;
302 struct nftl_uci1 uci; 314 struct nftl_uci1 uci;
303 struct erase_info *instr = &nftl->instr; 315 struct erase_info *instr = &nftl->instr;
316 struct mtd_info *mtd = nftl->mbd.mtd;
304 317
305 /* Read the Unit Control Information #1 for Wear-Leveling */ 318 /* Read the Unit Control Information #1 for Wear-Leveling */
306 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 319 if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8,
307 8, &retlen, (char *)&uci) < 0) 320 8, &retlen, (char *)&uci) < 0)
308 goto default_uci1; 321 goto default_uci1;
309 322
310 erase_mark = le16_to_cpu ((uci.EraseMark | uci.EraseMark1)); 323 erase_mark = le16_to_cpu ((uci.EraseMark | uci.EraseMark1));
@@ -321,7 +334,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
321 instr->mtd = nftl->mbd.mtd; 334 instr->mtd = nftl->mbd.mtd;
322 instr->addr = block * nftl->EraseSize; 335 instr->addr = block * nftl->EraseSize;
323 instr->len = nftl->EraseSize; 336 instr->len = nftl->EraseSize;
324 MTD_ERASE(nftl->mbd.mtd, instr); 337 mtd->erase(mtd, instr);
325 338
326 if (instr->state == MTD_ERASE_FAILED) { 339 if (instr->state == MTD_ERASE_FAILED) {
327 printk("Error while formatting block %d\n", block); 340 printk("Error while formatting block %d\n", block);
@@ -343,8 +356,8 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
343 goto fail; 356 goto fail;
344 357
345 uci.WearInfo = le32_to_cpu(nb_erases); 358 uci.WearInfo = le32_to_cpu(nb_erases);
346 if (MTD_WRITEOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8, 359 if (nftl_write_oob(mtd, block * nftl->EraseSize + SECTORSIZE +
347 &retlen, (char *)&uci) < 0) 360 8, 8, &retlen, (char *)&uci) < 0)
348 goto fail; 361 goto fail;
349 return 0; 362 return 0;
350fail: 363fail:
@@ -365,6 +378,7 @@ fail:
365 * case. */ 378 * case. */
366static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block) 379static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
367{ 380{
381 struct mtd_info *mtd = nftl->mbd.mtd;
368 unsigned int block, i, status; 382 unsigned int block, i, status;
369 struct nftl_bci bci; 383 struct nftl_bci bci;
370 int sectors_per_block; 384 int sectors_per_block;
@@ -374,8 +388,9 @@ static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_b
374 block = first_block; 388 block = first_block;
375 for (;;) { 389 for (;;) {
376 for (i = 0; i < sectors_per_block; i++) { 390 for (i = 0; i < sectors_per_block; i++) {
377 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + i * SECTORSIZE, 391 if (nftl_read_oob(mtd,
378 8, &retlen, (char *)&bci) < 0) 392 block * nftl->EraseSize + i * SECTORSIZE,
393 8, &retlen, (char *)&bci) < 0)
379 status = SECTOR_IGNORE; 394 status = SECTOR_IGNORE;
380 else 395 else
381 status = bci.Status | bci.Status1; 396 status = bci.Status | bci.Status1;
@@ -394,9 +409,10 @@ static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_b
394 /* sector not free actually : mark it as SECTOR_IGNORE */ 409 /* sector not free actually : mark it as SECTOR_IGNORE */
395 bci.Status = SECTOR_IGNORE; 410 bci.Status = SECTOR_IGNORE;
396 bci.Status1 = SECTOR_IGNORE; 411 bci.Status1 = SECTOR_IGNORE;
397 MTD_WRITEOOB(nftl->mbd.mtd, 412 nftl_write_oob(mtd, block *
398 block * nftl->EraseSize + i * SECTORSIZE, 413 nftl->EraseSize +
399 8, &retlen, (char *)&bci); 414 i * SECTORSIZE, 8,
415 &retlen, (char *)&bci);
400 } 416 }
401 break; 417 break;
402 default: 418 default:
@@ -481,13 +497,14 @@ static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
481 * 1. */ 497 * 1. */
482static int check_and_mark_free_block(struct NFTLrecord *nftl, int block) 498static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
483{ 499{
500 struct mtd_info *mtd = nftl->mbd.mtd;
484 struct nftl_uci1 h1; 501 struct nftl_uci1 h1;
485 unsigned int erase_mark; 502 unsigned int erase_mark;
486 size_t retlen; 503 size_t retlen;
487 504
488 /* check erase mark. */ 505 /* check erase mark. */
489 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8, 506 if (nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8,
490 &retlen, (char *)&h1) < 0) 507 &retlen, (char *)&h1) < 0)
491 return -1; 508 return -1;
492 509
493 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1)); 510 erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
@@ -501,8 +518,9 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
501 h1.EraseMark = cpu_to_le16(ERASE_MARK); 518 h1.EraseMark = cpu_to_le16(ERASE_MARK);
502 h1.EraseMark1 = cpu_to_le16(ERASE_MARK); 519 h1.EraseMark1 = cpu_to_le16(ERASE_MARK);
503 h1.WearInfo = cpu_to_le32(0); 520 h1.WearInfo = cpu_to_le32(0);
504 if (MTD_WRITEOOB(nftl->mbd.mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8, 521 if (nftl_write_oob(mtd,
505 &retlen, (char *)&h1) < 0) 522 block * nftl->EraseSize + SECTORSIZE + 8, 8,
523 &retlen, (char *)&h1) < 0)
506 return -1; 524 return -1;
507 } else { 525 } else {
508#if 0 526#if 0
@@ -513,8 +531,8 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
513 SECTORSIZE, 0) != 0) 531 SECTORSIZE, 0) != 0)
514 return -1; 532 return -1;
515 533
516 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + i, 534 if (nftl_read_oob(mtd, block * nftl->EraseSize + i,
517 16, &retlen, buf) < 0) 535 16, &retlen, buf) < 0)
518 return -1; 536 return -1;
519 if (i == SECTORSIZE) { 537 if (i == SECTORSIZE) {
520 /* skip erase mark */ 538 /* skip erase mark */
@@ -540,11 +558,12 @@ static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
540 */ 558 */
541static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block) 559static int get_fold_mark(struct NFTLrecord *nftl, unsigned int block)
542{ 560{
561 struct mtd_info *mtd = nftl->mbd.mtd;
543 struct nftl_uci2 uci; 562 struct nftl_uci2 uci;
544 size_t retlen; 563 size_t retlen;
545 564
546 if (MTD_READOOB(nftl->mbd.mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8, 565 if (nftl_read_oob(mtd, block * nftl->EraseSize + 2 * SECTORSIZE + 8,
547 8, &retlen, (char *)&uci) < 0) 566 8, &retlen, (char *)&uci) < 0)
548 return 0; 567 return 0;
549 568
550 return le16_to_cpu((uci.FoldMark | uci.FoldMark1)); 569 return le16_to_cpu((uci.FoldMark | uci.FoldMark1));
@@ -558,6 +577,7 @@ int NFTL_mount(struct NFTLrecord *s)
558 int chain_length, do_format_chain; 577 int chain_length, do_format_chain;
559 struct nftl_uci0 h0; 578 struct nftl_uci0 h0;
560 struct nftl_uci1 h1; 579 struct nftl_uci1 h1;
580 struct mtd_info *mtd = s->mbd.mtd;
561 size_t retlen; 581 size_t retlen;
562 582
563 /* search for NFTL MediaHeader and Spare NFTL Media Header */ 583 /* search for NFTL MediaHeader and Spare NFTL Media Header */
@@ -582,10 +602,13 @@ int NFTL_mount(struct NFTLrecord *s)
582 602
583 for (;;) { 603 for (;;) {
584 /* read the block header. If error, we format the chain */ 604 /* read the block header. If error, we format the chain */
585 if (MTD_READOOB(s->mbd.mtd, block * s->EraseSize + 8, 8, 605 if (nftl_read_oob(mtd,
586 &retlen, (char *)&h0) < 0 || 606 block * s->EraseSize + 8, 8,
587 MTD_READOOB(s->mbd.mtd, block * s->EraseSize + SECTORSIZE + 8, 8, 607 &retlen, (char *)&h0) < 0 ||
588 &retlen, (char *)&h1) < 0) { 608 nftl_read_oob(mtd,
609 block * s->EraseSize +
610 SECTORSIZE + 8, 8,
611 &retlen, (char *)&h1) < 0) {
589 s->ReplUnitTable[block] = BLOCK_NIL; 612 s->ReplUnitTable[block] = BLOCK_NIL;
590 do_format_chain = 1; 613 do_format_chain = 1;
591 break; 614 break;
diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig
index 126ff6bf63d5..5930a03736d7 100644
--- a/drivers/mtd/onenand/Kconfig
+++ b/drivers/mtd/onenand/Kconfig
@@ -29,6 +29,20 @@ config MTD_ONENAND_GENERIC
29 help 29 help
30 Support for OneNAND flash via platform device driver. 30 Support for OneNAND flash via platform device driver.
31 31
32config MTD_ONENAND_OTP
33 bool "OneNAND OTP Support"
34 depends on MTD_ONENAND
35 help
36 One Block of the NAND Flash Array memory is reserved as
37 a One-Time Programmable Block memory area.
 38	  The 1st Block of the NAND Flash Array can also be used as OTP.
39
40 The OTP block can be read, programmed and locked using the same
41 operations as any other NAND Flash Array memory block.
 42	  The OTP block cannot be erased.
43
 44	  The OTP block is guaranteed to be a valid block.
45
32config MTD_ONENAND_SYNC_READ 46config MTD_ONENAND_SYNC_READ
33 bool "OneNAND Sync. Burst Read Support" 47 bool "OneNAND Sync. Burst Read Support"
34 depends on ARCH_OMAP 48 depends on ARCH_OMAP
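The new MTD_ONENAND_OTP option is serviced by the do_otp_* helpers and onenand_*_prot_* handlers added to onenand_base.c later in this patch, which onenand_scan() installs as the standard mtd_info OTP hooks. The sketch below only illustrates how an in-kernel caller might walk and dump the user OTP area through those hooks once the option is enabled; the function name, array sizes and printk text are invented for the example and are not part of the patch.

/* Illustrative only: dump the user OTP pages through the hooks installed
 * by onenand_scan() (get_user_prot_info / read_user_prot_reg). */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int example_dump_user_otp(struct mtd_info *mtd)
{
	struct otp_info info[32];	/* enough for the 20-page worst case */
	u_char page[64];
	size_t retlen;
	int len, n, i, ret;

	if (!mtd->get_user_prot_info || !mtd->read_user_prot_reg)
		return -EOPNOTSUPP;

	/* Returns the number of otp_info bytes filled in, or a negative error */
	len = mtd->get_user_prot_info(mtd, info, sizeof(info));
	if (len < 0)
		return len;

	n = len / sizeof(struct otp_info);
	for (i = 0; i < n; i++) {
		ret = mtd->read_user_prot_reg(mtd, info[i].start,
					      sizeof(page), &retlen, page);
		if (ret < 0)
			return ret;
		printk(KERN_INFO "OTP page at 0x%x: read %zu bytes\n",
		       (unsigned int)info[i].start, retlen);
	}

	return 0;
}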
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a53a73fc2a5a..84ec40d25438 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -23,8 +23,7 @@
23/** 23/**
24 * onenand_oob_64 - oob info for large (2KB) page 24 * onenand_oob_64 - oob info for large (2KB) page
25 */ 25 */
26static struct nand_oobinfo onenand_oob_64 = { 26static struct nand_ecclayout onenand_oob_64 = {
27 .useecc = MTD_NANDECC_AUTOPLACE,
28 .eccbytes = 20, 27 .eccbytes = 20,
29 .eccpos = { 28 .eccpos = {
30 8, 9, 10, 11, 12, 29 8, 9, 10, 11, 12,
@@ -34,14 +33,14 @@ static struct nand_oobinfo onenand_oob_64 = {
34 }, 33 },
35 .oobfree = { 34 .oobfree = {
36 {2, 3}, {14, 2}, {18, 3}, {30, 2}, 35 {2, 3}, {14, 2}, {18, 3}, {30, 2},
37 {24, 3}, {46, 2}, {40, 3}, {62, 2} } 36 {34, 3}, {46, 2}, {50, 3}, {62, 2}
37 }
38}; 38};
39 39
40/** 40/**
41 * onenand_oob_32 - oob info for middle (1KB) page 41 * onenand_oob_32 - oob info for middle (1KB) page
42 */ 42 */
43static struct nand_oobinfo onenand_oob_32 = { 43static struct nand_ecclayout onenand_oob_32 = {
44 .useecc = MTD_NANDECC_AUTOPLACE,
45 .eccbytes = 10, 44 .eccbytes = 10,
46 .eccpos = { 45 .eccpos = {
47 8, 9, 10, 11, 12, 46 8, 9, 10, 11, 12,
@@ -190,7 +189,7 @@ static int onenand_buffer_address(int dataram1, int sectors, int count)
190static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len) 189static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t len)
191{ 190{
192 struct onenand_chip *this = mtd->priv; 191 struct onenand_chip *this = mtd->priv;
193 int value, readcmd = 0; 192 int value, readcmd = 0, block_cmd = 0;
194 int block, page; 193 int block, page;
195 /* Now we use page size operation */ 194 /* Now we use page size operation */
196 int sectors = 4, count = 4; 195 int sectors = 4, count = 4;
@@ -206,6 +205,8 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
206 205
207 case ONENAND_CMD_ERASE: 206 case ONENAND_CMD_ERASE:
208 case ONENAND_CMD_BUFFERRAM: 207 case ONENAND_CMD_BUFFERRAM:
208 case ONENAND_CMD_OTP_ACCESS:
209 block_cmd = 1;
209 block = (int) (addr >> this->erase_shift); 210 block = (int) (addr >> this->erase_shift);
210 page = -1; 211 page = -1;
211 break; 212 break;
@@ -233,6 +234,12 @@ static int onenand_command(struct mtd_info *mtd, int cmd, loff_t addr, size_t le
233 /* Write 'DFS, FBA' of Flash */ 234 /* Write 'DFS, FBA' of Flash */
234 value = onenand_block_address(this, block); 235 value = onenand_block_address(this, block);
235 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1); 236 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS1);
237
238 if (block_cmd) {
239 /* Select DataRAM for DDP */
240 value = onenand_bufferram_address(this, block);
241 this->write_word(value, this->base + ONENAND_REG_START_ADDRESS2);
242 }
236 } 243 }
237 244
238 if (page != -1) { 245 if (page != -1) {
@@ -301,6 +308,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
301 308
302 if (state != FL_READING) 309 if (state != FL_READING)
303 cond_resched(); 310 cond_resched();
311 touch_softlockup_watchdog();
304 } 312 }
305 /* To get correct interrupt status in timeout case */ 313 /* To get correct interrupt status in timeout case */
306 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT); 314 interrupt = this->read_word(this->base + ONENAND_REG_INTERRUPT);
@@ -344,7 +352,7 @@ static inline int onenand_bufferram_offset(struct mtd_info *mtd, int area)
344 352
345 if (ONENAND_CURRENT_BUFFERRAM(this)) { 353 if (ONENAND_CURRENT_BUFFERRAM(this)) {
346 if (area == ONENAND_DATARAM) 354 if (area == ONENAND_DATARAM)
347 return mtd->oobblock; 355 return mtd->writesize;
348 if (area == ONENAND_SPARERAM) 356 if (area == ONENAND_SPARERAM)
349 return mtd->oobsize; 357 return mtd->oobsize;
350 } 358 }
@@ -372,6 +380,17 @@ static int onenand_read_bufferram(struct mtd_info *mtd, int area,
372 380
373 bufferram += onenand_bufferram_offset(mtd, area); 381 bufferram += onenand_bufferram_offset(mtd, area);
374 382
383 if (ONENAND_CHECK_BYTE_ACCESS(count)) {
384 unsigned short word;
385
386 /* Align with word(16-bit) size */
387 count--;
388
389 /* Read word and save byte */
390 word = this->read_word(bufferram + offset + count);
391 buffer[count] = (word & 0xff);
392 }
393
375 memcpy(buffer, bufferram + offset, count); 394 memcpy(buffer, bufferram + offset, count);
376 395
377 return 0; 396 return 0;
@@ -399,6 +418,17 @@ static int onenand_sync_read_bufferram(struct mtd_info *mtd, int area,
399 418
400 this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ); 419 this->mmcontrol(mtd, ONENAND_SYS_CFG1_SYNC_READ);
401 420
421 if (ONENAND_CHECK_BYTE_ACCESS(count)) {
422 unsigned short word;
423
424 /* Align with word(16-bit) size */
425 count--;
426
427 /* Read word and save byte */
428 word = this->read_word(bufferram + offset + count);
429 buffer[count] = (word & 0xff);
430 }
431
402 memcpy(buffer, bufferram + offset, count); 432 memcpy(buffer, bufferram + offset, count);
403 433
404 this->mmcontrol(mtd, 0); 434 this->mmcontrol(mtd, 0);
@@ -426,6 +456,22 @@ static int onenand_write_bufferram(struct mtd_info *mtd, int area,
426 456
427 bufferram += onenand_bufferram_offset(mtd, area); 457 bufferram += onenand_bufferram_offset(mtd, area);
428 458
459 if (ONENAND_CHECK_BYTE_ACCESS(count)) {
460 unsigned short word;
461 int byte_offset;
462
463 /* Align with word(16-bit) size */
464 count--;
465
466 /* Calculate byte access offset */
467 byte_offset = offset + count;
468
469 /* Read word and save byte */
470 word = this->read_word(bufferram + byte_offset);
471 word = (word & ~0xff) | buffer[count];
472 this->write_word(word, bufferram + byte_offset);
473 }
474
429 memcpy(bufferram + offset, buffer, count); 475 memcpy(bufferram + offset, buffer, count);
430 476
431 return 0; 477 return 0;
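The three ONENAND_CHECK_BYTE_ACCESS hunks above (normal read, sync burst read, write) all implement the same idea: the BufferRAM must only be touched in 16-bit units, so an odd transfer length is trimmed by one and the trailing byte is carried through a full word access, with a read-modify-write on the store path. The sketch below restates just that pattern outside the driver; read16()/write16() are hypothetical stand-ins for this->read_word()/this->write_word(), and the little-endian byte packing is an assumption made only for the illustration.

#include <string.h>

/* Stand-ins for the chip's 16-bit accessors (this->read_word/write_word). */
static unsigned short read16(const unsigned char *p)
{
	return (unsigned short)(p[0] | (p[1] << 8));
}

static void write16(unsigned short v, unsigned char *p)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

/* Trim an odd byte count so the bulk copy stays word-aligned, and move the
 * last byte through a word read-modify-write, as the hunks above do. */
static void sketch_write_tail(unsigned char *bufferram, int offset,
			      const unsigned char *buf, size_t count)
{
	if (count & 1) {		/* odd length: tail byte needs word access */
		unsigned short word;

		count--;		/* keep memcpy() word-aligned */
		word = read16(bufferram + offset + count);
		word = (word & ~0xff) | buf[count];	/* replace the low byte only */
		write16(word, bufferram + offset + count);
	}
	memcpy(bufferram + offset, buf, count);	/* even-sized remainder */
}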
@@ -549,31 +595,28 @@ static void onenand_release_device(struct mtd_info *mtd)
549} 595}
550 596
551/** 597/**
552 * onenand_read_ecc - [MTD Interface] Read data with ECC 598 * onenand_read - [MTD Interface] Read data from flash
553 * @param mtd MTD device structure 599 * @param mtd MTD device structure
554 * @param from offset to read from 600 * @param from offset to read from
555 * @param len number of bytes to read 601 * @param len number of bytes to read
556 * @param retlen pointer to variable to store the number of read bytes 602 * @param retlen pointer to variable to store the number of read bytes
557 * @param buf the databuffer to put data 603 * @param buf the databuffer to put data
558 * @param oob_buf filesystem supplied oob data buffer
559 * @param oobsel oob selection structure
560 * 604 *
561 * OneNAND read with ECC 605 * Read with ecc
562 */ 606*/
563static int onenand_read_ecc(struct mtd_info *mtd, loff_t from, size_t len, 607static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
564 size_t *retlen, u_char *buf, 608 size_t *retlen, u_char *buf)
565 u_char *oob_buf, struct nand_oobinfo *oobsel)
566{ 609{
567 struct onenand_chip *this = mtd->priv; 610 struct onenand_chip *this = mtd->priv;
568 int read = 0, column; 611 int read = 0, column;
569 int thislen; 612 int thislen;
570 int ret = 0; 613 int ret = 0;
571 614
572 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read_ecc: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len); 615 DEBUG(MTD_DEBUG_LEVEL3, "onenand_read: from = 0x%08x, len = %i\n", (unsigned int) from, (int) len);
573 616
574 /* Do not allow reads past end of device */ 617 /* Do not allow reads past end of device */
575 if ((from + len) > mtd->size) { 618 if ((from + len) > mtd->size) {
576 DEBUG(MTD_DEBUG_LEVEL0, "onenand_read_ecc: Attempt read beyond end of device\n"); 619 DEBUG(MTD_DEBUG_LEVEL0, "onenand_read: Attempt read beyond end of device\n");
577 *retlen = 0; 620 *retlen = 0;
578 return -EINVAL; 621 return -EINVAL;
579 } 622 }
@@ -584,14 +627,14 @@ static int onenand_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
584 /* TODO handling oob */ 627 /* TODO handling oob */
585 628
586 while (read < len) { 629 while (read < len) {
587 thislen = min_t(int, mtd->oobblock, len - read); 630 thislen = min_t(int, mtd->writesize, len - read);
588 631
589 column = from & (mtd->oobblock - 1); 632 column = from & (mtd->writesize - 1);
590 if (column + thislen > mtd->oobblock) 633 if (column + thislen > mtd->writesize)
591 thislen = mtd->oobblock - column; 634 thislen = mtd->writesize - column;
592 635
593 if (!onenand_check_bufferram(mtd, from)) { 636 if (!onenand_check_bufferram(mtd, from)) {
594 this->command(mtd, ONENAND_CMD_READ, from, mtd->oobblock); 637 this->command(mtd, ONENAND_CMD_READ, from, mtd->writesize);
595 638
596 ret = this->wait(mtd, FL_READING); 639 ret = this->wait(mtd, FL_READING);
597 /* First copy data and check return value for ECC handling */ 640 /* First copy data and check return value for ECC handling */
@@ -606,7 +649,7 @@ static int onenand_read_ecc(struct mtd_info *mtd, loff_t from, size_t len,
606 break; 649 break;
607 650
608 if (ret) { 651 if (ret) {
609 DEBUG(MTD_DEBUG_LEVEL0, "onenand_read_ecc: read failed = %d\n", ret); 652 DEBUG(MTD_DEBUG_LEVEL0, "onenand_read: read failed = %d\n", ret);
610 goto out; 653 goto out;
611 } 654 }
612 655
@@ -628,23 +671,7 @@ out:
628} 671}
629 672
630/** 673/**
631 * onenand_read - [MTD Interface] MTD compability function for onenand_read_ecc 674 * onenand_do_read_oob - [MTD Interface] OneNAND read out-of-band
632 * @param mtd MTD device structure
633 * @param from offset to read from
634 * @param len number of bytes to read
635 * @param retlen pointer to variable to store the number of read bytes
636 * @param buf the databuffer to put data
637 *
638 * This function simply calls onenand_read_ecc with oob buffer and oobsel = NULL
639*/
640static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
641 size_t *retlen, u_char *buf)
642{
643 return onenand_read_ecc(mtd, from, len, retlen, buf, NULL, NULL);
644}
645
646/**
647 * onenand_read_oob - [MTD Interface] OneNAND read out-of-band
648 * @param mtd MTD device structure 675 * @param mtd MTD device structure
649 * @param from offset to read from 676 * @param from offset to read from
650 * @param len number of bytes to read 677 * @param len number of bytes to read
@@ -653,8 +680,8 @@ static int onenand_read(struct mtd_info *mtd, loff_t from, size_t len,
653 * 680 *
654 * OneNAND read out-of-band data from the spare area 681 * OneNAND read out-of-band data from the spare area
655 */ 682 */
656static int onenand_read_oob(struct mtd_info *mtd, loff_t from, size_t len, 683int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
657 size_t *retlen, u_char *buf) 684 size_t *retlen, u_char *buf)
658{ 685{
659 struct onenand_chip *this = mtd->priv; 686 struct onenand_chip *this = mtd->priv;
660 int read = 0, thislen, column; 687 int read = 0, thislen, column;
@@ -704,7 +731,7 @@ static int onenand_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
704 /* Read more? */ 731 /* Read more? */
705 if (read < len) { 732 if (read < len) {
706 /* Page size */ 733 /* Page size */
707 from += mtd->oobblock; 734 from += mtd->writesize;
708 column = 0; 735 column = 0;
709 } 736 }
710 } 737 }
@@ -717,8 +744,53 @@ out:
717 return ret; 744 return ret;
718} 745}
719 746
747/**
 748 * onenand_read_oob - [MTD Interface] NAND read data and/or out-of-band
749 * @mtd: MTD device structure
750 * @from: offset to read from
751 * @ops: oob operation description structure
752 */
753static int onenand_read_oob(struct mtd_info *mtd, loff_t from,
754 struct mtd_oob_ops *ops)
755{
756 BUG_ON(ops->mode != MTD_OOB_PLACE);
757
758 return onenand_do_read_oob(mtd, from + ops->ooboffs, ops->len,
759 &ops->retlen, ops->oobbuf);
760}
761
720#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE 762#ifdef CONFIG_MTD_ONENAND_VERIFY_WRITE
721/** 763/**
764 * onenand_verify_oob - [GENERIC] verify the oob contents after a write
765 * @param mtd MTD device structure
766 * @param buf the databuffer to verify
767 * @param to offset to read from
768 * @param len number of bytes to read and compare
769 *
770 */
771static int onenand_verify_oob(struct mtd_info *mtd, const u_char *buf, loff_t to, int len)
772{
773 struct onenand_chip *this = mtd->priv;
774 char *readp = this->page_buf;
775 int column = to & (mtd->oobsize - 1);
776 int status, i;
777
778 this->command(mtd, ONENAND_CMD_READOOB, to, mtd->oobsize);
779 onenand_update_bufferram(mtd, to, 0);
780 status = this->wait(mtd, FL_READING);
781 if (status)
782 return status;
783
784 this->read_bufferram(mtd, ONENAND_SPARERAM, readp, column, len);
785
786 for(i = 0; i < len; i++)
787 if (buf[i] != 0xFF && buf[i] != readp[i])
788 return -EBADMSG;
789
790 return 0;
791}
792
793/**
722 * onenand_verify_page - [GENERIC] verify the chip contents after a write 794 * onenand_verify_page - [GENERIC] verify the chip contents after a write
723 * @param mtd MTD device structure 795 * @param mtd MTD device structure
724 * @param buf the databuffer to verify 796 * @param buf the databuffer to verify
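With this hunk the exported OOB read switches from a (from, len, retlen, buf) signature to the new struct mtd_oob_ops, and only MTD_OOB_PLACE placement is accepted (hence the BUG_ON). A minimal sketch of a caller against that interface; the function name, the -EIO policy and the zeroed ops initialisation are choices made for the example, not taken from the patch.

/* Sketch: read 'ooblen' spare-area bytes for the page at 'page_addr' via the
 * ops-based entry point wired up as mtd->read_oob in this patch. */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mtd/mtd.h>

static int example_read_spare(struct mtd_info *mtd, loff_t page_addr,
			      u_char *oobbuf, size_t ooblen)
{
	struct mtd_oob_ops ops;
	int ret;

	memset(&ops, 0, sizeof(ops));
	ops.mode    = MTD_OOB_PLACE;	/* raw placement, as required above */
	ops.ooboffs = 0;		/* byte offset inside the spare area */
	ops.len     = ooblen;		/* number of OOB bytes wanted */
	ops.oobbuf  = oobbuf;
	ops.datbuf  = NULL;		/* no main-area data in this call */

	ret = mtd->read_oob(mtd, page_addr, &ops);
	if (ret)
		return ret;

	return ops.retlen == ooblen ? 0 : -EIO;
}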
@@ -731,7 +803,7 @@ static int onenand_verify_page(struct mtd_info *mtd, u_char *buf, loff_t addr)
731 void __iomem *dataram0, *dataram1; 803 void __iomem *dataram0, *dataram1;
732 int ret = 0; 804 int ret = 0;
733 805
734 this->command(mtd, ONENAND_CMD_READ, addr, mtd->oobblock); 806 this->command(mtd, ONENAND_CMD_READ, addr, mtd->writesize);
735 807
736 ret = this->wait(mtd, FL_READING); 808 ret = this->wait(mtd, FL_READING);
737 if (ret) 809 if (ret)
@@ -741,53 +813,51 @@ static int onenand_verify_page(struct mtd_info *mtd, u_char *buf, loff_t addr)
741 813
742 /* Check, if the two dataram areas are same */ 814 /* Check, if the two dataram areas are same */
743 dataram0 = this->base + ONENAND_DATARAM; 815 dataram0 = this->base + ONENAND_DATARAM;
744 dataram1 = dataram0 + mtd->oobblock; 816 dataram1 = dataram0 + mtd->writesize;
745 817
746 if (memcmp(dataram0, dataram1, mtd->oobblock)) 818 if (memcmp(dataram0, dataram1, mtd->writesize))
747 return -EBADMSG; 819 return -EBADMSG;
748 820
749 return 0; 821 return 0;
750} 822}
751#else 823#else
752#define onenand_verify_page(...) (0) 824#define onenand_verify_page(...) (0)
825#define onenand_verify_oob(...) (0)
753#endif 826#endif
754 827
755#define NOTALIGNED(x) ((x & (mtd->oobblock - 1)) != 0) 828#define NOTALIGNED(x) ((x & (mtd->writesize - 1)) != 0)
756 829
757/** 830/**
758 * onenand_write_ecc - [MTD Interface] OneNAND write with ECC 831 * onenand_write - [MTD Interface] write buffer to FLASH
759 * @param mtd MTD device structure 832 * @param mtd MTD device structure
760 * @param to offset to write to 833 * @param to offset to write to
761 * @param len number of bytes to write 834 * @param len number of bytes to write
762 * @param retlen pointer to variable to store the number of written bytes 835 * @param retlen pointer to variable to store the number of written bytes
763 * @param buf the data to write 836 * @param buf the data to write
764 * @param eccbuf filesystem supplied oob data buffer
765 * @param oobsel oob selection structure
766 * 837 *
767 * OneNAND write with ECC 838 * Write with ECC
768 */ 839 */
769static int onenand_write_ecc(struct mtd_info *mtd, loff_t to, size_t len, 840static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
770 size_t *retlen, const u_char *buf, 841 size_t *retlen, const u_char *buf)
771 u_char *eccbuf, struct nand_oobinfo *oobsel)
772{ 842{
773 struct onenand_chip *this = mtd->priv; 843 struct onenand_chip *this = mtd->priv;
774 int written = 0; 844 int written = 0;
775 int ret = 0; 845 int ret = 0;
776 846
777 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_ecc: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 847 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
778 848
779 /* Initialize retlen, in case of early exit */ 849 /* Initialize retlen, in case of early exit */
780 *retlen = 0; 850 *retlen = 0;
781 851
782 /* Do not allow writes past end of device */ 852 /* Do not allow writes past end of device */
783 if (unlikely((to + len) > mtd->size)) { 853 if (unlikely((to + len) > mtd->size)) {
784 DEBUG(MTD_DEBUG_LEVEL0, "onenand_write_ecc: Attempt write to past end of device\n"); 854 DEBUG(MTD_DEBUG_LEVEL0, "onenand_write: Attempt write to past end of device\n");
785 return -EINVAL; 855 return -EINVAL;
786 } 856 }
787 857
788 /* Reject writes, which are not page aligned */ 858 /* Reject writes, which are not page aligned */
789 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) { 859 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(len))) {
790 DEBUG(MTD_DEBUG_LEVEL0, "onenand_write_ecc: Attempt to write not page aligned data\n"); 860 DEBUG(MTD_DEBUG_LEVEL0, "onenand_write: Attempt to write not page aligned data\n");
791 return -EINVAL; 861 return -EINVAL;
792 } 862 }
793 863
@@ -796,20 +866,20 @@ static int onenand_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
796 866
797 /* Loop until all data write */ 867 /* Loop until all data write */
798 while (written < len) { 868 while (written < len) {
799 int thislen = min_t(int, mtd->oobblock, len - written); 869 int thislen = min_t(int, mtd->writesize, len - written);
800 870
801 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobblock); 871 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->writesize);
802 872
803 this->write_bufferram(mtd, ONENAND_DATARAM, buf, 0, thislen); 873 this->write_bufferram(mtd, ONENAND_DATARAM, buf, 0, thislen);
804 this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize); 874 this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize);
805 875
806 this->command(mtd, ONENAND_CMD_PROG, to, mtd->oobblock); 876 this->command(mtd, ONENAND_CMD_PROG, to, mtd->writesize);
807 877
808 onenand_update_bufferram(mtd, to, 1); 878 onenand_update_bufferram(mtd, to, 1);
809 879
810 ret = this->wait(mtd, FL_WRITING); 880 ret = this->wait(mtd, FL_WRITING);
811 if (ret) { 881 if (ret) {
 812 	DEBUG(MTD_DEBUG_LEVEL0, "onenand_write_ecc: write failed %d\n", ret); 882 	DEBUG(MTD_DEBUG_LEVEL0, "onenand_write: write failed %d\n", ret);
813 goto out; 883 goto out;
814 } 884 }
815 885
@@ -818,7 +888,7 @@ static int onenand_write_ecc(struct mtd_info *mtd, loff_t to, size_t len,
818 /* Only check verify write turn on */ 888 /* Only check verify write turn on */
819 ret = onenand_verify_page(mtd, (u_char *) buf, to); 889 ret = onenand_verify_page(mtd, (u_char *) buf, to);
820 if (ret) { 890 if (ret) {
821 DEBUG(MTD_DEBUG_LEVEL0, "onenand_write_ecc: verify failed %d\n", ret); 891 DEBUG(MTD_DEBUG_LEVEL0, "onenand_write: verify failed %d\n", ret);
822 goto out; 892 goto out;
823 } 893 }
824 894
@@ -839,24 +909,7 @@ out:
839} 909}
840 910
841/** 911/**
842 * onenand_write - [MTD Interface] compability function for onenand_write_ecc 912 * onenand_do_write_oob - [Internal] OneNAND write out-of-band
843 * @param mtd MTD device structure
844 * @param to offset to write to
845 * @param len number of bytes to write
846 * @param retlen pointer to variable to store the number of written bytes
847 * @param buf the data to write
848 *
849 * This function simply calls onenand_write_ecc
850 * with oob buffer and oobsel = NULL
851 */
852static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
853 size_t *retlen, const u_char *buf)
854{
855 return onenand_write_ecc(mtd, to, len, retlen, buf, NULL, NULL);
856}
857
858/**
859 * onenand_write_oob - [MTD Interface] OneNAND write out-of-band
860 * @param mtd MTD device structure 913 * @param mtd MTD device structure
861 * @param to offset to write to 914 * @param to offset to write to
862 * @param len number of bytes to write 915 * @param len number of bytes to write
@@ -865,11 +918,11 @@ static int onenand_write(struct mtd_info *mtd, loff_t to, size_t len,
865 * 918 *
866 * OneNAND write out-of-band 919 * OneNAND write out-of-band
867 */ 920 */
868static int onenand_write_oob(struct mtd_info *mtd, loff_t to, size_t len, 921static int onenand_do_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
869 size_t *retlen, const u_char *buf) 922 size_t *retlen, const u_char *buf)
870{ 923{
871 struct onenand_chip *this = mtd->priv; 924 struct onenand_chip *this = mtd->priv;
872 int column, status; 925 int column, ret = 0;
873 int written = 0; 926 int written = 0;
874 927
875 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len); 928 DEBUG(MTD_DEBUG_LEVEL3, "onenand_write_oob: to = 0x%08x, len = %i\n", (unsigned int) to, (int) len);
@@ -894,16 +947,27 @@ static int onenand_write_oob(struct mtd_info *mtd, loff_t to, size_t len,
894 947
895 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize); 948 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobsize);
896 949
897 this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize); 950 /* We send data to spare ram with oobsize
898 this->write_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen); 951 * to prevent byte access */
952 memset(this->page_buf, 0xff, mtd->oobsize);
953 memcpy(this->page_buf + column, buf, thislen);
954 this->write_bufferram(mtd, ONENAND_SPARERAM, this->page_buf, 0, mtd->oobsize);
899 955
900 this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize); 956 this->command(mtd, ONENAND_CMD_PROGOOB, to, mtd->oobsize);
901 957
902 onenand_update_bufferram(mtd, to, 0); 958 onenand_update_bufferram(mtd, to, 0);
903 959
904 status = this->wait(mtd, FL_WRITING); 960 ret = this->wait(mtd, FL_WRITING);
905 if (status) 961 if (ret) {
 962 	DEBUG(MTD_DEBUG_LEVEL0, "onenand_write_oob: write failed %d\n", ret);
963 goto out;
964 }
965
966 ret = onenand_verify_oob(mtd, buf, to, thislen);
967 if (ret) {
968 DEBUG(MTD_DEBUG_LEVEL0, "onenand_write_oob: verify failed %d\n", ret);
906 goto out; 969 goto out;
970 }
907 971
908 written += thislen; 972 written += thislen;
909 973
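The rewritten spare-area write above applies the same word-access rule: the caller's bytes are spliced into a full-oobsize image in this->page_buf, so the BufferRAM only ever sees a single whole, word-aligned transfer, and the new onenand_verify_oob() can skip the 0xff filler because an untouched OOB byte stays erased. As a worked example with illustrative numbers: for mtd->oobsize = 64 and a caller writing thislen = 8 bytes at an offset whose low bits give column = 16, page_buf is laid out as 16 bytes of 0xff, the caller's 8 bytes, then 40 more bytes of 0xff, and that complete 64-byte image is what is written to ONENAND_SPARERAM.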
@@ -920,145 +984,22 @@ out:
920 984
921 *retlen = written; 985 *retlen = written;
922 986
923 return 0; 987 return ret;
924} 988}
925 989
926/** 990/**
927 * onenand_writev_ecc - [MTD Interface] write with iovec with ecc 991 * onenand_write_oob - [MTD Interface] NAND write data and/or out-of-band
928 * @param mtd MTD device structure 992 * @mtd: MTD device structure
 929 * @param vecs	the iovectors to write	 993 @to: offset to write to
930 * @param count number of vectors 994 * @ops: oob operation description structure
931 * @param to offset to write to
932 * @param retlen pointer to variable to store the number of written bytes
933 * @param eccbuf filesystem supplied oob data buffer
934 * @param oobsel oob selection structure
935 *
936 * OneNAND write with iovec with ecc
937 */ 995 */
938static int onenand_writev_ecc(struct mtd_info *mtd, const struct kvec *vecs, 996static int onenand_write_oob(struct mtd_info *mtd, loff_t to,
939 unsigned long count, loff_t to, size_t *retlen, 997 struct mtd_oob_ops *ops)
940 u_char *eccbuf, struct nand_oobinfo *oobsel)
941{ 998{
942 struct onenand_chip *this = mtd->priv; 999 BUG_ON(ops->mode != MTD_OOB_PLACE);
943 unsigned char *pbuf;
944 size_t total_len, len;
945 int i, written = 0;
946 int ret = 0;
947
948 /* Preset written len for early exit */
949 *retlen = 0;
950
951 /* Calculate total length of data */
952 total_len = 0;
953 for (i = 0; i < count; i++)
954 total_len += vecs[i].iov_len;
955
956 DEBUG(MTD_DEBUG_LEVEL3, "onenand_writev_ecc: to = 0x%08x, len = %i, count = %ld\n", (unsigned int) to, (unsigned int) total_len, count);
957
958 /* Do not allow write past end of the device */
959 if (unlikely((to + total_len) > mtd->size)) {
960 DEBUG(MTD_DEBUG_LEVEL0, "onenand_writev_ecc: Attempted write past end of device\n");
961 return -EINVAL;
962 }
963
964 /* Reject writes, which are not page aligned */
965 if (unlikely(NOTALIGNED(to)) || unlikely(NOTALIGNED(total_len))) {
966 DEBUG(MTD_DEBUG_LEVEL0, "onenand_writev_ecc: Attempt to write not page aligned data\n");
967 return -EINVAL;
968 }
969
970 /* Grab the lock and see if the device is available */
971 onenand_get_device(mtd, FL_WRITING);
972
973 /* TODO handling oob */
974
975 /* Loop until all keve's data has been written */
976 len = 0;
977 while (count) {
978 pbuf = this->page_buf;
979 /*
980 * If the given tuple is >= pagesize then
981 * write it out from the iov
982 */
983 if ((vecs->iov_len - len) >= mtd->oobblock) {
984 pbuf = vecs->iov_base + len;
985
986 len += mtd->oobblock;
987
988 /* Check, if we have to switch to the next tuple */
989 if (len >= (int) vecs->iov_len) {
990 vecs++;
991 len = 0;
992 count--;
993 }
994 } else {
995 int cnt = 0, thislen;
996 while (cnt < mtd->oobblock) {
997 thislen = min_t(int, mtd->oobblock - cnt, vecs->iov_len - len);
998 memcpy(this->page_buf + cnt, vecs->iov_base + len, thislen);
999 cnt += thislen;
1000 len += thislen;
1001
1002 /* Check, if we have to switch to the next tuple */
1003 if (len >= (int) vecs->iov_len) {
1004 vecs++;
1005 len = 0;
1006 count--;
1007 }
1008 }
1009 }
1010
1011 this->command(mtd, ONENAND_CMD_BUFFERRAM, to, mtd->oobblock);
1012 1000
1013 this->write_bufferram(mtd, ONENAND_DATARAM, pbuf, 0, mtd->oobblock); 1001 return onenand_do_write_oob(mtd, to + ops->ooboffs, ops->len,
1014 this->write_bufferram(mtd, ONENAND_SPARERAM, ffchars, 0, mtd->oobsize); 1002 &ops->retlen, ops->oobbuf);
1015
1016 this->command(mtd, ONENAND_CMD_PROG, to, mtd->oobblock);
1017
1018 onenand_update_bufferram(mtd, to, 1);
1019
1020 ret = this->wait(mtd, FL_WRITING);
1021 if (ret) {
1022 DEBUG(MTD_DEBUG_LEVEL0, "onenand_writev_ecc: write failed %d\n", ret);
1023 goto out;
1024 }
1025
1026
1027 /* Only check verify write turn on */
1028 ret = onenand_verify_page(mtd, (u_char *) pbuf, to);
1029 if (ret) {
1030 DEBUG(MTD_DEBUG_LEVEL0, "onenand_writev_ecc: verify failed %d\n", ret);
1031 goto out;
1032 }
1033
1034 written += mtd->oobblock;
1035
1036 to += mtd->oobblock;
1037 }
1038
1039out:
1040 /* Deselect and wakt up anyone waiting on the device */
1041 onenand_release_device(mtd);
1042
1043 *retlen = written;
1044
1045 return 0;
1046}
1047
1048/**
1049 * onenand_writev - [MTD Interface] compabilty function for onenand_writev_ecc
1050 * @param mtd MTD device structure
1051 * @param vecs the iovectors to write
1052 * @param count number of vectors
1053 * @param to offset to write to
1054 * @param retlen pointer to variable to store the number of written bytes
1055 *
1056 * OneNAND write with kvec. This just calls the ecc function
1057 */
1058static int onenand_writev(struct mtd_info *mtd, const struct kvec *vecs,
1059 unsigned long count, loff_t to, size_t *retlen)
1060{
1061 return onenand_writev_ecc(mtd, vecs, count, to, retlen, NULL, NULL);
1062} 1003}
1063 1004
1064/** 1005/**
@@ -1227,7 +1168,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
1227 1168
1228 /* We write two bytes, so we dont have to mess with 16 bit access */ 1169 /* We write two bytes, so we dont have to mess with 16 bit access */
1229 ofs += mtd->oobsize + (bbm->badblockpos & ~0x01); 1170 ofs += mtd->oobsize + (bbm->badblockpos & ~0x01);
1230 return mtd->write_oob(mtd, ofs , 2, &retlen, buf); 1171 return onenand_do_write_oob(mtd, ofs , 2, &retlen, buf);
1231} 1172}
1232 1173
1233/** 1174/**
@@ -1324,6 +1265,304 @@ static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1324 return 0; 1265 return 0;
1325} 1266}
1326 1267
1268#ifdef CONFIG_MTD_ONENAND_OTP
1269
1270/* Internal OTP operation */
1271typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t from, size_t len,
1272 size_t *retlen, u_char *buf);
1273
1274/**
1275 * do_otp_read - [DEFAULT] Read OTP block area
1276 * @param mtd MTD device structure
1277 * @param from The offset to read
1278 * @param len number of bytes to read
1279 * @param retlen pointer to variable to store the number of readbytes
1280 * @param buf the databuffer to put/get data
1281 *
1282 * Read OTP block area.
1283 */
1284static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len,
1285 size_t *retlen, u_char *buf)
1286{
1287 struct onenand_chip *this = mtd->priv;
1288 int ret;
1289
1290 /* Enter OTP access mode */
1291 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
1292 this->wait(mtd, FL_OTPING);
1293
1294 ret = mtd->read(mtd, from, len, retlen, buf);
1295
1296 /* Exit OTP access mode */
1297 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
1298 this->wait(mtd, FL_RESETING);
1299
1300 return ret;
1301}
1302
1303/**
1304 * do_otp_write - [DEFAULT] Write OTP block area
1305 * @param mtd MTD device structure
1306 * @param from The offset to write
1307 * @param len number of bytes to write
1308 * @param retlen pointer to variable to store the number of write bytes
1309 * @param buf the databuffer to put/get data
1310 *
1311 * Write OTP block area.
1312 */
1313static int do_otp_write(struct mtd_info *mtd, loff_t from, size_t len,
1314 size_t *retlen, u_char *buf)
1315{
1316 struct onenand_chip *this = mtd->priv;
1317 unsigned char *pbuf = buf;
1318 int ret;
1319
1320 /* Force buffer page aligned */
1321 if (len < mtd->writesize) {
1322 memcpy(this->page_buf, buf, len);
1323 memset(this->page_buf + len, 0xff, mtd->writesize - len);
1324 pbuf = this->page_buf;
1325 len = mtd->writesize;
1326 }
1327
1328 /* Enter OTP access mode */
1329 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
1330 this->wait(mtd, FL_OTPING);
1331
1332 ret = mtd->write(mtd, from, len, retlen, pbuf);
1333
1334 /* Exit OTP access mode */
1335 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
1336 this->wait(mtd, FL_RESETING);
1337
1338 return ret;
1339}
1340
1341/**
1342 * do_otp_lock - [DEFAULT] Lock OTP block area
1343 * @param mtd MTD device structure
1344 * @param from The offset to lock
1345 * @param len number of bytes to lock
1346 * @param retlen pointer to variable to store the number of lock bytes
1347 * @param buf the databuffer to put/get data
1348 *
1349 * Lock OTP block area.
1350 */
1351static int do_otp_lock(struct mtd_info *mtd, loff_t from, size_t len,
1352 size_t *retlen, u_char *buf)
1353{
1354 struct onenand_chip *this = mtd->priv;
1355 int ret;
1356
1357 /* Enter OTP access mode */
1358 this->command(mtd, ONENAND_CMD_OTP_ACCESS, 0, 0);
1359 this->wait(mtd, FL_OTPING);
1360
1361 ret = onenand_do_write_oob(mtd, from, len, retlen, buf);
1362
1363 /* Exit OTP access mode */
1364 this->command(mtd, ONENAND_CMD_RESET, 0, 0);
1365 this->wait(mtd, FL_RESETING);
1366
1367 return ret;
1368}
1369
1370/**
1371 * onenand_otp_walk - [DEFAULT] Handle OTP operation
1372 * @param mtd MTD device structure
1373 * @param from The offset to read/write
1374 * @param len number of bytes to read/write
1375 * @param retlen pointer to variable to store the number of read bytes
1376 * @param buf the databuffer to put/get data
1377 * @param action do given action
1378 * @param mode specify user and factory
1379 *
1380 * Handle OTP operation.
1381 */
1382static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1383 size_t *retlen, u_char *buf,
1384 otp_op_t action, int mode)
1385{
1386 struct onenand_chip *this = mtd->priv;
1387 int otp_pages;
1388 int density;
1389 int ret = 0;
1390
1391 *retlen = 0;
1392
1393 density = this->device_id >> ONENAND_DEVICE_DENSITY_SHIFT;
1394 if (density < ONENAND_DEVICE_DENSITY_512Mb)
1395 otp_pages = 20;
1396 else
1397 otp_pages = 10;
1398
1399 if (mode == MTD_OTP_FACTORY) {
1400 from += mtd->writesize * otp_pages;
1401 otp_pages = 64 - otp_pages;
1402 }
1403
1404 /* Check User/Factory boundary */
1405 if (((mtd->writesize * otp_pages) - (from + len)) < 0)
1406 return 0;
1407
1408 while (len > 0 && otp_pages > 0) {
1409 if (!action) { /* OTP Info functions */
1410 struct otp_info *otpinfo;
1411
1412 len -= sizeof(struct otp_info);
1413 if (len <= 0)
1414 return -ENOSPC;
1415
1416 otpinfo = (struct otp_info *) buf;
1417 otpinfo->start = from;
1418 otpinfo->length = mtd->writesize;
1419 otpinfo->locked = 0;
1420
1421 from += mtd->writesize;
1422 buf += sizeof(struct otp_info);
1423 *retlen += sizeof(struct otp_info);
1424 } else {
1425 size_t tmp_retlen;
1426 int size = len;
1427
1428 ret = action(mtd, from, len, &tmp_retlen, buf);
1429
1430 buf += size;
1431 len -= size;
1432 *retlen += size;
1433
1434 if (ret < 0)
1435 return ret;
1436 }
1437 otp_pages--;
1438 }
1439
1440 return 0;
1441}
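Worked example of the user/factory split computed above, using the constants visible in onenand_otp_walk(): on a device of 512Mb density or more, otp_pages starts at 10, so MTD_OTP_USER covers OTP pages 0-9; MTD_OTP_FACTORY advances 'from' by writesize * 10 and keeps 64 - 10 = 54 pages, i.e. pages 10-63. On a smaller-density device the user area is 20 pages and the factory area 44. If from + len exceeds writesize * otp_pages, the boundary check fires and the function returns 0 without touching the chip.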
1442
1443/**
1444 * onenand_get_fact_prot_info - [MTD Interface] Read factory OTP info
1445 * @param mtd MTD device structure
1446 * @param buf the databuffer to put/get data
1447 * @param len number of bytes to read
1448 *
1449 * Read factory OTP info.
1450 */
1451static int onenand_get_fact_prot_info(struct mtd_info *mtd,
1452 struct otp_info *buf, size_t len)
1453{
1454 size_t retlen;
1455 int ret;
1456
1457 ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_FACTORY);
1458
1459 return ret ? : retlen;
1460}
1461
1462/**
1463 * onenand_read_fact_prot_reg - [MTD Interface] Read factory OTP area
1464 * @param mtd MTD device structure
1465 * @param from The offset to read
1466 * @param len number of bytes to read
1467 * @param retlen pointer to variable to store the number of read bytes
1468 * @param buf the databuffer to put/get data
1469 *
1470 * Read factory OTP area.
1471 */
1472static int onenand_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1473 size_t len, size_t *retlen, u_char *buf)
1474{
1475 return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_FACTORY);
1476}
1477
1478/**
1479 * onenand_get_user_prot_info - [MTD Interface] Read user OTP info
1480 * @param mtd MTD device structure
1481 * @param buf the databuffer to put/get data
1482 * @param len number of bytes to read
1483 *
1484 * Read user OTP info.
1485 */
1486static int onenand_get_user_prot_info(struct mtd_info *mtd,
1487 struct otp_info *buf, size_t len)
1488{
1489 size_t retlen;
1490 int ret;
1491
1492 ret = onenand_otp_walk(mtd, 0, len, &retlen, (u_char *) buf, NULL, MTD_OTP_USER);
1493
1494 return ret ? : retlen;
1495}
1496
1497/**
1498 * onenand_read_user_prot_reg - [MTD Interface] Read user OTP area
1499 * @param mtd MTD device structure
1500 * @param from The offset to read
1501 * @param len number of bytes to read
1502 * @param retlen pointer to variable to store the number of read bytes
1503 * @param buf the databuffer to put/get data
1504 *
1505 * Read user OTP area.
1506 */
1507static int onenand_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1508 size_t len, size_t *retlen, u_char *buf)
1509{
1510 return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_read, MTD_OTP_USER);
1511}
1512
1513/**
1514 * onenand_write_user_prot_reg - [MTD Interface] Write user OTP area
1515 * @param mtd MTD device structure
1516 * @param from The offset to write
1517 * @param len number of bytes to write
1518 * @param retlen pointer to variable to store the number of write bytes
1519 * @param buf the databuffer to put/get data
1520 *
1521 * Write user OTP area.
1522 */
1523static int onenand_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1524 size_t len, size_t *retlen, u_char *buf)
1525{
1526 return onenand_otp_walk(mtd, from, len, retlen, buf, do_otp_write, MTD_OTP_USER);
1527}
1528
1529/**
1530 * onenand_lock_user_prot_reg - [MTD Interface] Lock user OTP area
1531 * @param mtd MTD device structure
1532 * @param from The offset to lock
1533 * @param len number of bytes to unlock
1534 *
1535 * Write lock mark on spare area in page 0 in OTP block
1536 */
1537static int onenand_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1538 size_t len)
1539{
1540 unsigned char oob_buf[64];
1541 size_t retlen;
1542 int ret;
1543
1544 memset(oob_buf, 0xff, mtd->oobsize);
1545 /*
1546 * Note: OTP lock operation
1547 * OTP block : 0xXXFC
1548 * 1st block : 0xXXF3 (If chip support)
1549 * Both : 0xXXF0 (If chip support)
1550 */
1551 oob_buf[ONENAND_OTP_LOCK_OFFSET] = 0xFC;
1552
1553 /*
1554 * Write lock mark to 8th word of sector0 of page0 of the spare0.
1555 * We write 16 bytes spare area instead of 2 bytes.
1556 */
1557 from = 0;
1558 len = 16;
1559
1560 ret = onenand_otp_walk(mtd, from, len, &retlen, oob_buf, do_otp_lock, MTD_OTP_USER);
1561
1562 return ret ? : retlen;
1563}
1564#endif /* CONFIG_MTD_ONENAND_OTP */
1565
1327/** 1566/**
1328 * onenand_print_device_info - Print device ID 1567 * onenand_print_device_info - Print device ID
1329 * @param device device ID 1568 * @param device device ID
@@ -1423,15 +1662,15 @@ static int onenand_probe(struct mtd_info *mtd)
1423 1662
1424 /* OneNAND page size & block size */ 1663 /* OneNAND page size & block size */
1425 /* The data buffer size is equal to page size */ 1664 /* The data buffer size is equal to page size */
1426 mtd->oobblock = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE); 1665 mtd->writesize = this->read_word(this->base + ONENAND_REG_DATA_BUFFER_SIZE);
1427 mtd->oobsize = mtd->oobblock >> 5; 1666 mtd->oobsize = mtd->writesize >> 5;
1428 	/* Pages per block is always 64 in OneNAND */ 1667
1429 mtd->erasesize = mtd->oobblock << 6; 1668 mtd->erasesize = mtd->writesize << 6;
1430 1669
1431 this->erase_shift = ffs(mtd->erasesize) - 1; 1670 this->erase_shift = ffs(mtd->erasesize) - 1;
1432 this->page_shift = ffs(mtd->oobblock) - 1; 1671 this->page_shift = ffs(mtd->writesize) - 1;
1433 this->ppb_shift = (this->erase_shift - this->page_shift); 1672 this->ppb_shift = (this->erase_shift - this->page_shift);
1434 this->page_mask = (mtd->erasesize / mtd->oobblock) - 1; 1673 this->page_mask = (mtd->erasesize / mtd->writesize) - 1;
1435 1674
1436 	/* REVISIT: Multichip handling */ 1675
1437 1676
@@ -1475,7 +1714,6 @@ static void onenand_resume(struct mtd_info *mtd)
1475 "in suspended state\n"); 1714 "in suspended state\n");
1476} 1715}
1477 1716
1478
1479/** 1717/**
1480 * onenand_scan - [OneNAND Interface] Scan for the OneNAND device 1718 * onenand_scan - [OneNAND Interface] Scan for the OneNAND device
1481 * @param mtd MTD device structure 1719 * @param mtd MTD device structure
@@ -1522,7 +1760,7 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
1522 /* Allocate buffers, if necessary */ 1760 /* Allocate buffers, if necessary */
1523 if (!this->page_buf) { 1761 if (!this->page_buf) {
1524 size_t len; 1762 size_t len;
1525 len = mtd->oobblock + mtd->oobsize; 1763 len = mtd->writesize + mtd->oobsize;
1526 this->page_buf = kmalloc(len, GFP_KERNEL); 1764 this->page_buf = kmalloc(len, GFP_KERNEL);
1527 if (!this->page_buf) { 1765 if (!this->page_buf) {
1528 printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n"); 1766 printk(KERN_ERR "onenand_scan(): Can't allocate page_buf\n");
@@ -1537,40 +1775,42 @@ int onenand_scan(struct mtd_info *mtd, int maxchips)
1537 1775
1538 switch (mtd->oobsize) { 1776 switch (mtd->oobsize) {
1539 case 64: 1777 case 64:
1540 this->autooob = &onenand_oob_64; 1778 this->ecclayout = &onenand_oob_64;
1541 break; 1779 break;
1542 1780
1543 case 32: 1781 case 32:
1544 this->autooob = &onenand_oob_32; 1782 this->ecclayout = &onenand_oob_32;
1545 break; 1783 break;
1546 1784
1547 default: 1785 default:
1548 printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n", 1786 printk(KERN_WARNING "No OOB scheme defined for oobsize %d\n",
1549 mtd->oobsize); 1787 mtd->oobsize);
1550 /* To prevent kernel oops */ 1788 /* To prevent kernel oops */
1551 this->autooob = &onenand_oob_32; 1789 this->ecclayout = &onenand_oob_32;
1552 break; 1790 break;
1553 } 1791 }
1554 1792
1555 memcpy(&mtd->oobinfo, this->autooob, sizeof(mtd->oobinfo)); 1793 mtd->ecclayout = this->ecclayout;
1556 1794
1557 /* Fill in remaining MTD driver data */ 1795 /* Fill in remaining MTD driver data */
1558 mtd->type = MTD_NANDFLASH; 1796 mtd->type = MTD_NANDFLASH;
1559 mtd->flags = MTD_CAP_NANDFLASH | MTD_ECC; 1797 mtd->flags = MTD_CAP_NANDFLASH;
1560 mtd->ecctype = MTD_ECC_SW; 1798 mtd->ecctype = MTD_ECC_SW;
1561 mtd->erase = onenand_erase; 1799 mtd->erase = onenand_erase;
1562 mtd->point = NULL; 1800 mtd->point = NULL;
1563 mtd->unpoint = NULL; 1801 mtd->unpoint = NULL;
1564 mtd->read = onenand_read; 1802 mtd->read = onenand_read;
1565 mtd->write = onenand_write; 1803 mtd->write = onenand_write;
1566 mtd->read_ecc = onenand_read_ecc;
1567 mtd->write_ecc = onenand_write_ecc;
1568 mtd->read_oob = onenand_read_oob; 1804 mtd->read_oob = onenand_read_oob;
1569 mtd->write_oob = onenand_write_oob; 1805 mtd->write_oob = onenand_write_oob;
1570 mtd->readv = NULL; 1806#ifdef CONFIG_MTD_ONENAND_OTP
1571 mtd->readv_ecc = NULL; 1807 mtd->get_fact_prot_info = onenand_get_fact_prot_info;
1572 mtd->writev = onenand_writev; 1808 mtd->read_fact_prot_reg = onenand_read_fact_prot_reg;
1573 mtd->writev_ecc = onenand_writev_ecc; 1809 mtd->get_user_prot_info = onenand_get_user_prot_info;
1810 mtd->read_user_prot_reg = onenand_read_user_prot_reg;
1811 mtd->write_user_prot_reg = onenand_write_user_prot_reg;
1812 mtd->lock_user_prot_reg = onenand_lock_user_prot_reg;
1813#endif
1574 mtd->sync = onenand_sync; 1814 mtd->sync = onenand_sync;
1575 mtd->lock = NULL; 1815 mtd->lock = NULL;
1576 mtd->unlock = onenand_unlock; 1816 mtd->unlock = onenand_unlock;
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index 4510d3361eaa..1b00dac3d7d6 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -17,6 +17,9 @@
17#include <linux/mtd/onenand.h> 17#include <linux/mtd/onenand.h>
18#include <linux/mtd/compatmac.h> 18#include <linux/mtd/compatmac.h>
19 19
20extern int onenand_do_read_oob(struct mtd_info *mtd, loff_t from, size_t len,
21 size_t *retlen, u_char *buf);
22
20/** 23/**
21 * check_short_pattern - [GENERIC] check if a pattern is in the buffer 24 * check_short_pattern - [GENERIC] check if a pattern is in the buffer
22 * @param buf the buffer to search 25 * @param buf the buffer to search
@@ -87,13 +90,13 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
87 90
88 /* No need to read pages fully, 91 /* No need to read pages fully,
89 * just read required OOB bytes */ 92 * just read required OOB bytes */
90 ret = mtd->read_oob(mtd, from + j * mtd->oobblock + bd->offs, 93 ret = onenand_do_read_oob(mtd, from + j * mtd->writesize + bd->offs,
91 readlen, &retlen, &buf[0]); 94 readlen, &retlen, &buf[0]);
92 95
93 if (ret) 96 if (ret)
94 return ret; 97 return ret;
95 98
96 if (check_short_pattern(&buf[j * scanlen], scanlen, mtd->oobblock, bd)) { 99 if (check_short_pattern(&buf[j * scanlen], scanlen, mtd->writesize, bd)) {
97 bbm->bbt[i >> 3] |= 0x03 << (i & 0x6); 100 bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
98 printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n", 101 printk(KERN_WARNING "Bad eraseblock %d at 0x%08x\n",
99 i >> 1, (unsigned int) from); 102 i >> 1, (unsigned int) from);
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index c077d2ec9cdd..5b58523e4d4e 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: redboot.c,v 1.19 2005/12/01 10:03:51 dwmw2 Exp $ 2 * $Id: redboot.c,v 1.21 2006/03/30 18:34:37 bjd Exp $
3 * 3 *
4 * Parse RedBoot-style Flash Image System (FIS) tables and 4 * Parse RedBoot-style Flash Image System (FIS) tables and
5 * produce a Linux partition array to match. 5 * produce a Linux partition array to match.
@@ -15,14 +15,14 @@
15 15
16struct fis_image_desc { 16struct fis_image_desc {
17 unsigned char name[16]; // Null terminated name 17 unsigned char name[16]; // Null terminated name
18 unsigned long flash_base; // Address within FLASH of image 18 uint32_t flash_base; // Address within FLASH of image
19 unsigned long mem_base; // Address in memory where it executes 19 uint32_t mem_base; // Address in memory where it executes
20 unsigned long size; // Length of image 20 uint32_t size; // Length of image
21 unsigned long entry_point; // Execution entry point 21 uint32_t entry_point; // Execution entry point
22 unsigned long data_length; // Length of actual data 22 uint32_t data_length; // Length of actual data
23 unsigned char _pad[256-(16+7*sizeof(unsigned long))]; 23 unsigned char _pad[256-(16+7*sizeof(uint32_t))];
24 unsigned long desc_cksum; // Checksum over image descriptor 24 uint32_t desc_cksum; // Checksum over image descriptor
25 unsigned long file_cksum; // Checksum over image data 25 uint32_t file_cksum; // Checksum over image data
26}; 26};
27 27
28struct fis_list { 28struct fis_list {
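The switch from unsigned long to uint32_t matters on 64-bit hosts: the _pad expression keeps the structure at 256 bytes either way, but with 8-byte longs each field is read twice as wide as the 32-bit values RedBoot actually stores, so every offset after name[] is wrong. A stand-alone sketch of the fixed-width layout with a compile-time size check; the struct and typedef names are invented for the illustration.

#include <stdint.h>

/* Fixed-width mirror of the on-flash FIS entry: 16 + 5*4 + 212 + 2*4 = 256. */
struct fis_image_desc_sketch {
	unsigned char name[16];		/* NUL-terminated image name     */
	uint32_t flash_base;		/* address of the image in flash */
	uint32_t mem_base;		/* load/execute address          */
	uint32_t size;			/* length of the image           */
	uint32_t entry_point;		/* execution entry point         */
	uint32_t data_length;		/* length of the actual data     */
	unsigned char _pad[256 - (16 + 7 * sizeof(uint32_t))];	/* 212 bytes */
	uint32_t desc_cksum;		/* checksum over this descriptor */
	uint32_t file_cksum;		/* checksum over the image data  */
};

/* Fails to compile if the descriptor is not exactly 256 bytes. */
typedef char fis_desc_is_256_bytes[(sizeof(struct fis_image_desc_sketch) == 256) ? 1 : -1];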
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index a3e00a4635a5..fa4362fb4dd8 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2005 Sean Young <sean@mess.org> 4 * Copyright (C) 2005 Sean Young <sean@mess.org>
5 * 5 *
6 * $Id: rfd_ftl.c,v 1.5 2005/11/07 11:14:21 gleixner Exp $ 6 * $Id: rfd_ftl.c,v 1.8 2006/01/15 12:51:44 sean Exp $
7 * 7 *
8 * This type of flash translation layer (FTL) is used by the Embedded BIOS 8 * This type of flash translation layer (FTL) is used by the Embedded BIOS
9 * by General Software. It is known as the Resident Flash Disk (RFD), see: 9 * by General Software. It is known as the Resident Flash Disk (RFD), see:
@@ -61,6 +61,7 @@ struct block {
61 BLOCK_OK, 61 BLOCK_OK,
62 BLOCK_ERASING, 62 BLOCK_ERASING,
63 BLOCK_ERASED, 63 BLOCK_ERASED,
64 BLOCK_UNUSED,
64 BLOCK_FAILED 65 BLOCK_FAILED
65 } state; 66 } state;
66 int free_sectors; 67 int free_sectors;
@@ -99,10 +100,8 @@ static int build_block_map(struct partition *part, int block_no)
99 block->offset = part->block_size * block_no; 100 block->offset = part->block_size * block_no;
100 101
101 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) { 102 if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
102 block->state = BLOCK_ERASED; /* assumption */ 103 block->state = BLOCK_UNUSED;
103 block->free_sectors = part->data_sectors_per_block; 104 return -ENOENT;
104 part->reserved_block = block_no;
105 return 1;
106 } 105 }
107 106
108 block->state = BLOCK_OK; 107 block->state = BLOCK_OK;
@@ -124,7 +123,7 @@ static int build_block_map(struct partition *part, int block_no)
124 entry = 0; 123 entry = 0;
125 124
126 if (entry >= part->sector_count) { 125 if (entry >= part->sector_count) {
127 printk(KERN_NOTICE PREFIX 126 printk(KERN_WARNING PREFIX
128 "'%s': unit #%d: entry %d corrupt, " 127 "'%s': unit #%d: entry %d corrupt, "
129 "sector %d out of range\n", 128 "sector %d out of range\n",
130 part->mbd.mtd->name, block_no, i, entry); 129 part->mbd.mtd->name, block_no, i, entry);
@@ -132,7 +131,7 @@ static int build_block_map(struct partition *part, int block_no)
132 } 131 }
133 132
134 if (part->sector_map[entry] != -1) { 133 if (part->sector_map[entry] != -1) {
135 printk(KERN_NOTICE PREFIX 134 printk(KERN_WARNING PREFIX
136 "'%s': more than one entry for sector %d\n", 135 "'%s': more than one entry for sector %d\n",
137 part->mbd.mtd->name, entry); 136 part->mbd.mtd->name, entry);
138 part->errors = 1; 137 part->errors = 1;
@@ -167,7 +166,7 @@ static int scan_header(struct partition *part)
167 /* each erase block has three bytes header, followed by the map */ 166 /* each erase block has three bytes header, followed by the map */
168 part->header_sectors_per_block = 167 part->header_sectors_per_block =
169 ((HEADER_MAP_OFFSET + sectors_per_block) * 168 ((HEADER_MAP_OFFSET + sectors_per_block) *
170 sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE; 169 sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;
171 170
172 part->data_sectors_per_block = sectors_per_block - 171 part->data_sectors_per_block = sectors_per_block -
173 part->header_sectors_per_block; 172 part->header_sectors_per_block;
@@ -226,7 +225,7 @@ static int scan_header(struct partition *part)
226 } 225 }
227 226
228 if (part->reserved_block == -1) { 227 if (part->reserved_block == -1) {
229 printk(KERN_NOTICE PREFIX "'%s': no empty erase unit found\n", 228 printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
230 part->mbd.mtd->name); 229 part->mbd.mtd->name);
231 230
232 part->errors = 1; 231 part->errors = 1;
@@ -315,7 +314,7 @@ static void erase_callback(struct erase_info *erase)
315 rc = -EIO; 314 rc = -EIO;
316 315
317 if (rc) { 316 if (rc) {
318 printk(KERN_NOTICE PREFIX "'%s': unable to write RFD " 317 printk(KERN_ERR PREFIX "'%s': unable to write RFD "
319 "header at 0x%lx\n", 318 "header at 0x%lx\n",
320 part->mbd.mtd->name, 319 part->mbd.mtd->name,
321 part->blocks[i].offset); 320 part->blocks[i].offset);
@@ -348,7 +347,7 @@ static int erase_block(struct partition *part, int block)
348 rc = part->mbd.mtd->erase(part->mbd.mtd, erase); 347 rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
349 348
350 if (rc) { 349 if (rc) {
351 printk(KERN_WARNING PREFIX "erase of region %x,%x on '%s' " 350 printk(KERN_ERR PREFIX "erase of region %x,%x on '%s' "
352 "failed\n", erase->addr, erase->len, 351 "failed\n", erase->addr, erase->len,
353 part->mbd.mtd->name); 352 part->mbd.mtd->name);
354 kfree(erase); 353 kfree(erase);
@@ -383,7 +382,7 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
383 rc = -EIO; 382 rc = -EIO;
384 383
385 if (rc) { 384 if (rc) {
386 printk(KERN_NOTICE PREFIX "error reading '%s' at " 385 printk(KERN_ERR PREFIX "error reading '%s' at "
387 "0x%lx\n", part->mbd.mtd->name, 386 "0x%lx\n", part->mbd.mtd->name,
388 part->blocks[block_no].offset); 387 part->blocks[block_no].offset);
389 388
@@ -423,7 +422,7 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
423 rc = -EIO; 422 rc = -EIO;
424 423
425 if (rc) { 424 if (rc) {
426 printk(KERN_NOTICE PREFIX "'%s': Unable to " 425 printk(KERN_ERR PREFIX "'%s': Unable to "
427 "read sector for relocation\n", 426 "read sector for relocation\n",
428 part->mbd.mtd->name); 427 part->mbd.mtd->name);
429 428
@@ -520,7 +519,7 @@ static int reclaim_block(struct partition *part, u_long *old_sector)
520 * because if we fill that one up first it'll have the most chance of having 519 * because if we fill that one up first it'll have the most chance of having
521 * the least live sectors at reclaim. 520 * the least live sectors at reclaim.
522 */ 521 */
523static int find_free_block(const struct partition *part) 522static int find_free_block(struct partition *part)
524{ 523{
525 int block, stop; 524 int block, stop;
526 525
@@ -533,6 +532,9 @@ static int find_free_block(const struct partition *part)
533 block != part->reserved_block) 532 block != part->reserved_block)
534 return block; 533 return block;
535 534
535 if (part->blocks[block].state == BLOCK_UNUSED)
536 erase_block(part, block);
537
536 if (++block >= part->total_blocks) 538 if (++block >= part->total_blocks)
537 block = 0; 539 block = 0;
538 540
@@ -541,7 +543,7 @@ static int find_free_block(const struct partition *part)
541 return -1; 543 return -1;
542} 544}
543 545
544static int find_writeable_block(struct partition *part, u_long *old_sector) 546static int find_writable_block(struct partition *part, u_long *old_sector)
545{ 547{
546 int rc, block; 548 int rc, block;
547 size_t retlen; 549 size_t retlen;
@@ -570,7 +572,7 @@ static int find_writeable_block(struct partition *part, u_long *old_sector)
570 rc = -EIO; 572 rc = -EIO;
571 573
572 if (rc) { 574 if (rc) {
573 printk(KERN_NOTICE PREFIX "'%s': unable to read header at " 575 printk(KERN_ERR PREFIX "'%s': unable to read header at "
574 "0x%lx\n", part->mbd.mtd->name, 576 "0x%lx\n", part->mbd.mtd->name,
575 part->blocks[block].offset); 577 part->blocks[block].offset);
576 goto err; 578 goto err;
@@ -602,7 +604,7 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
602 rc = -EIO; 604 rc = -EIO;
603 605
604 if (rc) { 606 if (rc) {
605 printk(KERN_WARNING PREFIX "error writing '%s' at " 607 printk(KERN_ERR PREFIX "error writing '%s' at "
606 "0x%lx\n", part->mbd.mtd->name, addr); 608 "0x%lx\n", part->mbd.mtd->name, addr);
607 if (rc) 609 if (rc)
608 goto err; 610 goto err;
@@ -652,7 +654,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
652 if (part->current_block == -1 || 654 if (part->current_block == -1 ||
653 !part->blocks[part->current_block].free_sectors) { 655 !part->blocks[part->current_block].free_sectors) {
654 656
655 rc = find_writeable_block(part, old_addr); 657 rc = find_writable_block(part, old_addr);
656 if (rc) 658 if (rc)
657 goto err; 659 goto err;
658 } 660 }
@@ -675,7 +677,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
675 rc = -EIO; 677 rc = -EIO;
676 678
677 if (rc) { 679 if (rc) {
678 printk(KERN_WARNING PREFIX "error writing '%s' at 0x%lx\n", 680 printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
679 part->mbd.mtd->name, addr); 681 part->mbd.mtd->name, addr);
680 if (rc) 682 if (rc)
681 goto err; 683 goto err;
@@ -695,7 +697,7 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
695 rc = -EIO; 697 rc = -EIO;
696 698
697 if (rc) { 699 if (rc) {
698 printk(KERN_WARNING PREFIX "error writing '%s' at 0x%lx\n", 700 printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
699 part->mbd.mtd->name, addr); 701 part->mbd.mtd->name, addr);
700 if (rc) 702 if (rc)
701 goto err; 703 goto err;
@@ -776,7 +778,7 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
776 part->block_size = block_size; 778 part->block_size = block_size;
777 else { 779 else {
778 if (!mtd->erasesize) { 780 if (!mtd->erasesize) {
779 printk(KERN_NOTICE PREFIX "please provide block_size"); 781 printk(KERN_WARNING PREFIX "please provide block_size");
780 return; 782 return;
781 } 783 }
782 else 784 else
@@ -791,8 +793,8 @@ static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
791 if (!(mtd->flags & MTD_WRITEABLE)) 793 if (!(mtd->flags & MTD_WRITEABLE))
792 part->mbd.readonly = 1; 794 part->mbd.readonly = 1;
793 else if (part->errors) { 795 else if (part->errors) {
794 printk(KERN_NOTICE PREFIX "'%s': errors found, " 796 printk(KERN_WARNING PREFIX "'%s': errors found, "
795 "setting read-only", mtd->name); 797 "setting read-only\n", mtd->name);
796 part->mbd.readonly = 1; 798 part->mbd.readonly = 1;
797 } 799 }
798 800
diff --git a/fs/Kconfig b/fs/Kconfig
index f9b5842c8d2d..572cc435a1bb 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1101,6 +1101,44 @@ config JFFS2_SUMMARY
1101 1101
1102 If unsure, say 'N'. 1102 If unsure, say 'N'.
1103 1103
1104config JFFS2_FS_XATTR
1105 bool "JFFS2 XATTR support (EXPERIMENTAL)"
1106 depends on JFFS2_FS && EXPERIMENTAL && !JFFS2_FS_WRITEBUFFER
1107 default n
1108 help
1109 Extended attributes are name:value pairs associated with inodes by
1110 the kernel or by users (see the attr(5) manual page, or visit
1111 <http://acl.bestbits.at/> for details).
1112
1113 If unsure, say N.
1114
1115config JFFS2_FS_POSIX_ACL
1116 bool "JFFS2 POSIX Access Control Lists"
1117 depends on JFFS2_FS_XATTR
1118 default y
1119 select FS_POSIX_ACL
1120 help
1121 Posix Access Control Lists (ACLs) support permissions for users and
1122 groups beyond the owner/group/world scheme.
1123
1124 To learn more about Access Control Lists, visit the Posix ACLs for
1125 Linux website <http://acl.bestbits.at/>.
1126
1127 If you don't know what Access Control Lists are, say N
1128
1129config JFFS2_FS_SECURITY
1130 bool "JFFS2 Security Labels"
1131 depends on JFFS2_FS_XATTR
1132 default y
1133 help
1134 Security labels support alternative access control models
1135 implemented by security modules like SELinux. This option
1136 enables an extended attribute handler for file security
1137 labels in the jffs2 filesystem.
1138
1139 If you are not using a security module that requires using
1140 extended attributes for file security labels, say N.
1141
1104config JFFS2_COMPRESSION_OPTIONS 1142config JFFS2_COMPRESSION_OPTIONS
1105 bool "Advanced compression options for JFFS2" 1143 bool "Advanced compression options for JFFS2"
1106 depends on JFFS2_FS 1144 depends on JFFS2_FS
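The JFFS2_FS_XATTR help text above describes extended attributes as name:value pairs attached to inodes. As a rough illustration of what that option enables, here is a small userspace sketch that sets and reads back an attribute through the standard xattr system calls; the mount point and attribute name are made-up examples, not anything defined by the patch.

/* Minimal userspace sketch: exercise the xattr support that
 * CONFIG_JFFS2_FS_XATTR enables.  The mount point and attribute
 * name below are hypothetical. */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/jffs2/example.txt";   /* assumed jffs2 mount */
	const char *name = "user.comment";             /* user xattr namespace */
	const char *value = "stored in a jffs2 xattr node";
	char buf[64];
	ssize_t len;

	if (setxattr(path, name, value, strlen(value), 0) != 0) {
		perror("setxattr");
		return 1;
	}
	len = getxattr(path, name, buf, sizeof(buf) - 1);
	if (len < 0) {
		perror("getxattr");
		return 1;
	}
	buf[len] = '\0';
	printf("%s = %s\n", name, buf);
	return 0;
}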
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index 0ef207dfaf6f..5371a403130a 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -247,7 +247,7 @@ flash_safe_read(struct mtd_info *mtd, loff_t from,
247 D3(printk(KERN_NOTICE "flash_safe_read(%p, %08x, %p, %08x)\n", 247 D3(printk(KERN_NOTICE "flash_safe_read(%p, %08x, %p, %08x)\n",
248 mtd, (unsigned int) from, buf, count)); 248 mtd, (unsigned int) from, buf, count));
249 249
250 res = MTD_READ(mtd, from, count, &retlen, buf); 250 res = mtd->read(mtd, from, count, &retlen, buf);
251 if (retlen != count) { 251 if (retlen != count) {
252 panic("Didn't read all bytes in flash_safe_read(). Returned %d\n", res); 252 panic("Didn't read all bytes in flash_safe_read(). Returned %d\n", res);
253 } 253 }
@@ -262,7 +262,7 @@ flash_read_u32(struct mtd_info *mtd, loff_t from)
262 __u32 ret; 262 __u32 ret;
263 int res; 263 int res;
264 264
265 res = MTD_READ(mtd, from, 4, &retlen, (unsigned char *)&ret); 265 res = mtd->read(mtd, from, 4, &retlen, (unsigned char *)&ret);
266 if (retlen != 4) { 266 if (retlen != 4) {
267 printk("Didn't read all bytes in flash_read_u32(). Returned %d\n", res); 267 printk("Didn't read all bytes in flash_read_u32(). Returned %d\n", res);
268 return 0; 268 return 0;
@@ -282,7 +282,7 @@ flash_safe_write(struct mtd_info *mtd, loff_t to,
282 D3(printk(KERN_NOTICE "flash_safe_write(%p, %08x, %p, %08x)\n", 282 D3(printk(KERN_NOTICE "flash_safe_write(%p, %08x, %p, %08x)\n",
283 mtd, (unsigned int) to, buf, count)); 283 mtd, (unsigned int) to, buf, count));
284 284
285 res = MTD_WRITE(mtd, to, count, &retlen, buf); 285 res = mtd->write(mtd, to, count, &retlen, buf);
286 if (retlen != count) { 286 if (retlen != count) {
287 printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res); 287 printk("Didn't write all bytes in flash_safe_write(). Returned %d\n", res);
288 } 288 }
@@ -300,9 +300,9 @@ flash_safe_writev(struct mtd_info *mtd, const struct kvec *vecs,
300 300
301 D3(printk(KERN_NOTICE "flash_safe_writev(%p, %08x, %p)\n", 301 D3(printk(KERN_NOTICE "flash_safe_writev(%p, %08x, %p)\n",
302 mtd, (unsigned int) to, vecs)); 302 mtd, (unsigned int) to, vecs));
303 303
304 if (mtd->writev) { 304 if (mtd->writev) {
305 res = MTD_WRITEV(mtd, vecs, iovec_cnt, to, &retlen); 305 res = mtd->writev(mtd, vecs, iovec_cnt, to, &retlen);
306 return res ? res : retlen; 306 return res ? res : retlen;
307 } 307 }
308 /* Not implemented writev. Repeatedly use write - on the not so 308 /* Not implemented writev. Repeatedly use write - on the not so
@@ -312,7 +312,8 @@ flash_safe_writev(struct mtd_info *mtd, const struct kvec *vecs,
312 retlen=0; 312 retlen=0;
313 313
314 for (i=0; !res && i<iovec_cnt; i++) { 314 for (i=0; !res && i<iovec_cnt; i++) {
315 res = MTD_WRITE(mtd, to, vecs[i].iov_len, &retlen_a, vecs[i].iov_base); 315 res = mtd->write(mtd, to, vecs[i].iov_len, &retlen_a,
316 vecs[i].iov_base);
316 if (retlen_a != vecs[i].iov_len) { 317 if (retlen_a != vecs[i].iov_len) {
317 printk("Didn't write all bytes in flash_safe_writev(). Returned %d\n", res); 318 printk("Didn't write all bytes in flash_safe_writev(). Returned %d\n", res);
318 if (i != iovec_cnt-1) 319 if (i != iovec_cnt-1)
@@ -393,7 +394,7 @@ flash_erase_region(struct mtd_info *mtd, loff_t start,
393 set_current_state(TASK_UNINTERRUPTIBLE); 394 set_current_state(TASK_UNINTERRUPTIBLE);
394 add_wait_queue(&wait_q, &wait); 395 add_wait_queue(&wait_q, &wait);
395 396
396 if (MTD_ERASE(mtd, erase) < 0) { 397 if (mtd->erase(mtd, erase) < 0) {
397 set_current_state(TASK_RUNNING); 398 set_current_state(TASK_RUNNING);
398 remove_wait_queue(&wait_q, &wait); 399 remove_wait_queue(&wait_q, &wait);
399 kfree(erase); 400 kfree(erase);
diff --git a/fs/jffs2/Makefile b/fs/jffs2/Makefile
index 77dc5561a04e..7f28ee0bd132 100644
--- a/fs/jffs2/Makefile
+++ b/fs/jffs2/Makefile
@@ -12,6 +12,9 @@ jffs2-y += symlink.o build.o erase.o background.o fs.o writev.o
12jffs2-y += super.o debug.o 12jffs2-y += super.o debug.o
13 13
14jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o 14jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER) += wbuf.o
15jffs2-$(CONFIG_JFFS2_FS_XATTR) += xattr.o xattr_trusted.o xattr_user.o
16jffs2-$(CONFIG_JFFS2_FS_SECURITY) += security.o
17jffs2-$(CONFIG_JFFS2_FS_POSIX_ACL) += acl.o
15jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o 18jffs2-$(CONFIG_JFFS2_RUBIN) += compr_rubin.o
16jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o 19jffs2-$(CONFIG_JFFS2_RTIME) += compr_rtime.o
17jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o 20jffs2-$(CONFIG_JFFS2_ZLIB) += compr_zlib.o
diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
index b7943439b6ec..c8f0bd64e53e 100644
--- a/fs/jffs2/README.Locking
+++ b/fs/jffs2/README.Locking
@@ -150,3 +150,24 @@ the buffer.
150 150
151Ordering constraints: 151Ordering constraints:
152 Lock wbuf_sem last, after the alloc_sem or and f->sem. 152 Lock wbuf_sem last, after the alloc_sem or and f->sem.
153
154
155 c->xattr_sem
156 ------------
157
158This read/write semaphore protects against concurrent access to the
 159xattr-related objects, which include state in the superblock and ic->xref.
 160On the read-only path, the write semaphore is more exclusion than is
 161needed; the read semaphore is sufficient. But you must hold the write
 162semaphore when updating, creating or deleting any xattr-related object.
 163
 164Once xattr_sem is released, there is no guarantee that those objects
 165still exist. A sequence of steps therefore often has to be retried when
 166an update turns out to be necessary while only the read semaphore is held.
 167For example, do_jffs2_getxattr() first holds the read semaphore to scan
 168the xref and xdatum. If it then needs to load the name/value pair from
 169the medium, it releases the read semaphore and retries the whole process
 170while holding the write semaphore (a minimal sketch of this follows below).
171
172Ordering constraints:
173 Lock xattr_sem last, after the alloc_sem.
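The retry pattern described above can be illustrated with a small userspace analogue. This is not part of the patch: a pthread rwlock stands in for xattr_sem, and the cached value plus load_from_medium() are hypothetical stand-ins for the xref/xdatum handling, so it is only a minimal sketch of the locking discipline, not the JFFS2 implementation.

/* Userspace analogue of the xattr_sem retry pattern described above:
 * look up under the read lock; if the cached value is missing, drop the
 * read lock and redo the lookup under the write lock before loading it. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t xattr_sem = PTHREAD_RWLOCK_INITIALIZER;
static char cached_value[32];          /* empty string == not loaded yet */

static void load_from_medium(void)     /* pretend flash read */
{
	strcpy(cached_value, "value-from-flash");
}

static void get_value(char *out, size_t len)
{
	pthread_rwlock_rdlock(&xattr_sem);
	if (cached_value[0] == '\0') {
		/* Must modify shared state: retry under the write lock. */
		pthread_rwlock_unlock(&xattr_sem);
		pthread_rwlock_wrlock(&xattr_sem);
		if (cached_value[0] == '\0')   /* re-check after re-locking */
			load_from_medium();
	}
	snprintf(out, len, "%s", cached_value);
	pthread_rwlock_unlock(&xattr_sem);
}

int main(void)
{
	char buf[32];
	get_value(buf, sizeof(buf));
	printf("%s\n", buf);
	return 0;
}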
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
new file mode 100644
index 000000000000..320dd48b834e
--- /dev/null
+++ b/fs/jffs2/acl.c
@@ -0,0 +1,485 @@
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2006 NEC Corporation
5 *
6 * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/time.h>
15#include <linux/crc32.h>
16#include <linux/jffs2.h>
17#include <linux/xattr.h>
18#include <linux/posix_acl_xattr.h>
19#include <linux/mtd/mtd.h>
20#include "nodelist.h"
21
22static size_t jffs2_acl_size(int count)
23{
24 if (count <= 4) {
25 return sizeof(struct jffs2_acl_header)
26 + count * sizeof(struct jffs2_acl_entry_short);
27 } else {
28 return sizeof(struct jffs2_acl_header)
29 + 4 * sizeof(struct jffs2_acl_entry_short)
30 + (count - 4) * sizeof(struct jffs2_acl_entry);
31 }
32}
33
34static int jffs2_acl_count(size_t size)
35{
36 size_t s;
37
38 size -= sizeof(struct jffs2_acl_header);
39 s = size - 4 * sizeof(struct jffs2_acl_entry_short);
40 if (s < 0) {
41 if (size % sizeof(struct jffs2_acl_entry_short))
42 return -1;
43 return size / sizeof(struct jffs2_acl_entry_short);
44 } else {
45 if (s % sizeof(struct jffs2_acl_entry))
46 return -1;
47 return s / sizeof(struct jffs2_acl_entry) + 4;
48 }
49}
50
51static struct posix_acl *jffs2_acl_from_medium(void *value, size_t size)
52{
53 void *end = value + size;
54 struct jffs2_acl_header *header = value;
55 struct jffs2_acl_entry *entry;
56 struct posix_acl *acl;
57 uint32_t ver;
58 int i, count;
59
60 if (!value)
61 return NULL;
62 if (size < sizeof(struct jffs2_acl_header))
63 return ERR_PTR(-EINVAL);
64 ver = je32_to_cpu(header->a_version);
65 if (ver != JFFS2_ACL_VERSION) {
66 JFFS2_WARNING("Invalid ACL version. (=%u)\n", ver);
67 return ERR_PTR(-EINVAL);
68 }
69
70 value += sizeof(struct jffs2_acl_header);
71 count = jffs2_acl_count(size);
72 if (count < 0)
73 return ERR_PTR(-EINVAL);
74 if (count == 0)
75 return NULL;
76
77 acl = posix_acl_alloc(count, GFP_KERNEL);
78 if (!acl)
79 return ERR_PTR(-ENOMEM);
80
81 for (i=0; i < count; i++) {
82 entry = value;
83 if (value + sizeof(struct jffs2_acl_entry_short) > end)
84 goto fail;
85 acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag);
86 acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm);
87 switch (acl->a_entries[i].e_tag) {
88 case ACL_USER_OBJ:
89 case ACL_GROUP_OBJ:
90 case ACL_MASK:
91 case ACL_OTHER:
92 value += sizeof(struct jffs2_acl_entry_short);
93 acl->a_entries[i].e_id = ACL_UNDEFINED_ID;
94 break;
95
96 case ACL_USER:
97 case ACL_GROUP:
98 value += sizeof(struct jffs2_acl_entry);
99 if (value > end)
100 goto fail;
101 acl->a_entries[i].e_id = je32_to_cpu(entry->e_id);
102 break;
103
104 default:
105 goto fail;
106 }
107 }
108 if (value != end)
109 goto fail;
110 return acl;
111 fail:
112 posix_acl_release(acl);
113 return ERR_PTR(-EINVAL);
114}
115
116static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size)
117{
118 struct jffs2_acl_header *header;
119 struct jffs2_acl_entry *entry;
120 void *e;
121 size_t i;
122
123 *size = jffs2_acl_size(acl->a_count);
124 header = kmalloc(sizeof(*header) + acl->a_count * sizeof(*entry), GFP_KERNEL);
125 if (!header)
126 return ERR_PTR(-ENOMEM);
127 header->a_version = cpu_to_je32(JFFS2_ACL_VERSION);
128 e = header + 1;
129 for (i=0; i < acl->a_count; i++) {
130 entry = e;
131 entry->e_tag = cpu_to_je16(acl->a_entries[i].e_tag);
132 entry->e_perm = cpu_to_je16(acl->a_entries[i].e_perm);
133 switch(acl->a_entries[i].e_tag) {
134 case ACL_USER:
135 case ACL_GROUP:
136 entry->e_id = cpu_to_je32(acl->a_entries[i].e_id);
137 e += sizeof(struct jffs2_acl_entry);
138 break;
139
140 case ACL_USER_OBJ:
141 case ACL_GROUP_OBJ:
142 case ACL_MASK:
143 case ACL_OTHER:
144 e += sizeof(struct jffs2_acl_entry_short);
145 break;
146
147 default:
148 goto fail;
149 }
150 }
151 return header;
152 fail:
153 kfree(header);
154 return ERR_PTR(-EINVAL);
155}
156
157static struct posix_acl *jffs2_iget_acl(struct inode *inode, struct posix_acl **i_acl)
158{
159 struct posix_acl *acl = JFFS2_ACL_NOT_CACHED;
160
161 spin_lock(&inode->i_lock);
162 if (*i_acl != JFFS2_ACL_NOT_CACHED)
163 acl = posix_acl_dup(*i_acl);
164 spin_unlock(&inode->i_lock);
165 return acl;
166}
167
168static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct posix_acl *acl)
169{
170 spin_lock(&inode->i_lock);
171 if (*i_acl != JFFS2_ACL_NOT_CACHED)
172 posix_acl_release(*i_acl);
173 *i_acl = posix_acl_dup(acl);
174 spin_unlock(&inode->i_lock);
175}
176
177static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
178{
179 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
180 struct posix_acl *acl;
181 char *value = NULL;
182 int rc, xprefix;
183
184 switch (type) {
185 case ACL_TYPE_ACCESS:
186 acl = jffs2_iget_acl(inode, &f->i_acl_access);
187 if (acl != JFFS2_ACL_NOT_CACHED)
188 return acl;
189 xprefix = JFFS2_XPREFIX_ACL_ACCESS;
190 break;
191 case ACL_TYPE_DEFAULT:
192 acl = jffs2_iget_acl(inode, &f->i_acl_default);
193 if (acl != JFFS2_ACL_NOT_CACHED)
194 return acl;
195 xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
196 break;
197 default:
198 return ERR_PTR(-EINVAL);
199 }
200 rc = do_jffs2_getxattr(inode, xprefix, "", NULL, 0);
201 if (rc > 0) {
202 value = kmalloc(rc, GFP_KERNEL);
203 if (!value)
204 return ERR_PTR(-ENOMEM);
205 rc = do_jffs2_getxattr(inode, xprefix, "", value, rc);
206 }
207 if (rc > 0) {
208 acl = jffs2_acl_from_medium(value, rc);
209 } else if (rc == -ENODATA || rc == -ENOSYS) {
210 acl = NULL;
211 } else {
212 acl = ERR_PTR(rc);
213 }
214 if (value)
215 kfree(value);
216 if (!IS_ERR(acl)) {
217 switch (type) {
218 case ACL_TYPE_ACCESS:
219 jffs2_iset_acl(inode, &f->i_acl_access, acl);
220 break;
221 case ACL_TYPE_DEFAULT:
222 jffs2_iset_acl(inode, &f->i_acl_default, acl);
223 break;
224 }
225 }
226 return acl;
227}
228
229static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
230{
231 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
232 size_t size = 0;
233 char *value = NULL;
234 int rc, xprefix;
235
236 if (S_ISLNK(inode->i_mode))
237 return -EOPNOTSUPP;
238
239 switch (type) {
240 case ACL_TYPE_ACCESS:
241 xprefix = JFFS2_XPREFIX_ACL_ACCESS;
242 if (acl) {
243 mode_t mode = inode->i_mode;
244 rc = posix_acl_equiv_mode(acl, &mode);
245 if (rc < 0)
246 return rc;
247 if (inode->i_mode != mode) {
248 inode->i_mode = mode;
249 jffs2_dirty_inode(inode);
250 }
251 if (rc == 0)
252 acl = NULL;
253 }
254 break;
255 case ACL_TYPE_DEFAULT:
256 xprefix = JFFS2_XPREFIX_ACL_DEFAULT;
257 if (!S_ISDIR(inode->i_mode))
258 return acl ? -EACCES : 0;
259 break;
260 default:
261 return -EINVAL;
262 }
263 if (acl) {
264 value = jffs2_acl_to_medium(acl, &size);
265 if (IS_ERR(value))
266 return PTR_ERR(value);
267 }
268
269 rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
270 if (value)
271 kfree(value);
272 if (!rc) {
273 switch(type) {
274 case ACL_TYPE_ACCESS:
275 jffs2_iset_acl(inode, &f->i_acl_access, acl);
276 break;
277 case ACL_TYPE_DEFAULT:
278 jffs2_iset_acl(inode, &f->i_acl_default, acl);
279 break;
280 }
281 }
282 return rc;
283}
284
285static int jffs2_check_acl(struct inode *inode, int mask)
286{
287 struct posix_acl *acl;
288 int rc;
289
290 acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
291 if (IS_ERR(acl))
292 return PTR_ERR(acl);
293 if (acl) {
294 rc = posix_acl_permission(inode, acl, mask);
295 posix_acl_release(acl);
296 return rc;
297 }
298 return -EAGAIN;
299}
300
301int jffs2_permission(struct inode *inode, int mask, struct nameidata *nd)
302{
303 return generic_permission(inode, mask, jffs2_check_acl);
304}
305
306int jffs2_init_acl(struct inode *inode, struct inode *dir)
307{
308 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
309 struct posix_acl *acl = NULL, *clone;
310 mode_t mode;
311 int rc = 0;
312
313 f->i_acl_access = JFFS2_ACL_NOT_CACHED;
314 f->i_acl_default = JFFS2_ACL_NOT_CACHED;
315 if (!S_ISLNK(inode->i_mode)) {
316 acl = jffs2_get_acl(dir, ACL_TYPE_DEFAULT);
317 if (IS_ERR(acl))
318 return PTR_ERR(acl);
319 if (!acl)
320 inode->i_mode &= ~current->fs->umask;
321 }
322 if (acl) {
323 if (S_ISDIR(inode->i_mode)) {
324 rc = jffs2_set_acl(inode, ACL_TYPE_DEFAULT, acl);
325 if (rc)
326 goto cleanup;
327 }
328 clone = posix_acl_clone(acl, GFP_KERNEL);
329 rc = -ENOMEM;
330 if (!clone)
331 goto cleanup;
332 mode = inode->i_mode;
333 rc = posix_acl_create_masq(clone, &mode);
334 if (rc >= 0) {
335 inode->i_mode = mode;
336 if (rc > 0)
337 rc = jffs2_set_acl(inode, ACL_TYPE_ACCESS, clone);
338 }
339 posix_acl_release(clone);
340 }
341 cleanup:
342 posix_acl_release(acl);
343 return rc;
344}
345
346void jffs2_clear_acl(struct inode *inode)
347{
348 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
349
350 if (f->i_acl_access && f->i_acl_access != JFFS2_ACL_NOT_CACHED) {
351 posix_acl_release(f->i_acl_access);
352 f->i_acl_access = JFFS2_ACL_NOT_CACHED;
353 }
354 if (f->i_acl_default && f->i_acl_default != JFFS2_ACL_NOT_CACHED) {
355 posix_acl_release(f->i_acl_default);
356 f->i_acl_default = JFFS2_ACL_NOT_CACHED;
357 }
358}
359
360int jffs2_acl_chmod(struct inode *inode)
361{
362 struct posix_acl *acl, *clone;
363 int rc;
364
365 if (S_ISLNK(inode->i_mode))
366 return -EOPNOTSUPP;
367 acl = jffs2_get_acl(inode, ACL_TYPE_ACCESS);
368 if (IS_ERR(acl) || !acl)
369 return PTR_ERR(acl);
370 clone = posix_acl_clone(acl, GFP_KERNEL);
371 posix_acl_release(acl);
372 if (!clone)
373 return -ENOMEM;
374 rc = posix_acl_chmod_masq(clone, inode->i_mode);
375 if (!rc)
376 rc = jffs2_set_acl(inode, ACL_TYPE_ACCESS, clone);
377 posix_acl_release(clone);
378 return rc;
379}
380
381static size_t jffs2_acl_access_listxattr(struct inode *inode, char *list, size_t list_size,
382 const char *name, size_t name_len)
383{
384 const int retlen = sizeof(POSIX_ACL_XATTR_ACCESS);
385
386 if (list && retlen <= list_size)
387 strcpy(list, POSIX_ACL_XATTR_ACCESS);
388 return retlen;
389}
390
391static size_t jffs2_acl_default_listxattr(struct inode *inode, char *list, size_t list_size,
392 const char *name, size_t name_len)
393{
394 const int retlen = sizeof(POSIX_ACL_XATTR_DEFAULT);
395
396 if (list && retlen <= list_size)
397 strcpy(list, POSIX_ACL_XATTR_DEFAULT);
398 return retlen;
399}
400
401static int jffs2_acl_getxattr(struct inode *inode, int type, void *buffer, size_t size)
402{
403 struct posix_acl *acl;
404 int rc;
405
406 acl = jffs2_get_acl(inode, type);
407 if (IS_ERR(acl))
408 return PTR_ERR(acl);
409 if (!acl)
410 return -ENODATA;
411 rc = posix_acl_to_xattr(acl, buffer, size);
412 posix_acl_release(acl);
413
414 return rc;
415}
416
417static int jffs2_acl_access_getxattr(struct inode *inode, const char *name, void *buffer, size_t size)
418{
419 if (name[0] != '\0')
420 return -EINVAL;
421 return jffs2_acl_getxattr(inode, ACL_TYPE_ACCESS, buffer, size);
422}
423
424static int jffs2_acl_default_getxattr(struct inode *inode, const char *name, void *buffer, size_t size)
425{
426 if (name[0] != '\0')
427 return -EINVAL;
428 return jffs2_acl_getxattr(inode, ACL_TYPE_DEFAULT, buffer, size);
429}
430
431static int jffs2_acl_setxattr(struct inode *inode, int type, const void *value, size_t size)
432{
433 struct posix_acl *acl;
434 int rc;
435
436 if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
437 return -EPERM;
438
439 if (value) {
440 acl = posix_acl_from_xattr(value, size);
441 if (IS_ERR(acl))
442 return PTR_ERR(acl);
443 if (acl) {
444 rc = posix_acl_valid(acl);
445 if (rc)
446 goto out;
447 }
448 } else {
449 acl = NULL;
450 }
451 rc = jffs2_set_acl(inode, type, acl);
452 out:
453 posix_acl_release(acl);
454 return rc;
455}
456
457static int jffs2_acl_access_setxattr(struct inode *inode, const char *name,
458 const void *buffer, size_t size, int flags)
459{
460 if (name[0] != '\0')
461 return -EINVAL;
462 return jffs2_acl_setxattr(inode, ACL_TYPE_ACCESS, buffer, size);
463}
464
465static int jffs2_acl_default_setxattr(struct inode *inode, const char *name,
466 const void *buffer, size_t size, int flags)
467{
468 if (name[0] != '\0')
469 return -EINVAL;
470 return jffs2_acl_setxattr(inode, ACL_TYPE_DEFAULT, buffer, size);
471}
472
473struct xattr_handler jffs2_acl_access_xattr_handler = {
474 .prefix = POSIX_ACL_XATTR_ACCESS,
475 .list = jffs2_acl_access_listxattr,
476 .get = jffs2_acl_access_getxattr,
477 .set = jffs2_acl_access_setxattr,
478};
479
480struct xattr_handler jffs2_acl_default_xattr_handler = {
481 .prefix = POSIX_ACL_XATTR_DEFAULT,
482 .list = jffs2_acl_default_listxattr,
483 .get = jffs2_acl_default_getxattr,
484 .set = jffs2_acl_default_setxattr,
485};
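As a quick check of the on-medium size arithmetic in jffs2_acl_size() and jffs2_acl_count() above, the following standalone sketch reproduces the calculation in userspace. The structure layouts mirror the acl.h definitions added below, with plain fixed-width integers standing in for the packed jint16_t/jint32_t wrappers, so the byte counts (4-byte header, 4-byte short entry, 8-byte long entry) are assumptions about packing rather than values taken from the patch.

/* Standalone check of the on-medium ACL size arithmetic used by
 * jffs2_acl_size()/jffs2_acl_count() above.  Plain fixed-width types
 * stand in for the packed jint16_t/jint32_t wrappers. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct acl_header      { uint32_t a_version; };
struct acl_entry_short { uint16_t e_tag, e_perm; };
struct acl_entry       { uint16_t e_tag, e_perm; uint32_t e_id; };

static size_t acl_size(int count)
{
	if (count <= 4)
		return sizeof(struct acl_header)
			+ count * sizeof(struct acl_entry_short);
	return sizeof(struct acl_header)
		+ 4 * sizeof(struct acl_entry_short)
		+ (count - 4) * sizeof(struct acl_entry);
}

int main(void)
{
	/* The first four entries (owner, group, mask, other) need no e_id,
	 * so they use the short form; named user/group entries use the
	 * long form.  count=6 -> 4 + 4*4 + 2*8 = 36 bytes. */
	for (int count = 1; count <= 6; count++)
		printf("count=%d  on-medium size=%zu bytes\n",
		       count, acl_size(count));
	return 0;
}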
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
new file mode 100644
index 000000000000..8893bd1a6ba7
--- /dev/null
+++ b/fs/jffs2/acl.h
@@ -0,0 +1,45 @@
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2006 NEC Corporation
5 *
6 * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11struct jffs2_acl_entry {
12 jint16_t e_tag;
13 jint16_t e_perm;
14 jint32_t e_id;
15};
16
17struct jffs2_acl_entry_short {
18 jint16_t e_tag;
19 jint16_t e_perm;
20};
21
22struct jffs2_acl_header {
23 jint32_t a_version;
24};
25
26#ifdef CONFIG_JFFS2_FS_POSIX_ACL
27
28#define JFFS2_ACL_NOT_CACHED ((void *)-1)
29
30extern int jffs2_permission(struct inode *, int, struct nameidata *);
31extern int jffs2_acl_chmod(struct inode *);
32extern int jffs2_init_acl(struct inode *, struct inode *);
33extern void jffs2_clear_acl(struct inode *);
34
35extern struct xattr_handler jffs2_acl_access_xattr_handler;
36extern struct xattr_handler jffs2_acl_default_xattr_handler;
37
38#else
39
40#define jffs2_permission NULL
41#define jffs2_acl_chmod(inode) (0)
42#define jffs2_init_acl(inode,dir) (0)
43#define jffs2_clear_acl(inode)
44
45#endif /* CONFIG_JFFS2_FS_POSIX_ACL */
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
index 70f7a896c04a..02826967ab58 100644
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -160,6 +160,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
160 ic->scan_dents = NULL; 160 ic->scan_dents = NULL;
161 cond_resched(); 161 cond_resched();
162 } 162 }
163 jffs2_build_xattr_subsystem(c);
163 c->flags &= ~JFFS2_SB_FLAG_BUILDING; 164 c->flags &= ~JFFS2_SB_FLAG_BUILDING;
164 165
165 dbg_fsbuild("FS build complete\n"); 166 dbg_fsbuild("FS build complete\n");
@@ -178,6 +179,7 @@ exit:
178 jffs2_free_full_dirent(fd); 179 jffs2_free_full_dirent(fd);
179 } 180 }
180 } 181 }
182 jffs2_clear_xattr_subsystem(c);
181 } 183 }
182 184
183 return ret; 185 return ret;
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index e7944e665b9f..7001ba26c067 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -412,7 +412,7 @@ void jffs2_free_comprbuf(unsigned char *comprbuf, unsigned char *orig)
412 kfree(comprbuf); 412 kfree(comprbuf);
413} 413}
414 414
415int jffs2_compressors_init(void) 415int __init jffs2_compressors_init(void)
416{ 416{
417/* Registering compressors */ 417/* Registering compressors */
418#ifdef CONFIG_JFFS2_ZLIB 418#ifdef CONFIG_JFFS2_ZLIB
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index a77e830d85c5..509b8b1c0811 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -23,8 +23,8 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/jffs2.h> 25#include <linux/jffs2.h>
26#include <linux/jffs2_fs_i.h> 26#include "jffs2_fs_i.h"
27#include <linux/jffs2_fs_sb.h> 27#include "jffs2_fs_sb.h"
28#include "nodelist.h" 28#include "nodelist.h"
29 29
30#define JFFS2_RUBINMIPS_PRIORITY 10 30#define JFFS2_RUBINMIPS_PRIORITY 10
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c
index 1fe17de713e8..72b4fc13a106 100644
--- a/fs/jffs2/debug.c
+++ b/fs/jffs2/debug.c
@@ -192,13 +192,13 @@ __jffs2_dbg_acct_paranoia_check_nolock(struct jffs2_sb_info *c,
192 else 192 else
193 my_dirty_size += totlen; 193 my_dirty_size += totlen;
194 194
195 if ((!ref2->next_phys) != (ref2 == jeb->last_node)) { 195 if ((!ref_next(ref2)) != (ref2 == jeb->last_node)) {
196 JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next_phys at %#08x (mem %p), last_node is at %#08x (mem %p).\n", 196 JFFS2_ERROR("node_ref for node at %#08x (mem %p) has next at %#08x (mem %p), last_node is at %#08x (mem %p).\n",
197 ref_offset(ref2), ref2, ref_offset(ref2->next_phys), ref2->next_phys, 197 ref_offset(ref2), ref2, ref_offset(ref_next(ref2)), ref_next(ref2),
198 ref_offset(jeb->last_node), jeb->last_node); 198 ref_offset(jeb->last_node), jeb->last_node);
199 goto error; 199 goto error;
200 } 200 }
201 ref2 = ref2->next_phys; 201 ref2 = ref_next(ref2);
202 } 202 }
203 203
204 if (my_used_size != jeb->used_size) { 204 if (my_used_size != jeb->used_size) {
@@ -268,9 +268,9 @@ __jffs2_dbg_dump_node_refs_nolock(struct jffs2_sb_info *c,
268 } 268 }
269 269
270 printk(JFFS2_DBG); 270 printk(JFFS2_DBG);
271 for (ref = jeb->first_node; ; ref = ref->next_phys) { 271 for (ref = jeb->first_node; ; ref = ref_next(ref)) {
272 printk("%#08x(%#x)", ref_offset(ref), ref->__totlen); 272 printk("%#08x(%#x)", ref_offset(ref), ref->__totlen);
273 if (ref->next_phys) 273 if (ref_next(ref))
274 printk("->"); 274 printk("->");
275 else 275 else
276 break; 276 break;
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h
index 162af6dfe292..5fa494a792b2 100644
--- a/fs/jffs2/debug.h
+++ b/fs/jffs2/debug.h
@@ -171,6 +171,12 @@
171#define dbg_memalloc(fmt, ...) 171#define dbg_memalloc(fmt, ...)
172#endif 172#endif
173 173
174/* Watch the XATTR subsystem */
175#ifdef JFFS2_DBG_XATTR_MESSAGES
176#define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
177#else
178#define dbg_xattr(fmt, ...)
179#endif
174 180
175/* "Sanity" checks */ 181/* "Sanity" checks */
176void 182void
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 8bc7a5018e40..edd8371fc6a5 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -17,8 +17,8 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/crc32.h> 18#include <linux/crc32.h>
19#include <linux/jffs2.h> 19#include <linux/jffs2.h>
20#include <linux/jffs2_fs_i.h> 20#include "jffs2_fs_i.h"
21#include <linux/jffs2_fs_sb.h> 21#include "jffs2_fs_sb.h"
22#include <linux/time.h> 22#include <linux/time.h>
23#include "nodelist.h" 23#include "nodelist.h"
24 24
@@ -57,7 +57,12 @@ struct inode_operations jffs2_dir_inode_operations =
57 .rmdir = jffs2_rmdir, 57 .rmdir = jffs2_rmdir,
58 .mknod = jffs2_mknod, 58 .mknod = jffs2_mknod,
59 .rename = jffs2_rename, 59 .rename = jffs2_rename,
60 .permission = jffs2_permission,
60 .setattr = jffs2_setattr, 61 .setattr = jffs2_setattr,
62 .setxattr = jffs2_setxattr,
63 .getxattr = jffs2_getxattr,
64 .listxattr = jffs2_listxattr,
65 .removexattr = jffs2_removexattr
61}; 66};
62 67
63/***********************************************************************/ 68/***********************************************************************/
@@ -78,6 +83,9 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
78 83
79 D1(printk(KERN_DEBUG "jffs2_lookup()\n")); 84 D1(printk(KERN_DEBUG "jffs2_lookup()\n"));
80 85
86 if (target->d_name.len > JFFS2_MAX_NAME_LEN)
87 return ERR_PTR(-ENAMETOOLONG);
88
81 dir_f = JFFS2_INODE_INFO(dir_i); 89 dir_f = JFFS2_INODE_INFO(dir_i);
82 c = JFFS2_SB_INFO(dir_i->i_sb); 90 c = JFFS2_SB_INFO(dir_i->i_sb);
83 91
@@ -206,12 +214,15 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
206 ret = jffs2_do_create(c, dir_f, f, ri, 214 ret = jffs2_do_create(c, dir_f, f, ri,
207 dentry->d_name.name, dentry->d_name.len); 215 dentry->d_name.name, dentry->d_name.len);
208 216
209 if (ret) { 217 if (ret)
210 make_bad_inode(inode); 218 goto fail;
211 iput(inode); 219
212 jffs2_free_raw_inode(ri); 220 ret = jffs2_init_security(inode, dir_i);
213 return ret; 221 if (ret)
214 } 222 goto fail;
223 ret = jffs2_init_acl(inode, dir_i);
224 if (ret)
225 goto fail;
215 226
216 dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime)); 227 dir_i->i_mtime = dir_i->i_ctime = ITIME(je32_to_cpu(ri->ctime));
217 228
@@ -221,6 +232,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, int mode,
221 D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", 232 D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n",
222 inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages)); 233 inode->i_ino, inode->i_mode, inode->i_nlink, f->inocache->nlink, inode->i_mapping->nrpages));
223 return 0; 234 return 0;
235
236 fail:
237 make_bad_inode(inode);
238 iput(inode);
239 jffs2_free_raw_inode(ri);
240 return ret;
224} 241}
225 242
226/***********************************************************************/ 243/***********************************************************************/
@@ -291,7 +308,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
291 struct jffs2_full_dnode *fn; 308 struct jffs2_full_dnode *fn;
292 struct jffs2_full_dirent *fd; 309 struct jffs2_full_dirent *fd;
293 int namelen; 310 int namelen;
294 uint32_t alloclen, phys_ofs; 311 uint32_t alloclen;
295 int ret, targetlen = strlen(target); 312 int ret, targetlen = strlen(target);
296 313
297 /* FIXME: If you care. We'd need to use frags for the target 314 /* FIXME: If you care. We'd need to use frags for the target
@@ -310,8 +327,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
310 * Just the node will do for now, though 327 * Just the node will do for now, though
311 */ 328 */
312 namelen = dentry->d_name.len; 329 namelen = dentry->d_name.len;
313 ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &phys_ofs, &alloclen, 330 ret = jffs2_reserve_space(c, sizeof(*ri) + targetlen, &alloclen,
314 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 331 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
315 332
316 if (ret) { 333 if (ret) {
317 jffs2_free_raw_inode(ri); 334 jffs2_free_raw_inode(ri);
@@ -339,7 +356,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
339 ri->data_crc = cpu_to_je32(crc32(0, target, targetlen)); 356 ri->data_crc = cpu_to_je32(crc32(0, target, targetlen));
340 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); 357 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
341 358
342 fn = jffs2_write_dnode(c, f, ri, target, targetlen, phys_ofs, ALLOC_NORMAL); 359 fn = jffs2_write_dnode(c, f, ri, target, targetlen, ALLOC_NORMAL);
343 360
344 jffs2_free_raw_inode(ri); 361 jffs2_free_raw_inode(ri);
345 362
@@ -371,8 +388,20 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
371 up(&f->sem); 388 up(&f->sem);
372 389
373 jffs2_complete_reservation(c); 390 jffs2_complete_reservation(c);
374 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, 391
375 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); 392 ret = jffs2_init_security(inode, dir_i);
393 if (ret) {
394 jffs2_clear_inode(inode);
395 return ret;
396 }
397 ret = jffs2_init_acl(inode, dir_i);
398 if (ret) {
399 jffs2_clear_inode(inode);
400 return ret;
401 }
402
403 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
404 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
376 if (ret) { 405 if (ret) {
377 /* Eep. */ 406 /* Eep. */
378 jffs2_clear_inode(inode); 407 jffs2_clear_inode(inode);
@@ -404,7 +433,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
404 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); 433 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
405 rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); 434 rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
406 435
407 fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); 436 fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
408 437
409 if (IS_ERR(fd)) { 438 if (IS_ERR(fd)) {
410 /* dirent failed to write. Delete the inode normally 439 /* dirent failed to write. Delete the inode normally
@@ -442,7 +471,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
442 struct jffs2_full_dnode *fn; 471 struct jffs2_full_dnode *fn;
443 struct jffs2_full_dirent *fd; 472 struct jffs2_full_dirent *fd;
444 int namelen; 473 int namelen;
445 uint32_t alloclen, phys_ofs; 474 uint32_t alloclen;
446 int ret; 475 int ret;
447 476
448 mode |= S_IFDIR; 477 mode |= S_IFDIR;
@@ -457,8 +486,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
457 * Just the node will do for now, though 486 * Just the node will do for now, though
458 */ 487 */
459 namelen = dentry->d_name.len; 488 namelen = dentry->d_name.len;
460 ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL, 489 ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
461 JFFS2_SUMMARY_INODE_SIZE); 490 JFFS2_SUMMARY_INODE_SIZE);
462 491
463 if (ret) { 492 if (ret) {
464 jffs2_free_raw_inode(ri); 493 jffs2_free_raw_inode(ri);
@@ -483,7 +512,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
483 ri->data_crc = cpu_to_je32(0); 512 ri->data_crc = cpu_to_je32(0);
484 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); 513 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
485 514
486 fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL); 515 fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
487 516
488 jffs2_free_raw_inode(ri); 517 jffs2_free_raw_inode(ri);
489 518
@@ -501,8 +530,20 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
501 up(&f->sem); 530 up(&f->sem);
502 531
503 jffs2_complete_reservation(c); 532 jffs2_complete_reservation(c);
504 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, 533
505 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); 534 ret = jffs2_init_security(inode, dir_i);
535 if (ret) {
536 jffs2_clear_inode(inode);
537 return ret;
538 }
539 ret = jffs2_init_acl(inode, dir_i);
540 if (ret) {
541 jffs2_clear_inode(inode);
542 return ret;
543 }
544
545 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
546 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
506 if (ret) { 547 if (ret) {
507 /* Eep. */ 548 /* Eep. */
508 jffs2_clear_inode(inode); 549 jffs2_clear_inode(inode);
@@ -534,7 +575,7 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, int mode)
534 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); 575 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
535 rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); 576 rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
536 577
537 fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); 578 fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
538 579
539 if (IS_ERR(fd)) { 580 if (IS_ERR(fd)) {
540 /* dirent failed to write. Delete the inode normally 581 /* dirent failed to write. Delete the inode normally
@@ -588,12 +629,12 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
588 struct jffs2_full_dnode *fn; 629 struct jffs2_full_dnode *fn;
589 struct jffs2_full_dirent *fd; 630 struct jffs2_full_dirent *fd;
590 int namelen; 631 int namelen;
591 jint16_t dev; 632 union jffs2_device_node dev;
592 int devlen = 0; 633 int devlen = 0;
593 uint32_t alloclen, phys_ofs; 634 uint32_t alloclen;
594 int ret; 635 int ret;
595 636
596 if (!old_valid_dev(rdev)) 637 if (!new_valid_dev(rdev))
597 return -EINVAL; 638 return -EINVAL;
598 639
599 ri = jffs2_alloc_raw_inode(); 640 ri = jffs2_alloc_raw_inode();
@@ -602,17 +643,15 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
602 643
603 c = JFFS2_SB_INFO(dir_i->i_sb); 644 c = JFFS2_SB_INFO(dir_i->i_sb);
604 645
605 if (S_ISBLK(mode) || S_ISCHR(mode)) { 646 if (S_ISBLK(mode) || S_ISCHR(mode))
606 dev = cpu_to_je16(old_encode_dev(rdev)); 647 devlen = jffs2_encode_dev(&dev, rdev);
607 devlen = sizeof(dev);
608 }
609 648
610 /* Try to reserve enough space for both node and dirent. 649 /* Try to reserve enough space for both node and dirent.
611 * Just the node will do for now, though 650 * Just the node will do for now, though
612 */ 651 */
613 namelen = dentry->d_name.len; 652 namelen = dentry->d_name.len;
614 ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &phys_ofs, &alloclen, 653 ret = jffs2_reserve_space(c, sizeof(*ri) + devlen, &alloclen,
615 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 654 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
616 655
617 if (ret) { 656 if (ret) {
618 jffs2_free_raw_inode(ri); 657 jffs2_free_raw_inode(ri);
@@ -639,7 +678,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
639 ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen)); 678 ri->data_crc = cpu_to_je32(crc32(0, &dev, devlen));
640 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); 679 ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
641 680
642 fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, phys_ofs, ALLOC_NORMAL); 681 fn = jffs2_write_dnode(c, f, ri, (char *)&dev, devlen, ALLOC_NORMAL);
643 682
644 jffs2_free_raw_inode(ri); 683 jffs2_free_raw_inode(ri);
645 684
@@ -657,8 +696,20 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
657 up(&f->sem); 696 up(&f->sem);
658 697
659 jffs2_complete_reservation(c); 698 jffs2_complete_reservation(c);
660 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen, 699
661 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen)); 700 ret = jffs2_init_security(inode, dir_i);
701 if (ret) {
702 jffs2_clear_inode(inode);
703 return ret;
704 }
705 ret = jffs2_init_acl(inode, dir_i);
706 if (ret) {
707 jffs2_clear_inode(inode);
708 return ret;
709 }
710
711 ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
712 ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
662 if (ret) { 713 if (ret) {
663 /* Eep. */ 714 /* Eep. */
664 jffs2_clear_inode(inode); 715 jffs2_clear_inode(inode);
@@ -693,7 +744,7 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, int mode, de
693 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); 744 rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
694 rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen)); 745 rd->name_crc = cpu_to_je32(crc32(0, dentry->d_name.name, namelen));
695 746
696 fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, phys_ofs, ALLOC_NORMAL); 747 fd = jffs2_write_dirent(c, dir_f, rd, dentry->d_name.name, namelen, ALLOC_NORMAL);
697 748
698 if (IS_ERR(fd)) { 749 if (IS_ERR(fd)) {
699 /* dirent failed to write. Delete the inode normally 750 /* dirent failed to write. Delete the inode normally
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index dad68fdffe9e..1862e8bc101d 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -30,7 +30,6 @@ static void jffs2_erase_callback(struct erase_info *);
30#endif 30#endif
31static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset); 31static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
32static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); 32static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
33static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
34static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); 33static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
35 34
36static void jffs2_erase_block(struct jffs2_sb_info *c, 35static void jffs2_erase_block(struct jffs2_sb_info *c,
@@ -136,7 +135,7 @@ void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
136 c->used_size -= jeb->used_size; 135 c->used_size -= jeb->used_size;
137 c->dirty_size -= jeb->dirty_size; 136 c->dirty_size -= jeb->dirty_size;
138 jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0; 137 jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
139 jffs2_free_all_node_refs(c, jeb); 138 jffs2_free_jeb_node_refs(c, jeb);
140 list_add(&jeb->list, &c->erasing_list); 139 list_add(&jeb->list, &c->erasing_list);
141 spin_unlock(&c->erase_completion_lock); 140 spin_unlock(&c->erase_completion_lock);
142 141
@@ -231,6 +230,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
231 at the end of the linked list. Stash it and continue 230 at the end of the linked list. Stash it and continue
232 from the beginning of the list */ 231 from the beginning of the list */
233 ic = (struct jffs2_inode_cache *)(*prev); 232 ic = (struct jffs2_inode_cache *)(*prev);
233 BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
234 prev = &ic->nodes; 234 prev = &ic->nodes;
235 continue; 235 continue;
236 } 236 }
@@ -283,22 +283,27 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
283 jffs2_del_ino_cache(c, ic); 283 jffs2_del_ino_cache(c, ic);
284} 284}
285 285
286static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 286void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
287{ 287{
288 struct jffs2_raw_node_ref *ref; 288 struct jffs2_raw_node_ref *block, *ref;
289 D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); 289 D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));
290 while(jeb->first_node) {
291 ref = jeb->first_node;
292 jeb->first_node = ref->next_phys;
293 290
294 /* Remove from the inode-list */ 291 block = ref = jeb->first_node;
295 if (ref->next_in_ino) 292
293 while (ref) {
294 if (ref->flash_offset == REF_LINK_NODE) {
295 ref = ref->next_in_ino;
296 jffs2_free_refblock(block);
297 block = ref;
298 continue;
299 }
300 if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
296 jffs2_remove_node_refs_from_ino_list(c, ref, jeb); 301 jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
297 /* else it was a non-inode node or already removed, so don't bother */ 302 /* else it was a non-inode node or already removed, so don't bother */
298 303
299 jffs2_free_raw_node_ref(ref); 304 ref++;
300 } 305 }
301 jeb->last_node = NULL; 306 jeb->first_node = jeb->last_node = NULL;
302} 307}
303 308
304static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset) 309static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
@@ -351,7 +356,6 @@ fail:
351 356
352static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 357static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
353{ 358{
354 struct jffs2_raw_node_ref *marker_ref = NULL;
355 size_t retlen; 359 size_t retlen;
356 int ret; 360 int ret;
357 uint32_t bad_offset; 361 uint32_t bad_offset;
@@ -373,12 +377,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
373 goto filebad; 377 goto filebad;
374 } 378 }
375 379
376 jeb->first_node = jeb->last_node = NULL; 380 /* Everything else got zeroed before the erase */
377 jeb->free_size = c->sector_size; 381 jeb->free_size = c->sector_size;
378 jeb->used_size = 0;
379 jeb->dirty_size = 0;
380 jeb->wasted_size = 0;
381
382 } else { 382 } else {
383 383
384 struct kvec vecs[1]; 384 struct kvec vecs[1];
@@ -388,11 +388,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
388 .totlen = cpu_to_je32(c->cleanmarker_size) 388 .totlen = cpu_to_je32(c->cleanmarker_size)
389 }; 389 };
390 390
391 marker_ref = jffs2_alloc_raw_node_ref(); 391 jffs2_prealloc_raw_node_refs(c, jeb, 1);
392 if (!marker_ref) {
393 printk(KERN_WARNING "Failed to allocate raw node ref for clean marker. Refiling\n");
394 goto refile;
395 }
396 392
397 marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4)); 393 marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));
398 394
@@ -408,21 +404,13 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
408 printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", 404 printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
409 jeb->offset, sizeof(marker), retlen); 405 jeb->offset, sizeof(marker), retlen);
410 406
411 jffs2_free_raw_node_ref(marker_ref);
412 goto filebad; 407 goto filebad;
413 } 408 }
414 409
415 marker_ref->next_in_ino = NULL; 410 /* Everything else got zeroed before the erase */
416 marker_ref->next_phys = NULL; 411 jeb->free_size = c->sector_size;
417 marker_ref->flash_offset = jeb->offset | REF_NORMAL; 412 /* FIXME Special case for cleanmarker in empty block */
418 marker_ref->__totlen = c->cleanmarker_size; 413 jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);
419
420 jeb->first_node = jeb->last_node = marker_ref;
421
422 jeb->free_size = c->sector_size - c->cleanmarker_size;
423 jeb->used_size = c->cleanmarker_size;
424 jeb->dirty_size = 0;
425 jeb->wasted_size = 0;
426 } 414 }
427 415
428 spin_lock(&c->erase_completion_lock); 416 spin_lock(&c->erase_completion_lock);
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
index 9f4171213e58..bb8844f40e48 100644
--- a/fs/jffs2/file.c
+++ b/fs/jffs2/file.c
@@ -54,7 +54,12 @@ const struct file_operations jffs2_file_operations =
54 54
55struct inode_operations jffs2_file_inode_operations = 55struct inode_operations jffs2_file_inode_operations =
56{ 56{
57 .setattr = jffs2_setattr 57 .permission = jffs2_permission,
58 .setattr = jffs2_setattr,
59 .setxattr = jffs2_setxattr,
60 .getxattr = jffs2_getxattr,
61 .listxattr = jffs2_listxattr,
62 .removexattr = jffs2_removexattr
58}; 63};
59 64
60struct address_space_operations jffs2_file_address_operations = 65struct address_space_operations jffs2_file_address_operations =
@@ -129,13 +134,13 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg,
129 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 134 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
130 struct jffs2_raw_inode ri; 135 struct jffs2_raw_inode ri;
131 struct jffs2_full_dnode *fn; 136 struct jffs2_full_dnode *fn;
132 uint32_t phys_ofs, alloc_len; 137 uint32_t alloc_len;
133 138
134 D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", 139 D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
135 (unsigned int)inode->i_size, pageofs)); 140 (unsigned int)inode->i_size, pageofs));
136 141
137 ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, 142 ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
138 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 143 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
139 if (ret) 144 if (ret)
140 return ret; 145 return ret;
141 146
@@ -161,7 +166,7 @@ static int jffs2_prepare_write (struct file *filp, struct page *pg,
161 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); 166 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
162 ri.data_crc = cpu_to_je32(0); 167 ri.data_crc = cpu_to_je32(0);
163 168
164 fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_NORMAL); 169 fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL);
165 170
166 if (IS_ERR(fn)) { 171 if (IS_ERR(fn)) {
167 ret = PTR_ERR(fn); 172 ret = PTR_ERR(fn);
@@ -215,12 +220,20 @@ static int jffs2_commit_write (struct file *filp, struct page *pg,
215 D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", 220 D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
216 inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags)); 221 inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
217 222
218 if (!start && end == PAGE_CACHE_SIZE) { 223 if (end == PAGE_CACHE_SIZE) {
219 /* We need to avoid deadlock with page_cache_read() in 224 if (!start) {
220 jffs2_garbage_collect_pass(). So we have to mark the 225 /* We need to avoid deadlock with page_cache_read() in
221 page up to date, to prevent page_cache_read() from 226 jffs2_garbage_collect_pass(). So we have to mark the
222 trying to re-lock it. */ 227 page up to date, to prevent page_cache_read() from
223 SetPageUptodate(pg); 228 trying to re-lock it. */
229 SetPageUptodate(pg);
230 } else {
231 /* When writing out the end of a page, write out the
232 _whole_ page. This helps to reduce the number of
233 nodes in files which have many short writes, like
234 syslog files. */
235 start = aligned_start = 0;
236 }
224 } 237 }
225 238
226 ri = jffs2_alloc_raw_inode(); 239 ri = jffs2_alloc_raw_inode();
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 09e5d10b8840..7b6c24b14f85 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -33,11 +33,11 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
33 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); 33 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
34 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); 34 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
35 struct jffs2_raw_inode *ri; 35 struct jffs2_raw_inode *ri;
36 unsigned short dev; 36 union jffs2_device_node dev;
37 unsigned char *mdata = NULL; 37 unsigned char *mdata = NULL;
38 int mdatalen = 0; 38 int mdatalen = 0;
39 unsigned int ivalid; 39 unsigned int ivalid;
40 uint32_t phys_ofs, alloclen; 40 uint32_t alloclen;
41 int ret; 41 int ret;
42 D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); 42 D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino));
43 ret = inode_change_ok(inode, iattr); 43 ret = inode_change_ok(inode, iattr);
@@ -51,20 +51,24 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
51 it out again with the appropriate data attached */ 51 it out again with the appropriate data attached */
52 if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { 52 if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
53 /* For these, we don't actually need to read the old node */ 53 /* For these, we don't actually need to read the old node */
54 dev = old_encode_dev(inode->i_rdev); 54 mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
55 mdata = (char *)&dev; 55 mdata = (char *)&dev;
56 mdatalen = sizeof(dev);
57 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); 56 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen));
58 } else if (S_ISLNK(inode->i_mode)) { 57 } else if (S_ISLNK(inode->i_mode)) {
58 down(&f->sem);
59 mdatalen = f->metadata->size; 59 mdatalen = f->metadata->size;
60 mdata = kmalloc(f->metadata->size, GFP_USER); 60 mdata = kmalloc(f->metadata->size, GFP_USER);
61 if (!mdata) 61 if (!mdata) {
62 up(&f->sem);
62 return -ENOMEM; 63 return -ENOMEM;
64 }
63 ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen); 65 ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
64 if (ret) { 66 if (ret) {
67 up(&f->sem);
65 kfree(mdata); 68 kfree(mdata);
66 return ret; 69 return ret;
67 } 70 }
71 up(&f->sem);
68 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); 72 D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen));
69 } 73 }
70 74
@@ -75,8 +79,8 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
75 return -ENOMEM; 79 return -ENOMEM;
76 } 80 }
77 81
78 ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &phys_ofs, &alloclen, 82 ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
79 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); 83 ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
80 if (ret) { 84 if (ret) {
81 jffs2_free_raw_inode(ri); 85 jffs2_free_raw_inode(ri);
82 if (S_ISLNK(inode->i_mode & S_IFMT)) 86 if (S_ISLNK(inode->i_mode & S_IFMT))
@@ -127,7 +131,7 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
127 else 131 else
128 ri->data_crc = cpu_to_je32(0); 132 ri->data_crc = cpu_to_je32(0);
129 133
130 new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, phys_ofs, ALLOC_NORMAL); 134 new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, ALLOC_NORMAL);
131 if (S_ISLNK(inode->i_mode)) 135 if (S_ISLNK(inode->i_mode))
132 kfree(mdata); 136 kfree(mdata);
133 137
@@ -180,7 +184,12 @@ static int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
180 184
181int jffs2_setattr(struct dentry *dentry, struct iattr *iattr) 185int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
182{ 186{
183 return jffs2_do_setattr(dentry->d_inode, iattr); 187 int rc;
188
189 rc = jffs2_do_setattr(dentry->d_inode, iattr);
190 if (!rc && (iattr->ia_valid & ATTR_MODE))
191 rc = jffs2_acl_chmod(dentry->d_inode);
192 return rc;
184} 193}
185 194
186int jffs2_statfs(struct super_block *sb, struct kstatfs *buf) 195int jffs2_statfs(struct super_block *sb, struct kstatfs *buf)
@@ -219,6 +228,7 @@ void jffs2_clear_inode (struct inode *inode)
219 228
220 D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); 229 D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
221 230
231 jffs2_xattr_delete_inode(c, f->inocache);
222 jffs2_do_clear_inode(c, f); 232 jffs2_do_clear_inode(c, f);
223} 233}
224 234
@@ -227,6 +237,8 @@ void jffs2_read_inode (struct inode *inode)
227 struct jffs2_inode_info *f; 237 struct jffs2_inode_info *f;
228 struct jffs2_sb_info *c; 238 struct jffs2_sb_info *c;
229 struct jffs2_raw_inode latest_node; 239 struct jffs2_raw_inode latest_node;
240 union jffs2_device_node jdev;
241 dev_t rdev = 0;
230 int ret; 242 int ret;
231 243
232 D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino)); 244 D1(printk(KERN_DEBUG "jffs2_read_inode(): inode->i_ino == %lu\n", inode->i_ino));
@@ -258,7 +270,6 @@ void jffs2_read_inode (struct inode *inode)
258 inode->i_blocks = (inode->i_size + 511) >> 9; 270 inode->i_blocks = (inode->i_size + 511) >> 9;
259 271
260 switch (inode->i_mode & S_IFMT) { 272 switch (inode->i_mode & S_IFMT) {
261 jint16_t rdev;
262 273
263 case S_IFLNK: 274 case S_IFLNK:
264 inode->i_op = &jffs2_symlink_inode_operations; 275 inode->i_op = &jffs2_symlink_inode_operations;
@@ -292,8 +303,16 @@ void jffs2_read_inode (struct inode *inode)
292 case S_IFBLK: 303 case S_IFBLK:
293 case S_IFCHR: 304 case S_IFCHR:
294 /* Read the device numbers from the media */ 305 /* Read the device numbers from the media */
306 if (f->metadata->size != sizeof(jdev.old) &&
307 f->metadata->size != sizeof(jdev.new)) {
308 printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size);
309 up(&f->sem);
310 jffs2_do_clear_inode(c, f);
311 make_bad_inode(inode);
312 return;
313 }
295 D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); 314 D1(printk(KERN_DEBUG "Reading device numbers from flash\n"));
296 if (jffs2_read_dnode(c, f, f->metadata, (char *)&rdev, 0, sizeof(rdev)) < 0) { 315 if (jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size) < 0) {
297 /* Eep */ 316 /* Eep */
298 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); 317 printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino);
299 up(&f->sem); 318 up(&f->sem);
@@ -301,12 +320,15 @@ void jffs2_read_inode (struct inode *inode)
301 make_bad_inode(inode); 320 make_bad_inode(inode);
302 return; 321 return;
303 } 322 }
323 if (f->metadata->size == sizeof(jdev.old))
324 rdev = old_decode_dev(je16_to_cpu(jdev.old));
325 else
326 rdev = new_decode_dev(je32_to_cpu(jdev.new));
304 327
305 case S_IFSOCK: 328 case S_IFSOCK:
306 case S_IFIFO: 329 case S_IFIFO:
307 inode->i_op = &jffs2_file_inode_operations; 330 inode->i_op = &jffs2_file_inode_operations;
308 init_special_inode(inode, inode->i_mode, 331 init_special_inode(inode, inode->i_mode, rdev);
309 old_decode_dev((je16_to_cpu(rdev))));
310 break; 332 break;
311 333
312 default: 334 default:
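For readers following the device-node hunk above: the size of the metadata node stored on flash now selects between the legacy 16-bit and the new 32-bit device-number encoding. A condensed restatement of that selection is sketched below; the helper name jffs2_decode_dev() is illustrative and not part of the patch, while the union, the je16/je32 accessors and old/new_decode_dev() are exactly those used in the hunk.

/* Illustrative only: pick the decode from the on-flash size of the node. */
static dev_t jffs2_decode_dev(union jffs2_device_node *jdev, uint32_t csize)
{
	if (csize == sizeof(jdev->old))			/* 2-byte legacy node */
		return old_decode_dev(je16_to_cpu(jdev->old));
	return new_decode_dev(je32_to_cpu(jdev->new));	/* 4-byte node */
}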
@@ -492,6 +514,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
492 } 514 }
493 memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *)); 515 memset(c->inocache_list, 0, INOCACHE_HASHSIZE * sizeof(struct jffs2_inode_cache *));
494 516
517 jffs2_init_xattr_subsystem(c);
518
495 if ((ret = jffs2_do_mount_fs(c))) 519 if ((ret = jffs2_do_mount_fs(c)))
496 goto out_inohash; 520 goto out_inohash;
497 521
@@ -526,6 +550,7 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
526 else 550 else
527 kfree(c->blocks); 551 kfree(c->blocks);
528 out_inohash: 552 out_inohash:
553 jffs2_clear_xattr_subsystem(c);
529 kfree(c->inocache_list); 554 kfree(c->inocache_list);
530 out_wbuf: 555 out_wbuf:
531 jffs2_flash_cleanup(c); 556 jffs2_flash_cleanup(c);
@@ -639,13 +664,6 @@ static int jffs2_flash_setup(struct jffs2_sb_info *c) {
639 return ret; 664 return ret;
640 } 665 }
641 666
642 /* add setups for other bizarre flashes here... */
643 if (jffs2_nor_ecc(c)) {
644 ret = jffs2_nor_ecc_flash_setup(c);
645 if (ret)
646 return ret;
647 }
648
649 /* and Dataflash */ 667 /* and Dataflash */
650 if (jffs2_dataflash(c)) { 668 if (jffs2_dataflash(c)) {
651 ret = jffs2_dataflash_setup(c); 669 ret = jffs2_dataflash_setup(c);
@@ -669,11 +687,6 @@ void jffs2_flash_cleanup(struct jffs2_sb_info *c) {
669 jffs2_nand_flash_cleanup(c); 687 jffs2_nand_flash_cleanup(c);
670 } 688 }
671 689
672 /* add cleanups for other bizarre flashes here... */
673 if (jffs2_nor_ecc(c)) {
674 jffs2_nor_ecc_flash_cleanup(c);
675 }
676
677 /* and DataFlash */ 690 /* and DataFlash */
678 if (jffs2_dataflash(c)) { 691 if (jffs2_dataflash(c)) {
679 jffs2_dataflash_cleanup(c); 692 jffs2_dataflash_cleanup(c);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index f9ffece453a3..477c526d638b 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -125,6 +125,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
125 struct jffs2_eraseblock *jeb; 125 struct jffs2_eraseblock *jeb;
126 struct jffs2_raw_node_ref *raw; 126 struct jffs2_raw_node_ref *raw;
127 int ret = 0, inum, nlink; 127 int ret = 0, inum, nlink;
128 int xattr = 0;
128 129
129 if (down_interruptible(&c->alloc_sem)) 130 if (down_interruptible(&c->alloc_sem))
130 return -EINTR; 131 return -EINTR;
@@ -138,7 +139,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
138 the node CRCs etc. Do it now. */ 139 the node CRCs etc. Do it now. */
139 140
140 /* checked_ino is protected by the alloc_sem */ 141 /* checked_ino is protected by the alloc_sem */
141 if (c->checked_ino > c->highest_ino) { 142 if (c->checked_ino > c->highest_ino && xattr) {
142 printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", 143 printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n",
143 c->unchecked_size); 144 c->unchecked_size);
144 jffs2_dbg_dump_block_lists_nolock(c); 145 jffs2_dbg_dump_block_lists_nolock(c);
@@ -148,6 +149,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
148 149
149 spin_unlock(&c->erase_completion_lock); 150 spin_unlock(&c->erase_completion_lock);
150 151
152 if (!xattr)
153 xattr = jffs2_verify_xattr(c);
154
151 spin_lock(&c->inocache_lock); 155 spin_lock(&c->inocache_lock);
152 156
153 ic = jffs2_get_ino_cache(c, c->checked_ino++); 157 ic = jffs2_get_ino_cache(c, c->checked_ino++);
@@ -181,6 +185,10 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
181 and trigger the BUG() above while we haven't yet 185 and trigger the BUG() above while we haven't yet
182 finished checking all its nodes */ 186 finished checking all its nodes */
183 D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); 187 D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino));
188 /* We need to come back again for the _same_ inode. We've
189 made no progress in this case, but that should be OK */
190 c->checked_ino--;
191
184 up(&c->alloc_sem); 192 up(&c->alloc_sem);
185 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); 193 sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
186 return 0; 194 return 0;
@@ -231,7 +239,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
231 239
232 while(ref_obsolete(raw)) { 240 while(ref_obsolete(raw)) {
233 D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); 241 D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw)));
234 raw = raw->next_phys; 242 raw = ref_next(raw);
235 if (unlikely(!raw)) { 243 if (unlikely(!raw)) {
236 printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); 244 printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n");
237 printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", 245 printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
@@ -248,16 +256,37 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
248 256
249 if (!raw->next_in_ino) { 257 if (!raw->next_in_ino) {
250 /* Inode-less node. Clean marker, snapshot or something like that */ 258 /* Inode-less node. Clean marker, snapshot or something like that */
251 /* FIXME: If it's something that needs to be copied, including something
252 we don't grok that has JFFS2_NODETYPE_RWCOMPAT_COPY, we should do so */
253 spin_unlock(&c->erase_completion_lock); 259 spin_unlock(&c->erase_completion_lock);
254 jffs2_mark_node_obsolete(c, raw); 260 if (ref_flags(raw) == REF_PRISTINE) {
261 /* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */
262 jffs2_garbage_collect_pristine(c, NULL, raw);
263 } else {
264 /* Just mark it obsolete */
265 jffs2_mark_node_obsolete(c, raw);
266 }
255 up(&c->alloc_sem); 267 up(&c->alloc_sem);
256 goto eraseit_lock; 268 goto eraseit_lock;
257 } 269 }
258 270
259 ic = jffs2_raw_ref_to_ic(raw); 271 ic = jffs2_raw_ref_to_ic(raw);
260 272
273#ifdef CONFIG_JFFS2_FS_XATTR
 274 /* When 'ic' refers to an xattr_datum or xattr_ref, this node is GCed as an xattr.
 275 * We can tell whether it belongs to an inode or an xattr from ic->class. */
276 if (ic->class == RAWNODE_CLASS_XATTR_DATUM
277 || ic->class == RAWNODE_CLASS_XATTR_REF) {
278 BUG_ON(raw->next_in_ino != (void *)ic);
279 spin_unlock(&c->erase_completion_lock);
280
281 if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
282 ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
283 } else {
284 ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
285 }
286 goto release_sem;
287 }
288#endif
289
261 /* We need to hold the inocache. Either the erase_completion_lock or 290 /* We need to hold the inocache. Either the erase_completion_lock or
262 the inocache_lock are sufficient; we trade down since the inocache_lock 291 the inocache_lock are sufficient; we trade down since the inocache_lock
263 causes less contention. */ 292 causes less contention. */
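Because xattr datums and refs can now terminate a next_in_ino chain just like an inode cache, the owner of a raw node ref is found by walking to the end of the chain and checking the shared class byte. A minimal sketch of that dispatch step in isolation is below; the wrapper name is hypothetical, while jffs2_raw_ref_to_ic(), the RAWNODE_CLASS_* constants and the two xattr GC helpers are the ones this patch introduces.

#ifdef CONFIG_JFFS2_FS_XATTR
/* Hypothetical wrapper: classify the chain terminator and dispatch. */
static int gc_dispatch_by_class(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw)
{
	struct jffs2_inode_cache *ic = jffs2_raw_ref_to_ic(raw);

	switch (ic->class) {
	case RAWNODE_CLASS_XATTR_DATUM:
		return jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
	case RAWNODE_CLASS_XATTR_REF:
		return jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
	default:	/* RAWNODE_CLASS_INODE_CACHE: fall through to the inode GC path */
		return 0;
	}
}
#endif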
@@ -499,7 +528,6 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
499 struct jffs2_raw_node_ref *raw) 528 struct jffs2_raw_node_ref *raw)
500{ 529{
501 union jffs2_node_union *node; 530 union jffs2_node_union *node;
502 struct jffs2_raw_node_ref *nraw;
503 size_t retlen; 531 size_t retlen;
504 int ret; 532 int ret;
505 uint32_t phys_ofs, alloclen; 533 uint32_t phys_ofs, alloclen;
@@ -508,15 +536,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
508 536
509 D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); 537 D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw)));
510 538
511 rawlen = ref_totlen(c, c->gcblock, raw); 539 alloclen = rawlen = ref_totlen(c, c->gcblock, raw);
512 540
513 /* Ask for a small amount of space (or the totlen if smaller) because we 541 /* Ask for a small amount of space (or the totlen if smaller) because we
514 don't want to force wastage of the end of a block if splitting would 542 don't want to force wastage of the end of a block if splitting would
515 work. */ 543 work. */
516 ret = jffs2_reserve_space_gc(c, min_t(uint32_t, sizeof(struct jffs2_raw_inode) + 544 if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
517 JFFS2_MIN_DATA_LEN, rawlen), &phys_ofs, &alloclen, rawlen); 545 alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN;
518 /* this is not the exact summary size of it, 546
519 it is only an upper estimation */ 547 ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen);
548 /* 'rawlen' is not the exact summary size; it is only an upper estimation */
520 549
521 if (ret) 550 if (ret)
522 return ret; 551 return ret;
@@ -580,22 +609,17 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
580 } 609 }
581 break; 610 break;
582 default: 611 default:
583 printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", 612 /* If it's inode-less, we don't _know_ what it is. Just copy it intact */
584 ref_offset(raw), je16_to_cpu(node->u.nodetype)); 613 if (ic) {
585 goto bail; 614 printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
586 } 615 ref_offset(raw), je16_to_cpu(node->u.nodetype));
587 616 goto bail;
588 nraw = jffs2_alloc_raw_node_ref(); 617 }
589 if (!nraw) {
590 ret = -ENOMEM;
591 goto out_node;
592 } 618 }
593 619
594 /* OK, all the CRCs are good; this node can just be copied as-is. */ 620 /* OK, all the CRCs are good; this node can just be copied as-is. */
595 retry: 621 retry:
596 nraw->flash_offset = phys_ofs; 622 phys_ofs = write_ofs(c);
597 nraw->__totlen = rawlen;
598 nraw->next_phys = NULL;
599 623
600 ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); 624 ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);
601 625
@@ -603,17 +627,11 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
603 printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", 627 printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
604 rawlen, phys_ofs, ret, retlen); 628 rawlen, phys_ofs, ret, retlen);
605 if (retlen) { 629 if (retlen) {
606 /* Doesn't belong to any inode */ 630 jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
607 nraw->next_in_ino = NULL;
608
609 nraw->flash_offset |= REF_OBSOLETE;
610 jffs2_add_physical_node_ref(c, nraw);
611 jffs2_mark_node_obsolete(c, nraw);
612 } else { 631 } else {
613 printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", nraw->flash_offset); 632 printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs);
614 jffs2_free_raw_node_ref(nraw);
615 } 633 }
616 if (!retried && (nraw = jffs2_alloc_raw_node_ref())) { 634 if (!retried) {
617 /* Try to reallocate space and retry */ 635 /* Try to reallocate space and retry */
618 uint32_t dummy; 636 uint32_t dummy;
619 struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size]; 637 struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];
@@ -625,7 +643,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
625 jffs2_dbg_acct_sanity_check(c,jeb); 643 jffs2_dbg_acct_sanity_check(c,jeb);
626 jffs2_dbg_acct_paranoia_check(c, jeb); 644 jffs2_dbg_acct_paranoia_check(c, jeb);
627 645
628 ret = jffs2_reserve_space_gc(c, rawlen, &phys_ofs, &dummy, rawlen); 646 ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen);
629 /* this is not the exact summary size of it, 647 /* this is not the exact summary size of it,
630 it is only an upper estimation */ 648 it is only an upper estimation */
631 649
@@ -638,25 +656,13 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
638 goto retry; 656 goto retry;
639 } 657 }
640 D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); 658 D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
641 jffs2_free_raw_node_ref(nraw);
642 } 659 }
643 660
644 jffs2_free_raw_node_ref(nraw);
645 if (!ret) 661 if (!ret)
646 ret = -EIO; 662 ret = -EIO;
647 goto out_node; 663 goto out_node;
648 } 664 }
649 nraw->flash_offset |= REF_PRISTINE; 665 jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);
650 jffs2_add_physical_node_ref(c, nraw);
651
652 /* Link into per-inode list. This is safe because of the ic
653 state being INO_STATE_GC. Note that if we're doing this
654 for an inode which is in-core, the 'nraw' pointer is then
655 going to be fetched from ic->nodes by our caller. */
656 spin_lock(&c->erase_completion_lock);
657 nraw->next_in_ino = ic->nodes;
658 ic->nodes = nraw;
659 spin_unlock(&c->erase_completion_lock);
660 666
661 jffs2_mark_node_obsolete(c, raw); 667 jffs2_mark_node_obsolete(c, raw);
662 D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); 668 D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw)));
@@ -675,19 +681,16 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
675 struct jffs2_full_dnode *new_fn; 681 struct jffs2_full_dnode *new_fn;
676 struct jffs2_raw_inode ri; 682 struct jffs2_raw_inode ri;
677 struct jffs2_node_frag *last_frag; 683 struct jffs2_node_frag *last_frag;
678 jint16_t dev; 684 union jffs2_device_node dev;
679 char *mdata = NULL, mdatalen = 0; 685 char *mdata = NULL, mdatalen = 0;
680 uint32_t alloclen, phys_ofs, ilen; 686 uint32_t alloclen, ilen;
681 int ret; 687 int ret;
682 688
683 if (S_ISBLK(JFFS2_F_I_MODE(f)) || 689 if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
684 S_ISCHR(JFFS2_F_I_MODE(f)) ) { 690 S_ISCHR(JFFS2_F_I_MODE(f)) ) {
685 /* For these, we don't actually need to read the old node */ 691 /* For these, we don't actually need to read the old node */
686 /* FIXME: for minor or major > 255. */ 692 mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
687 dev = cpu_to_je16(((JFFS2_F_I_RDEV_MAJ(f) << 8) |
688 JFFS2_F_I_RDEV_MIN(f)));
689 mdata = (char *)&dev; 693 mdata = (char *)&dev;
690 mdatalen = sizeof(dev);
691 D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); 694 D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen));
692 } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { 695 } else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
693 mdatalen = fn->size; 696 mdatalen = fn->size;
@@ -706,7 +709,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
706 709
707 } 710 }
708 711
709 ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &phys_ofs, &alloclen, 712 ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
710 JFFS2_SUMMARY_INODE_SIZE); 713 JFFS2_SUMMARY_INODE_SIZE);
711 if (ret) { 714 if (ret) {
712 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", 715 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
@@ -744,7 +747,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
744 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); 747 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
745 ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen)); 748 ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
746 749
747 new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, phys_ofs, ALLOC_GC); 750 new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);
748 751
749 if (IS_ERR(new_fn)) { 752 if (IS_ERR(new_fn)) {
750 printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); 753 printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn));
@@ -765,7 +768,7 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
765{ 768{
766 struct jffs2_full_dirent *new_fd; 769 struct jffs2_full_dirent *new_fd;
767 struct jffs2_raw_dirent rd; 770 struct jffs2_raw_dirent rd;
768 uint32_t alloclen, phys_ofs; 771 uint32_t alloclen;
769 int ret; 772 int ret;
770 773
771 rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 774 rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -787,14 +790,14 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er
787 rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8)); 790 rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
788 rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize)); 791 rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));
789 792
790 ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &phys_ofs, &alloclen, 793 ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen,
791 JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); 794 JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
792 if (ret) { 795 if (ret) {
793 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", 796 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
794 sizeof(rd)+rd.nsize, ret); 797 sizeof(rd)+rd.nsize, ret);
795 return ret; 798 return ret;
796 } 799 }
797 new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, phys_ofs, ALLOC_GC); 800 new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);
798 801
799 if (IS_ERR(new_fd)) { 802 if (IS_ERR(new_fd)) {
800 printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd)); 803 printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd));
@@ -922,7 +925,7 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
922 struct jffs2_raw_inode ri; 925 struct jffs2_raw_inode ri;
923 struct jffs2_node_frag *frag; 926 struct jffs2_node_frag *frag;
924 struct jffs2_full_dnode *new_fn; 927 struct jffs2_full_dnode *new_fn;
925 uint32_t alloclen, phys_ofs, ilen; 928 uint32_t alloclen, ilen;
926 int ret; 929 int ret;
927 930
928 D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", 931 D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
@@ -1001,14 +1004,14 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras
1001 ri.data_crc = cpu_to_je32(0); 1004 ri.data_crc = cpu_to_je32(0);
1002 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); 1005 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
1003 1006
1004 ret = jffs2_reserve_space_gc(c, sizeof(ri), &phys_ofs, &alloclen, 1007 ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
1005 JFFS2_SUMMARY_INODE_SIZE); 1008 JFFS2_SUMMARY_INODE_SIZE);
1006 if (ret) { 1009 if (ret) {
1007 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", 1010 printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
1008 sizeof(ri), ret); 1011 sizeof(ri), ret);
1009 return ret; 1012 return ret;
1010 } 1013 }
1011 new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, ALLOC_GC); 1014 new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);
1012 1015
1013 if (IS_ERR(new_fn)) { 1016 if (IS_ERR(new_fn)) {
1014 printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn)); 1017 printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn));
@@ -1070,7 +1073,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1070{ 1073{
1071 struct jffs2_full_dnode *new_fn; 1074 struct jffs2_full_dnode *new_fn;
1072 struct jffs2_raw_inode ri; 1075 struct jffs2_raw_inode ri;
1073 uint32_t alloclen, phys_ofs, offset, orig_end, orig_start; 1076 uint32_t alloclen, offset, orig_end, orig_start;
1074 int ret = 0; 1077 int ret = 0;
1075 unsigned char *comprbuf = NULL, *writebuf; 1078 unsigned char *comprbuf = NULL, *writebuf;
1076 unsigned long pg; 1079 unsigned long pg;
@@ -1227,7 +1230,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1227 uint32_t cdatalen; 1230 uint32_t cdatalen;
1228 uint16_t comprtype = JFFS2_COMPR_NONE; 1231 uint16_t comprtype = JFFS2_COMPR_NONE;
1229 1232
1230 ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN, &phys_ofs, 1233 ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN,
1231 &alloclen, JFFS2_SUMMARY_INODE_SIZE); 1234 &alloclen, JFFS2_SUMMARY_INODE_SIZE);
1232 1235
1233 if (ret) { 1236 if (ret) {
@@ -1264,7 +1267,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
1264 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); 1267 ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
1265 ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen)); 1268 ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
1266 1269
1267 new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, phys_ofs, ALLOC_GC); 1270 new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC);
1268 1271
1269 jffs2_free_comprbuf(comprbuf, writebuf); 1272 jffs2_free_comprbuf(comprbuf, writebuf);
1270 1273
diff --git a/fs/jffs2/histo.h b/fs/jffs2/histo.h
deleted file mode 100644
index 22a93a08210c..000000000000
--- a/fs/jffs2/histo.h
+++ /dev/null
@@ -1,3 +0,0 @@
1/* This file provides the bit-probabilities for the input file */
2#define BIT_DIVIDER 629
3static int bits[9] = { 179,167,183,165,159,198,178,119,}; /* ia32 .so files */
diff --git a/include/linux/jffs2_fs_i.h b/fs/jffs2/jffs2_fs_i.h
index ad565bf9dcc1..2e0cc8e00b85 100644
--- a/include/linux/jffs2_fs_i.h
+++ b/fs/jffs2/jffs2_fs_i.h
@@ -5,6 +5,7 @@
5 5
6#include <linux/version.h> 6#include <linux/version.h>
7#include <linux/rbtree.h> 7#include <linux/rbtree.h>
8#include <linux/posix_acl.h>
8#include <asm/semaphore.h> 9#include <asm/semaphore.h>
9 10
10struct jffs2_inode_info { 11struct jffs2_inode_info {
@@ -45,6 +46,10 @@ struct jffs2_inode_info {
45 struct inode vfs_inode; 46 struct inode vfs_inode;
46#endif 47#endif
47#endif 48#endif
49#ifdef CONFIG_JFFS2_FS_POSIX_ACL
50 struct posix_acl *i_acl_access;
51 struct posix_acl *i_acl_default;
52#endif
48}; 53};
49 54
50#endif /* _JFFS2_FS_I */ 55#endif /* _JFFS2_FS_I */
diff --git a/include/linux/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 4bcfb5570221..935fec1b1201 100644
--- a/include/linux/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -100,6 +100,7 @@ struct jffs2_sb_info {
100#ifdef CONFIG_JFFS2_FS_WRITEBUFFER 100#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
101 /* Write-behind buffer for NAND flash */ 101 /* Write-behind buffer for NAND flash */
102 unsigned char *wbuf; 102 unsigned char *wbuf;
103 unsigned char *oobbuf;
103 uint32_t wbuf_ofs; 104 uint32_t wbuf_ofs;
104 uint32_t wbuf_len; 105 uint32_t wbuf_len;
105 struct jffs2_inodirty *wbuf_inodes; 106 struct jffs2_inodirty *wbuf_inodes;
@@ -107,7 +108,7 @@ struct jffs2_sb_info {
107 struct rw_semaphore wbuf_sem; /* Protects the write buffer */ 108 struct rw_semaphore wbuf_sem; /* Protects the write buffer */
108 109
109 /* Information about out-of-band area usage... */ 110 /* Information about out-of-band area usage... */
110 struct nand_oobinfo *oobinfo; 111 struct nand_ecclayout *ecclayout;
111 uint32_t badblock_pos; 112 uint32_t badblock_pos;
112 uint32_t fsdata_pos; 113 uint32_t fsdata_pos;
113 uint32_t fsdata_len; 114 uint32_t fsdata_len;
@@ -115,6 +116,16 @@ struct jffs2_sb_info {
115 116
116 struct jffs2_summary *summary; /* Summary information */ 117 struct jffs2_summary *summary; /* Summary information */
117 118
119#ifdef CONFIG_JFFS2_FS_XATTR
120#define XATTRINDEX_HASHSIZE (57)
121 uint32_t highest_xid;
122 struct list_head xattrindex[XATTRINDEX_HASHSIZE];
123 struct list_head xattr_unchecked;
124 struct jffs2_xattr_ref *xref_temp;
125 struct rw_semaphore xattr_sem;
126 uint32_t xdatum_mem_usage;
127 uint32_t xdatum_mem_threshold;
128#endif
118 /* OS-private pointer for getting back to master superblock info */ 129 /* OS-private pointer for getting back to master superblock info */
119 void *os_priv; 130 void *os_priv;
120}; 131};
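A purely hypothetical sketch of the kind of initialisation the new per-superblock xattr fields need; the real code lives in the xattr.c added by this patch, which is not shown in this excerpt, and the threshold figure below is invented.

#ifdef CONFIG_JFFS2_FS_XATTR
/* Hypothetical example only -- not the patch's jffs2_init_xattr_subsystem(). */
static void example_init_xattr_fields(struct jffs2_sb_info *c)
{
	int i;

	for (i = 0; i < XATTRINDEX_HASHSIZE; i++)
		INIT_LIST_HEAD(&c->xattrindex[i]);	/* per-bucket xdatum index */
	INIT_LIST_HEAD(&c->xattr_unchecked);
	c->xref_temp = NULL;
	init_rwsem(&c->xattr_sem);
	c->xdatum_mem_usage = 0;
	c->xdatum_mem_threshold = 32 * 1024;		/* invented figure */
}
#endif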
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 036cbd11c004..4889d0700c0e 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -26,6 +26,10 @@ static kmem_cache_t *tmp_dnode_info_slab;
26static kmem_cache_t *raw_node_ref_slab; 26static kmem_cache_t *raw_node_ref_slab;
27static kmem_cache_t *node_frag_slab; 27static kmem_cache_t *node_frag_slab;
28static kmem_cache_t *inode_cache_slab; 28static kmem_cache_t *inode_cache_slab;
29#ifdef CONFIG_JFFS2_FS_XATTR
30static kmem_cache_t *xattr_datum_cache;
31static kmem_cache_t *xattr_ref_cache;
32#endif
29 33
30int __init jffs2_create_slab_caches(void) 34int __init jffs2_create_slab_caches(void)
31{ 35{
@@ -53,8 +57,8 @@ int __init jffs2_create_slab_caches(void)
53 if (!tmp_dnode_info_slab) 57 if (!tmp_dnode_info_slab)
54 goto err; 58 goto err;
55 59
56 raw_node_ref_slab = kmem_cache_create("jffs2_raw_node_ref", 60 raw_node_ref_slab = kmem_cache_create("jffs2_refblock",
57 sizeof(struct jffs2_raw_node_ref), 61 sizeof(struct jffs2_raw_node_ref) * (REFS_PER_BLOCK + 1),
58 0, 0, NULL, NULL); 62 0, 0, NULL, NULL);
59 if (!raw_node_ref_slab) 63 if (!raw_node_ref_slab)
60 goto err; 64 goto err;
@@ -68,8 +72,24 @@ int __init jffs2_create_slab_caches(void)
68 inode_cache_slab = kmem_cache_create("jffs2_inode_cache", 72 inode_cache_slab = kmem_cache_create("jffs2_inode_cache",
69 sizeof(struct jffs2_inode_cache), 73 sizeof(struct jffs2_inode_cache),
70 0, 0, NULL, NULL); 74 0, 0, NULL, NULL);
71 if (inode_cache_slab) 75 if (!inode_cache_slab)
72 return 0; 76 goto err;
77
78#ifdef CONFIG_JFFS2_FS_XATTR
79 xattr_datum_cache = kmem_cache_create("jffs2_xattr_datum",
80 sizeof(struct jffs2_xattr_datum),
81 0, 0, NULL, NULL);
82 if (!xattr_datum_cache)
83 goto err;
84
85 xattr_ref_cache = kmem_cache_create("jffs2_xattr_ref",
86 sizeof(struct jffs2_xattr_ref),
87 0, 0, NULL, NULL);
88 if (!xattr_ref_cache)
89 goto err;
90#endif
91
92 return 0;
73 err: 93 err:
74 jffs2_destroy_slab_caches(); 94 jffs2_destroy_slab_caches();
75 return -ENOMEM; 95 return -ENOMEM;
@@ -91,6 +111,12 @@ void jffs2_destroy_slab_caches(void)
91 kmem_cache_destroy(node_frag_slab); 111 kmem_cache_destroy(node_frag_slab);
92 if(inode_cache_slab) 112 if(inode_cache_slab)
93 kmem_cache_destroy(inode_cache_slab); 113 kmem_cache_destroy(inode_cache_slab);
114#ifdef CONFIG_JFFS2_FS_XATTR
115 if (xattr_datum_cache)
116 kmem_cache_destroy(xattr_datum_cache);
117 if (xattr_ref_cache)
118 kmem_cache_destroy(xattr_ref_cache);
119#endif
94} 120}
95 121
96struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize) 122struct jffs2_full_dirent *jffs2_alloc_full_dirent(int namesize)
@@ -164,15 +190,65 @@ void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *x)
164 kmem_cache_free(tmp_dnode_info_slab, x); 190 kmem_cache_free(tmp_dnode_info_slab, x);
165} 191}
166 192
167struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void) 193struct jffs2_raw_node_ref *jffs2_alloc_refblock(void)
168{ 194{
169 struct jffs2_raw_node_ref *ret; 195 struct jffs2_raw_node_ref *ret;
196
170 ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL); 197 ret = kmem_cache_alloc(raw_node_ref_slab, GFP_KERNEL);
171 dbg_memalloc("%p\n", ret); 198 if (ret) {
199 int i = 0;
200 for (i=0; i < REFS_PER_BLOCK; i++) {
201 ret[i].flash_offset = REF_EMPTY_NODE;
202 ret[i].next_in_ino = NULL;
203 }
204 ret[i].flash_offset = REF_LINK_NODE;
205 ret[i].next_in_ino = NULL;
206 }
172 return ret; 207 return ret;
173} 208}
174 209
175void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *x) 210int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
211 struct jffs2_eraseblock *jeb, int nr)
212{
213 struct jffs2_raw_node_ref **p, *ref;
214 int i = nr;
215
216 dbg_memalloc("%d\n", nr);
217
218 p = &jeb->last_node;
219 ref = *p;
220
221 dbg_memalloc("Reserving %d refs for block @0x%08x\n", nr, jeb->offset);
222
223 /* If jeb->last_node is really a valid node then skip over it */
224 if (ref && ref->flash_offset != REF_EMPTY_NODE)
225 ref++;
226
227 while (i) {
228 if (!ref) {
229 dbg_memalloc("Allocating new refblock linked from %p\n", p);
230 ref = *p = jffs2_alloc_refblock();
231 if (!ref)
232 return -ENOMEM;
233 }
234 if (ref->flash_offset == REF_LINK_NODE) {
235 p = &ref->next_in_ino;
236 ref = *p;
237 continue;
238 }
239 i--;
240 ref++;
241 }
242 jeb->allocated_refs = nr;
243
244 dbg_memalloc("Reserved %d refs for block @0x%08x, last_node is %p (%08x,%p)\n",
245 nr, jeb->offset, jeb->last_node, jeb->last_node->flash_offset,
246 jeb->last_node->next_in_ino);
247
248 return 0;
249}
250
251void jffs2_free_refblock(struct jffs2_raw_node_ref *x)
176{ 252{
177 dbg_memalloc("%p\n", x); 253 dbg_memalloc("%p\n", x);
178 kmem_cache_free(raw_node_ref_slab, x); 254 kmem_cache_free(raw_node_ref_slab, x);
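The slab now hands out whole blocks of refs rather than single structures: REFS_PER_BLOCK usable entries followed by one terminator slot whose flash_offset is REF_LINK_NODE, with next_in_ino pointing at the next block (or still NULL at the end of the chain); unused slots are REF_EMPTY_NODE. The small walker below is illustrative only, but uses just the sentinels this patch defines in nodelist.h.

/* Illustrative only: count the refs in use in one eraseblock's refblock chain.
 * Used entries are contiguous from the front of each block; REF_EMPTY_NODE
 * marks the first unused slot, REF_LINK_NODE chains to the next block. */
static int count_used_refs(struct jffs2_raw_node_ref *ref)
{
	int n = 0;

	while (ref) {
		if (ref->flash_offset == REF_LINK_NODE) {
			ref = ref->next_in_ino;		/* hop to the next refblock */
			continue;
		}
		if (ref->flash_offset == REF_EMPTY_NODE)
			break;				/* first unused slot */
		n++;
		ref++;
	}
	return n;
}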
@@ -205,3 +281,40 @@ void jffs2_free_inode_cache(struct jffs2_inode_cache *x)
205 dbg_memalloc("%p\n", x); 281 dbg_memalloc("%p\n", x);
206 kmem_cache_free(inode_cache_slab, x); 282 kmem_cache_free(inode_cache_slab, x);
207} 283}
284
285#ifdef CONFIG_JFFS2_FS_XATTR
286struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
287{
288 struct jffs2_xattr_datum *xd;
289 xd = kmem_cache_alloc(xattr_datum_cache, GFP_KERNEL);
290 dbg_memalloc("%p\n", xd);
291
292 memset(xd, 0, sizeof(struct jffs2_xattr_datum));
293 xd->class = RAWNODE_CLASS_XATTR_DATUM;
294 INIT_LIST_HEAD(&xd->xindex);
295 return xd;
296}
297
298void jffs2_free_xattr_datum(struct jffs2_xattr_datum *xd)
299{
300 dbg_memalloc("%p\n", xd);
301 kmem_cache_free(xattr_datum_cache, xd);
302}
303
304struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
305{
306 struct jffs2_xattr_ref *ref;
307 ref = kmem_cache_alloc(xattr_ref_cache, GFP_KERNEL);
308 dbg_memalloc("%p\n", ref);
309
310 memset(ref, 0, sizeof(struct jffs2_xattr_ref));
311 ref->class = RAWNODE_CLASS_XATTR_REF;
312 return ref;
313}
314
315void jffs2_free_xattr_ref(struct jffs2_xattr_ref *ref)
316{
317 dbg_memalloc("%p\n", ref);
318 kmem_cache_free(xattr_ref_cache, ref);
319}
320#endif
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 1d46677afd17..927dfe42ba76 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -438,8 +438,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
438 if (c->mtd->point) { 438 if (c->mtd->point) {
439 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); 439 err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
440 if (!err && retlen < tn->csize) { 440 if (!err && retlen < tn->csize) {
441 JFFS2_WARNING("MTD point returned len too short: %zu " 441 JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
442 "instead of %u.\n", retlen, tn->csize);
443 c->mtd->unpoint(c->mtd, buffer, ofs, len); 442 c->mtd->unpoint(c->mtd, buffer, ofs, len);
444 } else if (err) 443 } else if (err)
445 JFFS2_WARNING("MTD point failed: error code %d.\n", err); 444 JFFS2_WARNING("MTD point failed: error code %d.\n", err);
@@ -462,8 +461,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
462 } 461 }
463 462
464 if (retlen != len) { 463 if (retlen != len) {
465 JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", 464 JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", ofs, retlen, len);
466 ofs, retlen, len);
467 err = -EIO; 465 err = -EIO;
468 goto free_out; 466 goto free_out;
469 } 467 }
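The two hunks above only reflow warning strings, but they show the pattern check_node_data() relies on: try the optional MTD point() mapping and fall back to a buffered read. A condensed sketch of that pattern, assuming the point/unpoint signatures visible above and the usual jffs2_flash_read() wrapper; the helper name and the condensed error handling are illustrative.

/* Illustrative: map via point() if available, otherwise read into a buffer. */
static int example_map_or_read(struct jffs2_sb_info *c, loff_t ofs, size_t len,
			       size_t *retlen, unsigned char **bufp)
{
	int err = -EOPNOTSUPP;

	if (c->mtd->point) {
		err = c->mtd->point(c->mtd, ofs, len, retlen, bufp);
		if (!err && *retlen < len) {	/* partial mapping: give it back */
			c->mtd->unpoint(c->mtd, *bufp, ofs, len);
			err = -ENOMEM;
		}
	}
	if (err) {				/* fall back to a plain read */
		*bufp = kmalloc(len, GFP_KERNEL);
		if (!*bufp)
			return -ENOMEM;
		err = jffs2_flash_read(c, ofs, len, retlen, *bufp);
	}
	return err;
}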
@@ -940,6 +938,7 @@ void jffs2_free_ino_caches(struct jffs2_sb_info *c)
940 this = c->inocache_list[i]; 938 this = c->inocache_list[i];
941 while (this) { 939 while (this) {
942 next = this->next; 940 next = this->next;
941 jffs2_xattr_free_inode(c, this);
943 jffs2_free_inode_cache(this); 942 jffs2_free_inode_cache(this);
944 this = next; 943 this = next;
945 } 944 }
@@ -954,9 +953,13 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
954 953
955 for (i=0; i<c->nr_blocks; i++) { 954 for (i=0; i<c->nr_blocks; i++) {
956 this = c->blocks[i].first_node; 955 this = c->blocks[i].first_node;
957 while(this) { 956 while (this) {
958 next = this->next_phys; 957 if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
959 jffs2_free_raw_node_ref(this); 958 next = this[REFS_PER_BLOCK].next_in_ino;
959 else
960 next = NULL;
961
962 jffs2_free_refblock(this);
960 this = next; 963 this = next;
961 } 964 }
962 c->blocks[i].first_node = c->blocks[i].last_node = NULL; 965 c->blocks[i].first_node = c->blocks[i].last_node = NULL;
@@ -1047,3 +1050,169 @@ void jffs2_kill_fragtree(struct rb_root *root, struct jffs2_sb_info *c)
1047 cond_resched(); 1050 cond_resched();
1048 } 1051 }
1049} 1052}
1053
1054struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
1055 struct jffs2_eraseblock *jeb,
1056 uint32_t ofs, uint32_t len,
1057 struct jffs2_inode_cache *ic)
1058{
1059 struct jffs2_raw_node_ref *ref;
1060
1061 BUG_ON(!jeb->allocated_refs);
1062 jeb->allocated_refs--;
1063
1064 ref = jeb->last_node;
1065
1066 dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset,
1067 ref->next_in_ino);
1068
1069 while (ref->flash_offset != REF_EMPTY_NODE) {
1070 if (ref->flash_offset == REF_LINK_NODE)
1071 ref = ref->next_in_ino;
1072 else
1073 ref++;
1074 }
1075
1076 dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
1077 ref->flash_offset, ofs, ref->next_in_ino, len);
1078
1079 ref->flash_offset = ofs;
1080
1081 if (!jeb->first_node) {
1082 jeb->first_node = ref;
1083 BUG_ON(ref_offset(ref) != jeb->offset);
1084 } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) {
1085 uint32_t last_len = ref_totlen(c, jeb, jeb->last_node);
1086
1087 JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
1088 ref, ref_offset(ref), ref_offset(ref)+len,
1089 ref_offset(jeb->last_node),
1090 ref_offset(jeb->last_node)+last_len);
1091 BUG();
1092 }
1093 jeb->last_node = ref;
1094
1095 if (ic) {
1096 ref->next_in_ino = ic->nodes;
1097 ic->nodes = ref;
1098 } else {
1099 ref->next_in_ino = NULL;
1100 }
1101
1102 switch(ref_flags(ref)) {
1103 case REF_UNCHECKED:
1104 c->unchecked_size += len;
1105 jeb->unchecked_size += len;
1106 break;
1107
1108 case REF_NORMAL:
1109 case REF_PRISTINE:
1110 c->used_size += len;
1111 jeb->used_size += len;
1112 break;
1113
1114 case REF_OBSOLETE:
1115 c->dirty_size += len;
1116 jeb->dirty_size += len;
1117 break;
1118 }
1119 c->free_size -= len;
1120 jeb->free_size -= len;
1121
1122#ifdef TEST_TOTLEN
1123 /* Set (and test) __totlen field... for now */
1124 ref->__totlen = len;
1125 ref_totlen(c, jeb, ref);
1126#endif
1127 return ref;
1128}
1129
1130/* No locking, no reservation of 'ref'. Do not use on a live file system */
1131int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
1132 uint32_t size)
1133{
1134 if (!size)
1135 return 0;
1136 if (unlikely(size > jeb->free_size)) {
 1137 printk(KERN_CRIT "Dirty space 0x%x larger than free_size 0x%x (wasted 0x%x)\n",
1138 size, jeb->free_size, jeb->wasted_size);
1139 BUG();
1140 }
1141 /* REF_EMPTY_NODE is !obsolete, so that works OK */
1142 if (jeb->last_node && ref_obsolete(jeb->last_node)) {
1143#ifdef TEST_TOTLEN
1144 jeb->last_node->__totlen += size;
1145#endif
1146 c->dirty_size += size;
1147 c->free_size -= size;
1148 jeb->dirty_size += size;
1149 jeb->free_size -= size;
1150 } else {
1151 uint32_t ofs = jeb->offset + c->sector_size - jeb->free_size;
1152 ofs |= REF_OBSOLETE;
1153
1154 jffs2_link_node_ref(c, jeb, ofs, size, NULL);
1155 }
1156
1157 return 0;
1158}
1159
1160/* Calculate totlen from surrounding nodes or eraseblock */
1161static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
1162 struct jffs2_eraseblock *jeb,
1163 struct jffs2_raw_node_ref *ref)
1164{
1165 uint32_t ref_end;
1166 struct jffs2_raw_node_ref *next_ref = ref_next(ref);
1167
1168 if (next_ref)
1169 ref_end = ref_offset(next_ref);
1170 else {
1171 if (!jeb)
1172 jeb = &c->blocks[ref->flash_offset / c->sector_size];
1173
1174 /* Last node in block. Use free_space */
1175 if (unlikely(ref != jeb->last_node)) {
1176 printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
1177 ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0);
1178 BUG();
1179 }
1180 ref_end = jeb->offset + c->sector_size - jeb->free_size;
1181 }
1182 return ref_end - ref_offset(ref);
1183}
1184
1185uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
1186 struct jffs2_raw_node_ref *ref)
1187{
1188 uint32_t ret;
1189
1190 ret = __ref_totlen(c, jeb, ref);
1191
1192#ifdef TEST_TOTLEN
1193 if (unlikely(ret != ref->__totlen)) {
1194 if (!jeb)
1195 jeb = &c->blocks[ref->flash_offset / c->sector_size];
1196
1197 printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
1198 ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
1199 ret, ref->__totlen);
1200 if (ref_next(ref)) {
1201 printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)),
1202 ref_offset(ref_next(ref))+ref->__totlen);
1203 } else
1204 printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node);
1205
1206 printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size);
1207
1208#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
1209 __jffs2_dbg_dump_node_refs_nolock(c, jeb);
1210#endif
1211
1212 WARN_ON(1);
1213
1214 ret = ref->__totlen;
1215 }
1216#endif /* TEST_TOTLEN */
1217 return ret;
1218}
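To make the implicit-length scheme in __ref_totlen() concrete, a short worked example with invented numbers:

/* Worked example (numbers invented).  Eraseblock at 0x40000, sector_size
 * 0x10000, free_size 0xf000, two refs at 0x40000 and 0x40400:
 *   totlen(ref @ 0x40000) = 0x40400 - 0x40000                      = 0x400
 *   totlen(ref @ 0x40400) = (0x40000 + 0x10000 - 0xf000) - 0x40400 = 0xc00
 * Node lengths are thus recovered from the neighbouring ref's offset, or from
 * the block's free space for the last node; __totlen survives only under
 * TEST_TOTLEN as a cross-check. */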
diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
index 23a67bb3052f..7ad8ee043880 100644
--- a/fs/jffs2/nodelist.h
+++ b/fs/jffs2/nodelist.h
@@ -18,8 +18,10 @@
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/types.h> 19#include <linux/types.h>
20#include <linux/jffs2.h> 20#include <linux/jffs2.h>
21#include <linux/jffs2_fs_sb.h> 21#include "jffs2_fs_sb.h"
22#include <linux/jffs2_fs_i.h> 22#include "jffs2_fs_i.h"
23#include "xattr.h"
24#include "acl.h"
23#include "summary.h" 25#include "summary.h"
24 26
25#ifdef __ECOS 27#ifdef __ECOS
@@ -75,14 +77,50 @@
75struct jffs2_raw_node_ref 77struct jffs2_raw_node_ref
76{ 78{
77 struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref 79 struct jffs2_raw_node_ref *next_in_ino; /* Points to the next raw_node_ref
78 for this inode. If this is the last, it points to the inode_cache 80 for this object. If this _is_ the last, it points to the inode_cache,
79 for this inode instead. The inode_cache will have NULL in the first 81 xattr_ref or xattr_datum instead. The common part of those structures
80 word so you know when you've got there :) */ 82 has NULL in the first word. See jffs2_raw_ref_to_ic() below */
81 struct jffs2_raw_node_ref *next_phys;
82 uint32_t flash_offset; 83 uint32_t flash_offset;
84#define TEST_TOTLEN
85#ifdef TEST_TOTLEN
83 uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */ 86 uint32_t __totlen; /* This may die; use ref_totlen(c, jeb, ) below */
87#endif
84}; 88};
85 89
90#define REF_LINK_NODE ((int32_t)-1)
91#define REF_EMPTY_NODE ((int32_t)-2)
92
93/* Use blocks of about 256 bytes */
94#define REFS_PER_BLOCK ((255/sizeof(struct jffs2_raw_node_ref))-1)
95
96static inline struct jffs2_raw_node_ref *ref_next(struct jffs2_raw_node_ref *ref)
97{
98 ref++;
99
100 /* Link to another block of refs */
101 if (ref->flash_offset == REF_LINK_NODE) {
102 ref = ref->next_in_ino;
103 if (!ref)
104 return ref;
105 }
106
107 /* End of chain */
108 if (ref->flash_offset == REF_EMPTY_NODE)
109 return NULL;
110
111 return ref;
112}
113
114static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw)
115{
116 while(raw->next_in_ino)
117 raw = raw->next_in_ino;
118
119 /* NB. This can be a jffs2_xattr_datum or jffs2_xattr_ref and
120 not actually a jffs2_inode_cache. Check ->class */
121 return ((struct jffs2_inode_cache *)raw);
122}
123
86 /* flash_offset & 3 always has to be zero, because nodes are 124 /* flash_offset & 3 always has to be zero, because nodes are
87 always aligned at 4 bytes. So we have a couple of extra bits 125 always aligned at 4 bytes. So we have a couple of extra bits
88 to play with, which indicate the node's status; see below: */ 126 to play with, which indicate the node's status; see below: */
@@ -95,6 +133,11 @@ struct jffs2_raw_node_ref
95#define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE) 133#define ref_obsolete(ref) (((ref)->flash_offset & 3) == REF_OBSOLETE)
96#define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0) 134#define mark_ref_normal(ref) do { (ref)->flash_offset = ref_offset(ref) | REF_NORMAL; } while(0)
97 135
136/* NB: REF_PRISTINE for an inode-less node (ref->next_in_ino == NULL) indicates
137 it is an unknown node of type JFFS2_NODETYPE_RWCOMPAT_COPY, so it'll get
138 copied. If you need to do anything different to GC inode-less nodes, then
139 you need to modify gc.c accordingly. */
140
98/* For each inode in the filesystem, we need to keep a record of 141/* For each inode in the filesystem, we need to keep a record of
99 nlink, because it would be a PITA to scan the whole directory tree 142 nlink, because it would be a PITA to scan the whole directory tree
100 at read_inode() time to calculate it, and to keep sufficient information 143 at read_inode() time to calculate it, and to keep sufficient information
@@ -103,15 +146,27 @@ struct jffs2_raw_node_ref
103 a pointer to the first physical node which is part of this inode, too. 146 a pointer to the first physical node which is part of this inode, too.
104*/ 147*/
105struct jffs2_inode_cache { 148struct jffs2_inode_cache {
149 /* First part of structure is shared with other objects which
150 can terminate the raw node refs' next_in_ino list -- which
 151 are currently struct jffs2_xattr_datum and struct jffs2_xattr_ref. */
152
106 struct jffs2_full_dirent *scan_dents; /* Used during scan to hold 153 struct jffs2_full_dirent *scan_dents; /* Used during scan to hold
107 temporary lists of dirents, and later must be set to 154 temporary lists of dirents, and later must be set to
108 NULL to mark the end of the raw_node_ref->next_in_ino 155 NULL to mark the end of the raw_node_ref->next_in_ino
109 chain. */ 156 chain. */
110 struct jffs2_inode_cache *next;
111 struct jffs2_raw_node_ref *nodes; 157 struct jffs2_raw_node_ref *nodes;
158 uint8_t class; /* It's used for identification */
159
160 /* end of shared structure */
161
162 uint8_t flags;
163 uint16_t state;
112 uint32_t ino; 164 uint32_t ino;
165 struct jffs2_inode_cache *next;
166#ifdef CONFIG_JFFS2_FS_XATTR
167 struct jffs2_xattr_ref *xref;
168#endif
113 int nlink; 169 int nlink;
114 int state;
115}; 170};
116 171
117/* Inode states for 'state' above. We need the 'GC' state to prevent 172/* Inode states for 'state' above. We need the 'GC' state to prevent
@@ -125,8 +180,16 @@ struct jffs2_inode_cache {
125#define INO_STATE_READING 5 /* In read_inode() */ 180#define INO_STATE_READING 5 /* In read_inode() */
126#define INO_STATE_CLEARING 6 /* In clear_inode() */ 181#define INO_STATE_CLEARING 6 /* In clear_inode() */
127 182
183#define INO_FLAGS_XATTR_CHECKED 0x01 /* has no duplicate xattr_ref */
184
185#define RAWNODE_CLASS_INODE_CACHE 0
186#define RAWNODE_CLASS_XATTR_DATUM 1
187#define RAWNODE_CLASS_XATTR_REF 2
188
128#define INOCACHE_HASHSIZE 128 189#define INOCACHE_HASHSIZE 128
129 190
191#define write_ofs(c) ((c)->nextblock->offset + (c)->sector_size - (c)->nextblock->free_size)
192
130/* 193/*
131 Larger representation of a raw node, kept in-core only when the 194 Larger representation of a raw node, kept in-core only when the
132 struct inode for this particular ino is instantiated. 195 struct inode for this particular ino is instantiated.
@@ -192,6 +255,7 @@ struct jffs2_eraseblock
192 uint32_t wasted_size; 255 uint32_t wasted_size;
193 uint32_t free_size; /* Note that sector_size - free_size 256 uint32_t free_size; /* Note that sector_size - free_size
194 is the address of the first free space */ 257 is the address of the first free space */
258 uint32_t allocated_refs;
195 struct jffs2_raw_node_ref *first_node; 259 struct jffs2_raw_node_ref *first_node;
196 struct jffs2_raw_node_ref *last_node; 260 struct jffs2_raw_node_ref *last_node;
197 261
@@ -203,57 +267,7 @@ static inline int jffs2_blocks_use_vmalloc(struct jffs2_sb_info *c)
203 return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024); 267 return ((c->flash_size / c->sector_size) * sizeof (struct jffs2_eraseblock)) > (128 * 1024);
204} 268}
205 269
206/* Calculate totlen from surrounding nodes or eraseblock */ 270#define ref_totlen(a, b, c) __jffs2_ref_totlen((a), (b), (c))
207static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
208 struct jffs2_eraseblock *jeb,
209 struct jffs2_raw_node_ref *ref)
210{
211 uint32_t ref_end;
212
213 if (ref->next_phys)
214 ref_end = ref_offset(ref->next_phys);
215 else {
216 if (!jeb)
217 jeb = &c->blocks[ref->flash_offset / c->sector_size];
218
219 /* Last node in block. Use free_space */
220 BUG_ON(ref != jeb->last_node);
221 ref_end = jeb->offset + c->sector_size - jeb->free_size;
222 }
223 return ref_end - ref_offset(ref);
224}
225
226static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
227 struct jffs2_eraseblock *jeb,
228 struct jffs2_raw_node_ref *ref)
229{
230 uint32_t ret;
231
232#if CONFIG_JFFS2_FS_DEBUG > 0
233 if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
234 printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
235 jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
236 BUG();
237 }
238#endif
239
240#if 1
241 ret = ref->__totlen;
242#else
243 /* This doesn't actually work yet */
244 ret = __ref_totlen(c, jeb, ref);
245 if (ret != ref->__totlen) {
246 printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
247 ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
248 ret, ref->__totlen);
249 if (!jeb)
250 jeb = &c->blocks[ref->flash_offset / c->sector_size];
251 jffs2_dbg_dump_node_refs_nolock(c, jeb);
252 BUG();
253 }
254#endif
255 return ret;
256}
257 271
258#define ALLOC_NORMAL 0 /* Normal allocation */ 272#define ALLOC_NORMAL 0 /* Normal allocation */
259#define ALLOC_DELETION 1 /* Deletion node. Best to allow it */ 273#define ALLOC_DELETION 1 /* Deletion node. Best to allow it */
@@ -268,13 +282,15 @@ static inline uint32_t ref_totlen(struct jffs2_sb_info *c,
268 282
269#define PAD(x) (((x)+3)&~3) 283#define PAD(x) (((x)+3)&~3)
270 284
271static inline struct jffs2_inode_cache *jffs2_raw_ref_to_ic(struct jffs2_raw_node_ref *raw) 285static inline int jffs2_encode_dev(union jffs2_device_node *jdev, dev_t rdev)
272{ 286{
273 while(raw->next_in_ino) { 287 if (old_valid_dev(rdev)) {
274 raw = raw->next_in_ino; 288 jdev->old = cpu_to_je16(old_encode_dev(rdev));
289 return sizeof(jdev->old);
290 } else {
291 jdev->new = cpu_to_je32(new_encode_dev(rdev));
292 return sizeof(jdev->new);
275 } 293 }
276
277 return ((struct jffs2_inode_cache *)raw);
278} 294}
279 295
280static inline struct jffs2_node_frag *frag_first(struct rb_root *root) 296static inline struct jffs2_node_frag *frag_first(struct rb_root *root)
@@ -324,28 +340,44 @@ void jffs2_obsolete_node_frag(struct jffs2_sb_info *c, struct jffs2_node_frag *t
324int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn); 340int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_full_dnode *fn);
325void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size); 341void jffs2_truncate_fragtree (struct jffs2_sb_info *c, struct rb_root *list, uint32_t size);
326int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn); 342int jffs2_add_older_frag_to_fragtree(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_tmp_dnode_info *tn);
343struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
344 struct jffs2_eraseblock *jeb,
345 uint32_t ofs, uint32_t len,
346 struct jffs2_inode_cache *ic);
347extern uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c,
348 struct jffs2_eraseblock *jeb,
349 struct jffs2_raw_node_ref *ref);
327 350
328/* nodemgmt.c */ 351/* nodemgmt.c */
329int jffs2_thread_should_wake(struct jffs2_sb_info *c); 352int jffs2_thread_should_wake(struct jffs2_sb_info *c);
330int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, 353int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
331 uint32_t *len, int prio, uint32_t sumsize); 354 uint32_t *len, int prio, uint32_t sumsize);
332int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, 355int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
333 uint32_t *len, uint32_t sumsize); 356 uint32_t *len, uint32_t sumsize);
334int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new); 357struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
358 uint32_t ofs, uint32_t len,
359 struct jffs2_inode_cache *ic);
335void jffs2_complete_reservation(struct jffs2_sb_info *c); 360void jffs2_complete_reservation(struct jffs2_sb_info *c);
336void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw); 361void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *raw);
337 362
338/* write.c */ 363/* write.c */
339int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri); 364int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint32_t mode, struct jffs2_raw_inode *ri);
340 365
341struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode); 366struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
342struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode); 367 struct jffs2_raw_inode *ri, const unsigned char *data,
368 uint32_t datalen, int alloc_mode);
369struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
370 struct jffs2_raw_dirent *rd, const unsigned char *name,
371 uint32_t namelen, int alloc_mode);
343int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, 372int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
344 struct jffs2_raw_inode *ri, unsigned char *buf, 373 struct jffs2_raw_inode *ri, unsigned char *buf,
345 uint32_t offset, uint32_t writelen, uint32_t *retlen); 374 uint32_t offset, uint32_t writelen, uint32_t *retlen);
346int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const char *name, int namelen); 375int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, struct jffs2_inode_info *f,
347int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name, int namelen, struct jffs2_inode_info *dead_f, uint32_t time); 376 struct jffs2_raw_inode *ri, const char *name, int namelen);
348int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino, uint8_t type, const char *name, int namelen, uint32_t time); 377int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, const char *name,
378 int namelen, struct jffs2_inode_info *dead_f, uint32_t time);
379int jffs2_do_link(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint32_t ino,
380 uint8_t type, const char *name, int namelen, uint32_t time);
349 381
350 382
351/* readinode.c */ 383/* readinode.c */
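The prototypes above imply the new write path this patch converts every caller to: space is reserved by length only, the offset is taken from the current nextblock via write_ofs() at the moment of writing, and the node ref is installed with (offset | REF_* flag, length, inode cache). The sketch below is illustrative and not from the patch; the function name is invented, error paths are condensed, and the macros and calls used are the ones declared or used elsewhere in this diff.

/* Illustrative sketch of the new reserve -> write -> add-ref convention. */
static int example_write_node(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic,
			      unsigned char *buf, uint32_t len)
{
	uint32_t alloclen, ofs;
	size_t retlen;
	int ret;

	ret = jffs2_reserve_space(c, len, &alloclen, ALLOC_NORMAL,
				  JFFS2_SUMMARY_INODE_SIZE);
	if (ret)
		return ret;

	ofs = write_ofs(c);		/* decided here, no longer returned by reserve */
	ret = jffs2_flash_write(c, ofs, len, &retlen, buf);
	if (!ret && retlen == len)
		jffs2_add_physical_node_ref(c, ofs | REF_NORMAL, PAD(len), ic);

	jffs2_complete_reservation(c);	/* drops the alloc_sem taken by reserve */
	return ret;
}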
@@ -368,12 +400,19 @@ struct jffs2_raw_inode *jffs2_alloc_raw_inode(void);
368void jffs2_free_raw_inode(struct jffs2_raw_inode *); 400void jffs2_free_raw_inode(struct jffs2_raw_inode *);
369struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void); 401struct jffs2_tmp_dnode_info *jffs2_alloc_tmp_dnode_info(void);
370void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *); 402void jffs2_free_tmp_dnode_info(struct jffs2_tmp_dnode_info *);
371struct jffs2_raw_node_ref *jffs2_alloc_raw_node_ref(void); 403int jffs2_prealloc_raw_node_refs(struct jffs2_sb_info *c,
372void jffs2_free_raw_node_ref(struct jffs2_raw_node_ref *); 404 struct jffs2_eraseblock *jeb, int nr);
405void jffs2_free_refblock(struct jffs2_raw_node_ref *);
373struct jffs2_node_frag *jffs2_alloc_node_frag(void); 406struct jffs2_node_frag *jffs2_alloc_node_frag(void);
374void jffs2_free_node_frag(struct jffs2_node_frag *); 407void jffs2_free_node_frag(struct jffs2_node_frag *);
375struct jffs2_inode_cache *jffs2_alloc_inode_cache(void); 408struct jffs2_inode_cache *jffs2_alloc_inode_cache(void);
376void jffs2_free_inode_cache(struct jffs2_inode_cache *); 409void jffs2_free_inode_cache(struct jffs2_inode_cache *);
410#ifdef CONFIG_JFFS2_FS_XATTR
411struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void);
412void jffs2_free_xattr_datum(struct jffs2_xattr_datum *);
413struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void);
414void jffs2_free_xattr_ref(struct jffs2_xattr_ref *);
415#endif
377 416
378/* gc.c */ 417/* gc.c */
379int jffs2_garbage_collect_pass(struct jffs2_sb_info *c); 418int jffs2_garbage_collect_pass(struct jffs2_sb_info *c);
@@ -393,12 +432,14 @@ int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
393 uint32_t ofs, uint32_t len); 432 uint32_t ofs, uint32_t len);
394struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino); 433struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino);
395int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb); 434int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
435int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t size);
396 436
397/* build.c */ 437/* build.c */
398int jffs2_do_mount_fs(struct jffs2_sb_info *c); 438int jffs2_do_mount_fs(struct jffs2_sb_info *c);
399 439
400/* erase.c */ 440/* erase.c */
401void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count); 441void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count);
442void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
402 443
403#ifdef CONFIG_JFFS2_FS_WRITEBUFFER 444#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
404/* wbuf.c */ 445/* wbuf.c */
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 49127a1f0458..8bedfd2ff689 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -23,13 +23,12 @@
23 * jffs2_reserve_space - request physical space to write nodes to flash 23 * jffs2_reserve_space - request physical space to write nodes to flash
24 * @c: superblock info 24 * @c: superblock info
25 * @minsize: Minimum acceptable size of allocation 25 * @minsize: Minimum acceptable size of allocation
26 * @ofs: Returned value of node offset
27 * @len: Returned value of allocation length 26 * @len: Returned value of allocation length
28 * @prio: Allocation type - ALLOC_{NORMAL,DELETION} 27 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
29 * 28 *
30 * Requests a block of physical space on the flash. Returns zero for success 29 * Requests a block of physical space on the flash. Returns zero for success
31 * and puts 'ofs' and 'len' into the appriopriate place, or returns -ENOSPC 30 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 32 * or other error if appropriate. 31 * error if appropriate. The node offset is no longer returned here.
33 * 32 *
34 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem 33 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
35 * allocation semaphore, to prevent more than one allocation from being 34 * allocation semaphore, to prevent more than one allocation from being
@@ -40,9 +39,9 @@
40 */ 39 */
41 40
42static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, 41static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
43 uint32_t *ofs, uint32_t *len, uint32_t sumsize); 42 uint32_t *len, uint32_t sumsize);
44 43
45int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, 44int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
46 uint32_t *len, int prio, uint32_t sumsize) 45 uint32_t *len, int prio, uint32_t sumsize)
47{ 46{
48 int ret = -EAGAIN; 47 int ret = -EAGAIN;
@@ -132,19 +131,21 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs
132 spin_lock(&c->erase_completion_lock); 131 spin_lock(&c->erase_completion_lock);
133 } 132 }
134 133
135 ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); 134 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
136 if (ret) { 135 if (ret) {
137 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); 136 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
138 } 137 }
139 } 138 }
140 spin_unlock(&c->erase_completion_lock); 139 spin_unlock(&c->erase_completion_lock);
140 if (!ret)
141 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
141 if (ret) 142 if (ret)
142 up(&c->alloc_sem); 143 up(&c->alloc_sem);
143 return ret; 144 return ret;
144} 145}
145 146
146int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, 147int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
147 uint32_t *len, uint32_t sumsize) 148 uint32_t *len, uint32_t sumsize)
148{ 149{
149 int ret = -EAGAIN; 150 int ret = -EAGAIN;
150 minsize = PAD(minsize); 151 minsize = PAD(minsize);
@@ -153,12 +154,15 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *
153 154
154 spin_lock(&c->erase_completion_lock); 155 spin_lock(&c->erase_completion_lock);
155 while(ret == -EAGAIN) { 156 while(ret == -EAGAIN) {
156 ret = jffs2_do_reserve_space(c, minsize, ofs, len, sumsize); 157 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
157 if (ret) { 158 if (ret) {
158 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); 159 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
159 } 160 }
160 } 161 }
161 spin_unlock(&c->erase_completion_lock); 162 spin_unlock(&c->erase_completion_lock);
163 if (!ret)
164 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
165
162 return ret; 166 return ret;
163} 167}
164 168
@@ -259,10 +263,11 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
259} 263}
260 264
261/* Called with alloc sem _and_ erase_completion_lock */ 265/* Called with alloc sem _and_ erase_completion_lock */
262static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, uint32_t sumsize) 266static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
267 uint32_t *len, uint32_t sumsize)
263{ 268{
264 struct jffs2_eraseblock *jeb = c->nextblock; 269 struct jffs2_eraseblock *jeb = c->nextblock;
265 uint32_t reserved_size; /* for summary information at the end of the jeb */ 270 uint32_t reserved_size; /* for summary information at the end of the jeb */
266 int ret; 271 int ret;
267 272
268 restart: 273 restart:
@@ -312,6 +317,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
312 } 317 }
313 } else { 318 } else {
314 if (jeb && minsize > jeb->free_size) { 319 if (jeb && minsize > jeb->free_size) {
320 uint32_t waste;
321
315 /* Skip the end of this block and file it as having some dirty space */ 322 /* Skip the end of this block and file it as having some dirty space */
316 /* If there's a pending write to it, flush now */ 323 /* If there's a pending write to it, flush now */
317 324
@@ -324,10 +331,26 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
324 goto restart; 331 goto restart;
325 } 332 }
326 333
327 c->wasted_size += jeb->free_size; 334 spin_unlock(&c->erase_completion_lock);
328 c->free_size -= jeb->free_size; 335
329 jeb->wasted_size += jeb->free_size; 336 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
330 jeb->free_size = 0; 337 if (ret)
338 return ret;
339 /* Just lock it again and continue. Nothing much can change because
340 we hold c->alloc_sem anyway. In fact, it's not entirely clear why
341 we hold c->erase_completion_lock in the majority of this function...
342 but that's a question for another (more caffeine-rich) day. */
343 spin_lock(&c->erase_completion_lock);
344
345 waste = jeb->free_size;
346 jffs2_link_node_ref(c, jeb,
347 (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
348 waste, NULL);
349 /* FIXME: that made it count as dirty. Convert to wasted */
350 jeb->dirty_size -= waste;
351 c->dirty_size -= waste;
352 jeb->wasted_size += waste;
353 c->wasted_size += waste;
331 354
332 jffs2_close_nextblock(c, jeb); 355 jffs2_close_nextblock(c, jeb);
333 jeb = NULL; 356 jeb = NULL;
@@ -349,7 +372,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
349 } 372 }
350 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has 373 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
351 enough space */ 374 enough space */
352 *ofs = jeb->offset + (c->sector_size - jeb->free_size);
353 *len = jeb->free_size - reserved_size; 375 *len = jeb->free_size - reserved_size;
354 376
355 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size && 377 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
@@ -365,7 +387,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
365 spin_lock(&c->erase_completion_lock); 387 spin_lock(&c->erase_completion_lock);
366 } 388 }
367 389
368 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs)); 390 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
391 *len, jeb->offset + (c->sector_size - jeb->free_size)));
369 return 0; 392 return 0;
370} 393}
371 394
@@ -374,7 +397,6 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
374 * @c: superblock info 397 * @c: superblock info
375 * @new: new node reference to add 398 * @new: new node reference to add
376 * @len: length of this physical node 399 * @len: length of this physical node
377 * @dirty: dirty flag for new node
378 * 400 *
379 * Should only be used to report nodes for which space has been allocated 401 * Should only be used to report nodes for which space has been allocated
380 * by jffs2_reserve_space. 402 * by jffs2_reserve_space.
@@ -382,42 +404,30 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uin
382 * Must be called with the alloc_sem held. 404 * Must be called with the alloc_sem held.
383 */ 405 */
384 406
385int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new) 407struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
408 uint32_t ofs, uint32_t len,
409 struct jffs2_inode_cache *ic)
386{ 410{
387 struct jffs2_eraseblock *jeb; 411 struct jffs2_eraseblock *jeb;
388 uint32_t len; 412 struct jffs2_raw_node_ref *new;
389 413
390 jeb = &c->blocks[new->flash_offset / c->sector_size]; 414 jeb = &c->blocks[ofs / c->sector_size];
391 len = ref_totlen(c, jeb, new);
392 415
393 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len)); 416 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
417 ofs & ~3, ofs & 3, len));
394#if 1 418#if 1
395 /* we could get some obsolete nodes after nextblock was refiled 419 /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
396 in wbuf.c */ 420 if c->nextblock is set. Note that wbuf.c will file obsolete nodes
397 if ((c->nextblock || !ref_obsolete(new)) 421 even after refiling c->nextblock */
398 &&(jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) { 422 if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
423 && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
399 printk(KERN_WARNING "argh. node added in wrong place\n"); 424 printk(KERN_WARNING "argh. node added in wrong place\n");
400 jffs2_free_raw_node_ref(new); 425 return ERR_PTR(-EINVAL);
401 return -EINVAL;
402 } 426 }
403#endif 427#endif
404 spin_lock(&c->erase_completion_lock); 428 spin_lock(&c->erase_completion_lock);
405 429
406 if (!jeb->first_node) 430 new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
407 jeb->first_node = new;
408 if (jeb->last_node)
409 jeb->last_node->next_phys = new;
410 jeb->last_node = new;
411
412 jeb->free_size -= len;
413 c->free_size -= len;
414 if (ref_obsolete(new)) {
415 jeb->dirty_size += len;
416 c->dirty_size += len;
417 } else {
418 jeb->used_size += len;
419 c->used_size += len;
420 }
421 431
422 if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { 432 if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
423 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ 433 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
@@ -438,7 +448,7 @@ int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_r
438 448
439 spin_unlock(&c->erase_completion_lock); 449 spin_unlock(&c->erase_completion_lock);
440 450
441 return 0; 451 return new;
442} 452}
443 453
444 454
@@ -470,8 +480,9 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
470 struct jffs2_unknown_node n; 480 struct jffs2_unknown_node n;
471 int ret, addedsize; 481 int ret, addedsize;
472 size_t retlen; 482 size_t retlen;
483 uint32_t freed_len;
473 484
474 if(!ref) { 485 if(unlikely(!ref)) {
475 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); 486 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
476 return; 487 return;
477 } 488 }
@@ -499,32 +510,34 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
499 510
500 spin_lock(&c->erase_completion_lock); 511 spin_lock(&c->erase_completion_lock);
501 512
513 freed_len = ref_totlen(c, jeb, ref);
514
502 if (ref_flags(ref) == REF_UNCHECKED) { 515 if (ref_flags(ref) == REF_UNCHECKED) {
503 D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) { 516 D1(if (unlikely(jeb->unchecked_size < freed_len)) {
504 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", 517 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
505 ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); 518 freed_len, blocknr, ref->flash_offset, jeb->used_size);
506 BUG(); 519 BUG();
507 }) 520 })
508 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); 521 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
509 jeb->unchecked_size -= ref_totlen(c, jeb, ref); 522 jeb->unchecked_size -= freed_len;
510 c->unchecked_size -= ref_totlen(c, jeb, ref); 523 c->unchecked_size -= freed_len;
511 } else { 524 } else {
512 D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) { 525 D1(if (unlikely(jeb->used_size < freed_len)) {
513 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", 526 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
514 ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size); 527 freed_len, blocknr, ref->flash_offset, jeb->used_size);
515 BUG(); 528 BUG();
516 }) 529 })
517 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), ref_totlen(c, jeb, ref))); 530 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
518 jeb->used_size -= ref_totlen(c, jeb, ref); 531 jeb->used_size -= freed_len;
519 c->used_size -= ref_totlen(c, jeb, ref); 532 c->used_size -= freed_len;
520 } 533 }
521 534
 522	// Take care that wasted size is taken into account 535	// Take care that wasted size is taken into account
523 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) { 536 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
524 D1(printk(KERN_DEBUG "Dirtying\n")); 537 D1(printk("Dirtying\n"));
525 addedsize = ref_totlen(c, jeb, ref); 538 addedsize = freed_len;
526 jeb->dirty_size += ref_totlen(c, jeb, ref); 539 jeb->dirty_size += freed_len;
527 c->dirty_size += ref_totlen(c, jeb, ref); 540 c->dirty_size += freed_len;
528 541
529 /* Convert wasted space to dirty, if not a bad block */ 542 /* Convert wasted space to dirty, if not a bad block */
530 if (jeb->wasted_size) { 543 if (jeb->wasted_size) {
@@ -543,10 +556,10 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
543 } 556 }
544 } 557 }
545 } else { 558 } else {
546 D1(printk(KERN_DEBUG "Wasting\n")); 559 D1(printk("Wasting\n"));
547 addedsize = 0; 560 addedsize = 0;
548 jeb->wasted_size += ref_totlen(c, jeb, ref); 561 jeb->wasted_size += freed_len;
549 c->wasted_size += ref_totlen(c, jeb, ref); 562 c->wasted_size += freed_len;
550 } 563 }
551 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE; 564 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
552 565
@@ -622,7 +635,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
622 /* The erase_free_sem is locked, and has been since before we marked the node obsolete 635 /* The erase_free_sem is locked, and has been since before we marked the node obsolete
623 and potentially put its eraseblock onto the erase_pending_list. Thus, we know that 636 and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
624 the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet 637 the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
625 by jffs2_free_all_node_refs() in erase.c. Which is nice. */ 638 by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
626 639
627 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref))); 640 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
628 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); 641 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
@@ -634,8 +647,8 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
634 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); 647 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
635 goto out_erase_sem; 648 goto out_erase_sem;
636 } 649 }
637 if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) { 650 if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
638 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref)); 651 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
639 goto out_erase_sem; 652 goto out_erase_sem;
640 } 653 }
641 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { 654 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
@@ -671,6 +684,10 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
671 spin_lock(&c->erase_completion_lock); 684 spin_lock(&c->erase_completion_lock);
672 685
673 ic = jffs2_raw_ref_to_ic(ref); 686 ic = jffs2_raw_ref_to_ic(ref);
687 /* It seems we should never call jffs2_mark_node_obsolete() for
688 XATTR nodes.... yet. Make sure we notice if/when we change
689 that :) */
690 BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
674 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) 691 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
675 ; 692 ;
676 693
@@ -683,51 +700,6 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
683 spin_unlock(&c->erase_completion_lock); 700 spin_unlock(&c->erase_completion_lock);
684 } 701 }
685 702
686
687 /* Merge with the next node in the physical list, if there is one
688 and if it's also obsolete and if it doesn't belong to any inode */
689 if (ref->next_phys && ref_obsolete(ref->next_phys) &&
690 !ref->next_phys->next_in_ino) {
691 struct jffs2_raw_node_ref *n = ref->next_phys;
692
693 spin_lock(&c->erase_completion_lock);
694
695 ref->__totlen += n->__totlen;
696 ref->next_phys = n->next_phys;
697 if (jeb->last_node == n) jeb->last_node = ref;
698 if (jeb->gc_node == n) {
699 /* gc will be happy continuing gc on this node */
700 jeb->gc_node=ref;
701 }
702 spin_unlock(&c->erase_completion_lock);
703
704 jffs2_free_raw_node_ref(n);
705 }
706
707 /* Also merge with the previous node in the list, if there is one
708 and that one is obsolete */
709 if (ref != jeb->first_node ) {
710 struct jffs2_raw_node_ref *p = jeb->first_node;
711
712 spin_lock(&c->erase_completion_lock);
713
714 while (p->next_phys != ref)
715 p = p->next_phys;
716
717 if (ref_obsolete(p) && !ref->next_in_ino) {
718 p->__totlen += ref->__totlen;
719 if (jeb->last_node == ref) {
720 jeb->last_node = p;
721 }
722 if (jeb->gc_node == ref) {
723 /* gc will be happy continuing gc on this node */
724 jeb->gc_node=p;
725 }
726 p->next_phys = ref->next_phys;
727 jffs2_free_raw_node_ref(ref);
728 }
729 spin_unlock(&c->erase_completion_lock);
730 }
731 out_erase_sem: 703 out_erase_sem:
732 up(&c->erase_free_sem); 704 up(&c->erase_free_sem);
733} 705}
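With the nodemgmt.c hunks above, jffs2_reserve_space() no longer reports an offset at all: the caller only learns how much space is guaranteed, and the offset is fixed later, when jffs2_add_physical_node_ref() links the ref at the current write point of c->nextblock and returns it (or an ERR_PTR). A minimal user-space model of that two-step flow is sketched below; the block layout and function names are simplified stand-ins, not the kernel interfaces.

    /* Toy model: reserve space first (length only), then commit the node,
     * which decides the offset at the block's current write point. */
    #include <stdio.h>

    struct block { unsigned int offset, size, used; };

    /* "Reserve": report how much contiguous space is available. */
    static int reserve_space(struct block *b, unsigned int minsize, unsigned int *len)
    {
        unsigned int free = b->size - b->used;
        if (free < minsize)
            return -1;                  /* -ENOSPC in the kernel */
        *len = free;
        return 0;
    }

    /* "Add physical node ref": the offset is only decided here. */
    static unsigned int add_node(struct block *b, unsigned int len)
    {
        unsigned int ofs = b->offset + b->used;
        b->used += len;
        return ofs;
    }

    int main(void)
    {
        struct block b = { 0x40000, 0x10000, 0x120 };
        unsigned int len;

        if (reserve_space(&b, 0x44, &len))
            return 1;
        printf("reserved up to 0x%x bytes\n", len);
        printf("node written at 0x%x\n", add_node(&b, 0x44));
        return 0;
    }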
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index d307cf548625..cd4021bcb944 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -31,9 +31,7 @@ struct kvec;
31#define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode) 31#define JFFS2_F_I_MODE(f) (OFNI_EDONI_2SFFJ(f)->i_mode)
32#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid) 32#define JFFS2_F_I_UID(f) (OFNI_EDONI_2SFFJ(f)->i_uid)
33#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid) 33#define JFFS2_F_I_GID(f) (OFNI_EDONI_2SFFJ(f)->i_gid)
34 34#define JFFS2_F_I_RDEV(f) (OFNI_EDONI_2SFFJ(f)->i_rdev)
35#define JFFS2_F_I_RDEV_MIN(f) (iminor(OFNI_EDONI_2SFFJ(f)))
36#define JFFS2_F_I_RDEV_MAJ(f) (imajor(OFNI_EDONI_2SFFJ(f)))
37 35
38#define ITIME(sec) ((struct timespec){sec, 0}) 36#define ITIME(sec) ((struct timespec){sec, 0})
39#define I_SEC(tv) ((tv).tv_sec) 37#define I_SEC(tv) ((tv).tv_sec)
@@ -60,6 +58,10 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
60 f->target = NULL; 58 f->target = NULL;
61 f->flags = 0; 59 f->flags = 0;
62 f->usercompr = 0; 60 f->usercompr = 0;
61#ifdef CONFIG_JFFS2_FS_POSIX_ACL
62 f->i_acl_access = JFFS2_ACL_NOT_CACHED;
63 f->i_acl_default = JFFS2_ACL_NOT_CACHED;
64#endif
63} 65}
64 66
65 67
@@ -90,13 +92,10 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
90#define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e) 92#define jffs2_flash_writev(a,b,c,d,e,f) jffs2_flash_direct_writev(a,b,c,d,e)
91#define jffs2_wbuf_timeout NULL 93#define jffs2_wbuf_timeout NULL
92#define jffs2_wbuf_process NULL 94#define jffs2_wbuf_process NULL
93#define jffs2_nor_ecc(c) (0)
94#define jffs2_dataflash(c) (0) 95#define jffs2_dataflash(c) (0)
95#define jffs2_nor_wbuf_flash(c) (0)
96#define jffs2_nor_ecc_flash_setup(c) (0)
97#define jffs2_nor_ecc_flash_cleanup(c) do {} while (0)
98#define jffs2_dataflash_setup(c) (0) 96#define jffs2_dataflash_setup(c) (0)
99#define jffs2_dataflash_cleanup(c) do {} while (0) 97#define jffs2_dataflash_cleanup(c) do {} while (0)
98#define jffs2_nor_wbuf_flash(c) (0)
100#define jffs2_nor_wbuf_flash_setup(c) (0) 99#define jffs2_nor_wbuf_flash_setup(c) (0)
101#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0) 100#define jffs2_nor_wbuf_flash_cleanup(c) do {} while (0)
102 101
@@ -107,9 +106,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
107#ifdef CONFIG_JFFS2_SUMMARY 106#ifdef CONFIG_JFFS2_SUMMARY
108#define jffs2_can_mark_obsolete(c) (0) 107#define jffs2_can_mark_obsolete(c) (0)
109#else 108#else
110#define jffs2_can_mark_obsolete(c) \ 109#define jffs2_can_mark_obsolete(c) (c->mtd->flags & (MTD_BIT_WRITEABLE))
111 ((c->mtd->type == MTD_NORFLASH && !(c->mtd->flags & (MTD_ECC|MTD_PROGRAM_REGIONS))) || \
112 c->mtd->type == MTD_RAM)
113#endif 110#endif
114 111
115#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) 112#define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH)
@@ -133,15 +130,11 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c);
133int jffs2_nand_flash_setup(struct jffs2_sb_info *c); 130int jffs2_nand_flash_setup(struct jffs2_sb_info *c);
134void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c); 131void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c);
135 132
136#define jffs2_nor_ecc(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_ECC))
137int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c);
138void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c);
139
140#define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH) 133#define jffs2_dataflash(c) (c->mtd->type == MTD_DATAFLASH)
141int jffs2_dataflash_setup(struct jffs2_sb_info *c); 134int jffs2_dataflash_setup(struct jffs2_sb_info *c);
142void jffs2_dataflash_cleanup(struct jffs2_sb_info *c); 135void jffs2_dataflash_cleanup(struct jffs2_sb_info *c);
143 136
144#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && (c->mtd->flags & MTD_PROGRAM_REGIONS)) 137#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
145int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); 138int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
146void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); 139void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
147 140
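The os-linux.h hunk above collapses the old MTD_ECC/MTD_PROGRAM_REGIONS tests into a single capability check: media whose bits can be rewritten in place (MTD_BIT_WRITEABLE) may mark nodes obsolete directly, while NOR flash without that capability is routed through the write-buffer path. The snippet below re-states those two tests against a mock mtd_info; the constant values are illustrative, not taken from mtd-abi.h.

    /* Mock of the flag tests; the constants below are illustrative only. */
    #include <stdio.h>

    #define MTD_NORFLASH        3
    #define MTD_BIT_WRITEABLE   0x800   /* illustrative value */

    struct mtd_info { int type; unsigned int flags; };

    static int can_mark_obsolete(const struct mtd_info *mtd)
    {
        /* Bit-writeable media can flip the obsolete bit in place. */
        return !!(mtd->flags & MTD_BIT_WRITEABLE);
    }

    static int nor_wbuf_flash(const struct mtd_info *mtd)
    {
        /* NOR that is not bit-writeable needs the write-buffer path. */
        return mtd->type == MTD_NORFLASH && !(mtd->flags & MTD_BIT_WRITEABLE);
    }

    int main(void)
    {
        struct mtd_info nor_ecc   = { MTD_NORFLASH, 0 };
        struct mtd_info plain_nor = { MTD_NORFLASH, MTD_BIT_WRITEABLE };

        printf("ECC'd NOR: mark_obsolete=%d wbuf=%d\n",
               can_mark_obsolete(&nor_ecc), nor_wbuf_flash(&nor_ecc));
        printf("plain NOR: mark_obsolete=%d wbuf=%d\n",
               can_mark_obsolete(&plain_nor), nor_wbuf_flash(&plain_nor));
        return 0;
    }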
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index f1695642d0f7..5ea4faafa2d3 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -116,19 +116,42 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r
116 uint32_t *latest_mctime, uint32_t *mctime_ver) 116 uint32_t *latest_mctime, uint32_t *mctime_ver)
117{ 117{
118 struct jffs2_full_dirent *fd; 118 struct jffs2_full_dirent *fd;
119 uint32_t crc;
119 120
120 /* The direntry nodes are checked during the flash scanning */
121 BUG_ON(ref_flags(ref) == REF_UNCHECKED);
122 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ 121 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
123 BUG_ON(ref_obsolete(ref)); 122 BUG_ON(ref_obsolete(ref));
124 123
125 /* Sanity check */ 124 crc = crc32(0, rd, sizeof(*rd) - 8);
126 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) { 125 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
127 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n", 126 JFFS2_NOTICE("header CRC failed on dirent node at %#08x: read %#08x, calculated %#08x\n",
128 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen)); 127 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
129 return 1; 128 return 1;
130 } 129 }
131 130
131 /* If we've never checked the CRCs on this node, check them now */
132 if (ref_flags(ref) == REF_UNCHECKED) {
133 struct jffs2_eraseblock *jeb;
134 int len;
135
136 /* Sanity check */
137 if (unlikely(PAD((rd->nsize + sizeof(*rd))) != PAD(je32_to_cpu(rd->totlen)))) {
138 JFFS2_ERROR("illegal nsize in node at %#08x: nsize %#02x, totlen %#04x\n",
139 ref_offset(ref), rd->nsize, je32_to_cpu(rd->totlen));
140 return 1;
141 }
142
143 jeb = &c->blocks[ref->flash_offset / c->sector_size];
144 len = ref_totlen(c, jeb, ref);
145
146 spin_lock(&c->erase_completion_lock);
147 jeb->used_size += len;
148 jeb->unchecked_size -= len;
149 c->used_size += len;
150 c->unchecked_size -= len;
151 ref->flash_offset = ref_offset(ref) | REF_PRISTINE;
152 spin_unlock(&c->erase_completion_lock);
153 }
154
132 fd = jffs2_alloc_full_dirent(rd->nsize + 1); 155 fd = jffs2_alloc_full_dirent(rd->nsize + 1);
133 if (unlikely(!fd)) 156 if (unlikely(!fd))
134 return -ENOMEM; 157 return -ENOMEM;
@@ -198,13 +221,21 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
198 struct jffs2_tmp_dnode_info *tn; 221 struct jffs2_tmp_dnode_info *tn;
199 uint32_t len, csize; 222 uint32_t len, csize;
200 int ret = 1; 223 int ret = 1;
224 uint32_t crc;
201 225
202 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */ 226 /* Obsoleted. This cannot happen, surely? dwmw2 20020308 */
203 BUG_ON(ref_obsolete(ref)); 227 BUG_ON(ref_obsolete(ref));
204 228
229 crc = crc32(0, rd, sizeof(*rd) - 8);
230 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
231 JFFS2_NOTICE("node CRC failed on dnode at %#08x: read %#08x, calculated %#08x\n",
232 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
233 return 1;
234 }
235
205 tn = jffs2_alloc_tmp_dnode_info(); 236 tn = jffs2_alloc_tmp_dnode_info();
206 if (!tn) { 237 if (!tn) {
207 JFFS2_ERROR("failed to allocate tn (%d bytes).\n", sizeof(*tn)); 238 JFFS2_ERROR("failed to allocate tn (%zu bytes).\n", sizeof(*tn));
208 return -ENOMEM; 239 return -ENOMEM;
209 } 240 }
210 241
@@ -213,14 +244,6 @@ static inline int read_dnode(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
213 244
214 /* If we've never checked the CRCs on this node, check them now */ 245 /* If we've never checked the CRCs on this node, check them now */
215 if (ref_flags(ref) == REF_UNCHECKED) { 246 if (ref_flags(ref) == REF_UNCHECKED) {
216 uint32_t crc;
217
218 crc = crc32(0, rd, sizeof(*rd) - 8);
219 if (unlikely(crc != je32_to_cpu(rd->node_crc))) {
220 JFFS2_NOTICE("header CRC failed on node at %#08x: read %#08x, calculated %#08x\n",
221 ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
222 goto free_out;
223 }
224 247
225 /* Sanity checks */ 248 /* Sanity checks */
226 if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) || 249 if (unlikely(je32_to_cpu(rd->offset) > je32_to_cpu(rd->isize)) ||
@@ -343,7 +366,7 @@ free_out:
343 * Helper function for jffs2_get_inode_nodes(). 366 * Helper function for jffs2_get_inode_nodes().
344 * It is called every time an unknown node is found. 367 * It is called every time an unknown node is found.
345 * 368 *
346 * Returns: 0 on succes; 369 * Returns: 0 on success;
347 * 1 if the node should be marked obsolete; 370 * 1 if the node should be marked obsolete;
348 * negative error code on failure. 371 * negative error code on failure.
349 */ 372 */
@@ -354,37 +377,30 @@ static inline int read_unknown(struct jffs2_sb_info *c, struct jffs2_raw_node_re
354 377
355 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype)); 378 un->nodetype = cpu_to_je16(JFFS2_NODE_ACCURATE | je16_to_cpu(un->nodetype));
356 379
357 if (crc32(0, un, sizeof(struct jffs2_unknown_node) - 4) != je32_to_cpu(un->hdr_crc)) { 380 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
358 /* Hmmm. This should have been caught at scan time. */
359 JFFS2_NOTICE("node header CRC failed at %#08x. But it must have been OK earlier.\n", ref_offset(ref));
360 jffs2_dbg_dump_node(c, ref_offset(ref));
361 return 1;
362 } else {
363 switch(je16_to_cpu(un->nodetype) & JFFS2_COMPAT_MASK) {
364 381
365 case JFFS2_FEATURE_INCOMPAT: 382 case JFFS2_FEATURE_INCOMPAT:
366 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n", 383 JFFS2_ERROR("unknown INCOMPAT nodetype %#04X at %#08x\n",
367 je16_to_cpu(un->nodetype), ref_offset(ref)); 384 je16_to_cpu(un->nodetype), ref_offset(ref));
368 /* EEP */ 385 /* EEP */
369 BUG(); 386 BUG();
370 break; 387 break;
371 388
372 case JFFS2_FEATURE_ROCOMPAT: 389 case JFFS2_FEATURE_ROCOMPAT:
373 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n", 390 JFFS2_ERROR("unknown ROCOMPAT nodetype %#04X at %#08x\n",
374 je16_to_cpu(un->nodetype), ref_offset(ref)); 391 je16_to_cpu(un->nodetype), ref_offset(ref));
375 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO)); 392 BUG_ON(!(c->flags & JFFS2_SB_FLAG_RO));
376 break; 393 break;
377 394
378 case JFFS2_FEATURE_RWCOMPAT_COPY: 395 case JFFS2_FEATURE_RWCOMPAT_COPY:
379 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n", 396 JFFS2_NOTICE("unknown RWCOMPAT_COPY nodetype %#04X at %#08x\n",
380 je16_to_cpu(un->nodetype), ref_offset(ref)); 397 je16_to_cpu(un->nodetype), ref_offset(ref));
381 break; 398 break;
382 399
383 case JFFS2_FEATURE_RWCOMPAT_DELETE: 400 case JFFS2_FEATURE_RWCOMPAT_DELETE:
384 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n", 401 JFFS2_NOTICE("unknown RWCOMPAT_DELETE nodetype %#04X at %#08x\n",
385 je16_to_cpu(un->nodetype), ref_offset(ref)); 402 je16_to_cpu(un->nodetype), ref_offset(ref));
386 return 1; 403 return 1;
387 }
388 } 404 }
389 405
390 return 0; 406 return 0;
@@ -434,7 +450,7 @@ static int read_more(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref,
434 } 450 }
435 451
436 if (retlen < len) { 452 if (retlen < len) {
437 JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", 453 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n",
438 offs, retlen, len); 454 offs, retlen, len);
439 return -EIO; 455 return -EIO;
440 } 456 }
@@ -542,13 +558,25 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
542 } 558 }
543 559
544 if (retlen < len) { 560 if (retlen < len) {
545 JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ref_offset(ref), retlen, len); 561 JFFS2_ERROR("short read at %#08x: %zu instead of %d.\n", ref_offset(ref), retlen, len);
546 err = -EIO; 562 err = -EIO;
547 goto free_out; 563 goto free_out;
548 } 564 }
549 565
550 node = (union jffs2_node_union *)bufstart; 566 node = (union jffs2_node_union *)bufstart;
551 567
568 /* No need to mask in the valid bit; it shouldn't be invalid */
569 if (je32_to_cpu(node->u.hdr_crc) != crc32(0, node, sizeof(node->u)-4)) {
570 JFFS2_NOTICE("Node header CRC failed at %#08x. {%04x,%04x,%08x,%08x}\n",
571 ref_offset(ref), je16_to_cpu(node->u.magic),
572 je16_to_cpu(node->u.nodetype),
573 je32_to_cpu(node->u.totlen),
574 je32_to_cpu(node->u.hdr_crc));
575 jffs2_dbg_dump_node(c, ref_offset(ref));
576 jffs2_mark_node_obsolete(c, ref);
577 goto cont;
578 }
579
552 switch (je16_to_cpu(node->u.nodetype)) { 580 switch (je16_to_cpu(node->u.nodetype)) {
553 581
554 case JFFS2_NODETYPE_DIRENT: 582 case JFFS2_NODETYPE_DIRENT:
@@ -606,6 +634,7 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf
606 goto free_out; 634 goto free_out;
607 635
608 } 636 }
637 cont:
609 spin_lock(&c->erase_completion_lock); 638 spin_lock(&c->erase_completion_lock);
610 } 639 }
611 640
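The readinode.c changes above move the node-header CRC check to the front of read_direntry() and read_dnode(): crc32(0, rd, sizeof(*rd) - 8) covers the whole on-flash header except its two trailing CRC words (node_crc and the name/data CRC) and is compared against node_crc before anything else is trusted. The sketch below is a self-contained illustration of that "struct minus the last 8 bytes" idiom; the layout is a simplified stand-in and the bitwise crc32() here is not guaranteed to byte-match the kernel's crc32() convention.

    /* Sketch: CRC a node header up to, but not including, its two trailing
     * CRC words -- the sizeof(*rd) - 8 idiom used above. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct raw_dirent_model {            /* simplified stand-in layout */
        uint16_t magic, nodetype;
        uint32_t totlen, hdr_crc;
        uint32_t pino, version, ino, mctime;
        uint8_t  nsize, type, unused[2];
        uint32_t node_crc;               /* covers everything above */
        uint32_t name_crc;               /* covers the name that follows */
    };

    /* Plain bitwise CRC-32 (poly 0xEDB88320, caller-supplied seed). */
    static uint32_t crc32(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        while (len--) {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
        }
        return crc;
    }

    int main(void)
    {
        struct raw_dirent_model rd;
        memset(&rd, 0, sizeof(rd));
        rd.magic = 0x1985;
        rd.nsize = 5;

        /* Everything except the two trailing CRC fields is covered. */
        rd.node_crc = crc32(0, &rd, sizeof(rd) - 8);
        printf("node_crc = 0x%08x\n", (unsigned)rd.node_crc);
        return 0;
    }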
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index cf55b221fc2b..61618080b86f 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -65,6 +65,28 @@ static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
65 return DEFAULT_EMPTY_SCAN_SIZE; 65 return DEFAULT_EMPTY_SCAN_SIZE;
66} 66}
67 67
68static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
69{
70 int ret;
71
72 if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
73 return ret;
74 if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
75 return ret;
76 /* Turned wasted size into dirty, since we apparently
77 think it's recoverable now. */
78 jeb->dirty_size += jeb->wasted_size;
79 c->dirty_size += jeb->wasted_size;
80 c->wasted_size -= jeb->wasted_size;
81 jeb->wasted_size = 0;
82 if (VERYDIRTY(c, jeb->dirty_size)) {
83 list_add(&jeb->list, &c->very_dirty_list);
84 } else {
85 list_add(&jeb->list, &c->dirty_list);
86 }
87 return 0;
88}
89
68int jffs2_scan_medium(struct jffs2_sb_info *c) 90int jffs2_scan_medium(struct jffs2_sb_info *c)
69{ 91{
70 int i, ret; 92 int i, ret;
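file_dirty() above is a new helper for the scan path: it converts a block's remaining free space into dirty space via jffs2_scan_dirty_space(), folds any wasted space back into dirty (scan now considers it recoverable), and files the block on the dirty or very-dirty list. The sketch below models just that bookkeeping in user space; the threshold and structure fields are illustrative stand-ins for the kernel's VERYDIRTY() test and struct jffs2_eraseblock.

    /* Toy model of the wasted->dirty conversion and dirty/very-dirty filing. */
    #include <stdio.h>

    #define VERY_DIRTY_THRESHOLD 0x2000   /* illustrative, not the kernel's VERYDIRTY() */

    struct eraseblock { unsigned int free, dirty, wasted; const char *list; };

    static void file_dirty(struct eraseblock *jeb)
    {
        /* Whatever could not be written is no longer "free". */
        jeb->dirty += jeb->free;
        jeb->free = 0;

        /* Wasted space is now considered recoverable, so count it as dirty. */
        jeb->dirty += jeb->wasted;
        jeb->wasted = 0;

        jeb->list = (jeb->dirty >= VERY_DIRTY_THRESHOLD) ? "very_dirty_list"
                                                         : "dirty_list";
    }

    int main(void)
    {
        struct eraseblock jeb = { 0x1800, 0x400, 0x200, "clean_list" };
        file_dirty(&jeb);
        printf("dirty=0x%x wasted=0x%x -> %s\n", jeb.dirty, jeb.wasted, jeb.list);
        return 0;
    }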
@@ -170,34 +192,20 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
170 (!c->nextblock || c->nextblock->free_size < jeb->free_size)) { 192 (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
171 /* Better candidate for the next writes to go to */ 193 /* Better candidate for the next writes to go to */
172 if (c->nextblock) { 194 if (c->nextblock) {
173 c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; 195 ret = file_dirty(c, c->nextblock);
174 c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size; 196 if (ret)
175 c->free_size -= c->nextblock->free_size; 197 return ret;
176 c->wasted_size -= c->nextblock->wasted_size;
177 c->nextblock->free_size = c->nextblock->wasted_size = 0;
178 if (VERYDIRTY(c, c->nextblock->dirty_size)) {
179 list_add(&c->nextblock->list, &c->very_dirty_list);
180 } else {
181 list_add(&c->nextblock->list, &c->dirty_list);
182 }
183 /* deleting summary information of the old nextblock */ 198 /* deleting summary information of the old nextblock */
184 jffs2_sum_reset_collected(c->summary); 199 jffs2_sum_reset_collected(c->summary);
185 } 200 }
186 /* update collected summary infromation for the current nextblock */ 201 /* update collected summary information for the current nextblock */
187 jffs2_sum_move_collected(c, s); 202 jffs2_sum_move_collected(c, s);
188 D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); 203 D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset));
189 c->nextblock = jeb; 204 c->nextblock = jeb;
190 } else { 205 } else {
191 jeb->dirty_size += jeb->free_size + jeb->wasted_size; 206 ret = file_dirty(c, jeb);
192 c->dirty_size += jeb->free_size + jeb->wasted_size; 207 if (ret)
193 c->free_size -= jeb->free_size; 208 return ret;
194 c->wasted_size -= jeb->wasted_size;
195 jeb->free_size = jeb->wasted_size = 0;
196 if (VERYDIRTY(c, jeb->dirty_size)) {
197 list_add(&jeb->list, &c->very_dirty_list);
198 } else {
199 list_add(&jeb->list, &c->dirty_list);
200 }
201 } 209 }
202 break; 210 break;
203 211
@@ -222,9 +230,6 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
222 } 230 }
223 } 231 }
224 232
225 if (jffs2_sum_active() && s)
226 kfree(s);
227
228 /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */ 233 /* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
229 if (c->nextblock && (c->nextblock->dirty_size)) { 234 if (c->nextblock && (c->nextblock->dirty_size)) {
230 c->nextblock->wasted_size += c->nextblock->dirty_size; 235 c->nextblock->wasted_size += c->nextblock->dirty_size;
@@ -242,11 +247,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
242 247
243 D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", 248 D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
244 skip)); 249 skip));
245 c->nextblock->wasted_size += skip; 250 jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
246 c->wasted_size += skip; 251 jffs2_scan_dirty_space(c, c->nextblock, skip);
247
248 c->nextblock->free_size -= skip;
249 c->free_size -= skip;
250 } 252 }
251#endif 253#endif
252 if (c->nr_erasing_blocks) { 254 if (c->nr_erasing_blocks) {
@@ -266,6 +268,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
266 else 268 else
267 c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size); 269 c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
268#endif 270#endif
271 if (s)
272 kfree(s);
273
269 return ret; 274 return ret;
270} 275}
271 276
@@ -290,7 +295,7 @@ int jffs2_fill_scan_buf (struct jffs2_sb_info *c, void *buf,
290int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 295int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
291{ 296{
292 if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size 297 if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
293 && (!jeb->first_node || !jeb->first_node->next_phys) ) 298 && (!jeb->first_node || !ref_next(jeb->first_node)) )
294 return BLK_STATE_CLEANMARKER; 299 return BLK_STATE_CLEANMARKER;
295 300
296 /* move blocks with max 4 byte dirty space to cleanlist */ 301 /* move blocks with max 4 byte dirty space to cleanlist */
@@ -306,11 +311,119 @@ int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
306 return BLK_STATE_ALLDIRTY; 311 return BLK_STATE_ALLDIRTY;
307} 312}
308 313
314#ifdef CONFIG_JFFS2_FS_XATTR
315static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
316 struct jffs2_raw_xattr *rx, uint32_t ofs,
317 struct jffs2_summary *s)
318{
319 struct jffs2_xattr_datum *xd;
320 uint32_t totlen, crc;
321 int err;
322
323 crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
324 if (crc != je32_to_cpu(rx->node_crc)) {
325 if (je32_to_cpu(rx->node_crc) != 0xffffffff)
326 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
327 ofs, je32_to_cpu(rx->node_crc), crc);
328 if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
329 return err;
330 return 0;
331 }
332
333 totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len));
334 if (totlen != je32_to_cpu(rx->totlen)) {
335 JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
336 ofs, je32_to_cpu(rx->totlen), totlen);
337 if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
338 return err;
339 return 0;
340 }
341
342 xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version));
343 if (IS_ERR(xd)) {
344 if (PTR_ERR(xd) == -EEXIST) {
345 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen)))))
346 return err;
347 return 0;
348 }
349 return PTR_ERR(xd);
350 }
351 xd->xprefix = rx->xprefix;
352 xd->name_len = rx->name_len;
353 xd->value_len = je16_to_cpu(rx->value_len);
354 xd->data_crc = je32_to_cpu(rx->data_crc);
355
356 xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
357 /* FIXME */ xd->node->next_in_ino = (void *)xd;
358
359 if (jffs2_sum_active())
360 jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
 361	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
362 ofs, xd->xid, xd->version);
363 return 0;
364}
365
366static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
367 struct jffs2_raw_xref *rr, uint32_t ofs,
368 struct jffs2_summary *s)
369{
370 struct jffs2_xattr_ref *ref;
371 uint32_t crc;
372 int err;
373
374 crc = crc32(0, rr, sizeof(*rr) - 4);
375 if (crc != je32_to_cpu(rr->node_crc)) {
376 if (je32_to_cpu(rr->node_crc) != 0xffffffff)
377 JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
378 ofs, je32_to_cpu(rr->node_crc), crc);
379 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
380 return err;
381 return 0;
382 }
383
384 if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
385 JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
386 ofs, je32_to_cpu(rr->totlen),
387 PAD(sizeof(struct jffs2_raw_xref)));
388 if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
389 return err;
390 return 0;
391 }
392
393 ref = jffs2_alloc_xattr_ref();
394 if (!ref)
395 return -ENOMEM;
396
 397	/* BEFORE jffs2_build_xattr_subsystem() is called,
 398	 * ref->xid is used to store the 32bit xid; xd is not used.
 399	 * ref->ino is used to store the 32bit inode number; ic is not used.
 400	 * Those members are declared as a union, so their uses are
 401	 * mutually exclusive. In a similar way, ref->next is temporarily
 402	 * used to chain all xattr_ref objects; it is re-chained to
 403	 * jffs2_inode_cache in jffs2_build_xattr_subsystem().
 404	 */
405 ref->ino = je32_to_cpu(rr->ino);
406 ref->xid = je32_to_cpu(rr->xid);
407 ref->next = c->xref_temp;
408 c->xref_temp = ref;
409
410 ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL);
411 /* FIXME */ ref->node->next_in_ino = (void *)ref;
412
413 if (jffs2_sum_active())
414 jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
415 dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
416 ofs, ref->xid, ref->ino);
417 return 0;
418}
419#endif
420
421/* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
422 the flash, XIP-style */
309static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 423static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
310 unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) { 424 unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
311 struct jffs2_unknown_node *node; 425 struct jffs2_unknown_node *node;
312 struct jffs2_unknown_node crcnode; 426 struct jffs2_unknown_node crcnode;
313 struct jffs2_sum_marker *sm;
314 uint32_t ofs, prevofs; 427 uint32_t ofs, prevofs;
315 uint32_t hdr_crc, buf_ofs, buf_len; 428 uint32_t hdr_crc, buf_ofs, buf_len;
316 int err; 429 int err;
@@ -344,44 +457,75 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
344#endif 457#endif
345 458
346 if (jffs2_sum_active()) { 459 if (jffs2_sum_active()) {
347 sm = kmalloc(sizeof(struct jffs2_sum_marker), GFP_KERNEL); 460 struct jffs2_sum_marker *sm;
348 if (!sm) { 461 void *sumptr = NULL;
349 return -ENOMEM; 462 uint32_t sumlen;
350 } 463
351 464 if (!buf_size) {
352 err = jffs2_fill_scan_buf(c, (unsigned char *) sm, jeb->offset + c->sector_size - 465 /* XIP case. Just look, point at the summary if it's there */
353 sizeof(struct jffs2_sum_marker), sizeof(struct jffs2_sum_marker)); 466 sm = (void *)buf + c->sector_size - sizeof(*sm);
354 if (err) { 467 if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
355 kfree(sm); 468 sumptr = buf + je32_to_cpu(sm->offset);
356 return err; 469 sumlen = c->sector_size - je32_to_cpu(sm->offset);
357 } 470 }
358 471 } else {
359 if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC ) { 472 /* If NAND flash, read a whole page of it. Else just the end */
360 err = jffs2_sum_scan_sumnode(c, jeb, je32_to_cpu(sm->offset), &pseudo_random); 473 if (c->wbuf_pagesize)
361 if (err) { 474 buf_len = c->wbuf_pagesize;
362 kfree(sm); 475 else
476 buf_len = sizeof(*sm);
477
478 /* Read as much as we want into the _end_ of the preallocated buffer */
479 err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
480 jeb->offset + c->sector_size - buf_len,
481 buf_len);
482 if (err)
363 return err; 483 return err;
484
485 sm = (void *)buf + buf_size - sizeof(*sm);
486 if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
487 sumlen = c->sector_size - je32_to_cpu(sm->offset);
488 sumptr = buf + buf_size - sumlen;
489
490 /* Now, make sure the summary itself is available */
491 if (sumlen > buf_size) {
492 /* Need to kmalloc for this. */
493 sumptr = kmalloc(sumlen, GFP_KERNEL);
494 if (!sumptr)
495 return -ENOMEM;
496 memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
497 }
498 if (buf_len < sumlen) {
499 /* Need to read more so that the entire summary node is present */
500 err = jffs2_fill_scan_buf(c, sumptr,
501 jeb->offset + c->sector_size - sumlen,
502 sumlen - buf_len);
503 if (err)
504 return err;
505 }
364 } 506 }
507
365 } 508 }
366 509
367 kfree(sm); 510 if (sumptr) {
511 err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);
368 512
369 ofs = jeb->offset; 513 if (buf_size && sumlen > buf_size)
370 prevofs = jeb->offset - 1; 514 kfree(sumptr);
515 /* If it returns with a real error, bail.
516 If it returns positive, that's a block classification
517 (i.e. BLK_STATE_xxx) so return that too.
518 If it returns zero, fall through to full scan. */
519 if (err)
520 return err;
521 }
371 } 522 }
372 523
373 buf_ofs = jeb->offset; 524 buf_ofs = jeb->offset;
374 525
375 if (!buf_size) { 526 if (!buf_size) {
527 /* This is the XIP case -- we're reading _directly_ from the flash chip */
376 buf_len = c->sector_size; 528 buf_len = c->sector_size;
377
378 if (jffs2_sum_active()) {
379 /* must reread because of summary test */
380 err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
381 if (err)
382 return err;
383 }
384
385 } else { 529 } else {
386 buf_len = EMPTY_SCAN_SIZE(c->sector_size); 530 buf_len = EMPTY_SCAN_SIZE(c->sector_size);
387 err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len); 531 err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
@@ -418,7 +562,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
418 if (ofs) { 562 if (ofs) {
419 D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset, 563 D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
420 jeb->offset + ofs)); 564 jeb->offset + ofs));
421 DIRTY_SPACE(ofs); 565 if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
566 return err;
567 if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
568 return err;
422 } 569 }
423 570
424 /* Now ofs is a complete physical flash offset as it always was... */ 571 /* Now ofs is a complete physical flash offset as it always was... */
@@ -433,6 +580,11 @@ scan_more:
433 580
434 jffs2_dbg_acct_paranoia_check_nolock(c, jeb); 581 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
435 582
583 /* Make sure there are node refs available for use */
584 err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
585 if (err)
586 return err;
587
436 cond_resched(); 588 cond_resched();
437 589
438 if (ofs & 3) { 590 if (ofs & 3) {
@@ -442,7 +594,8 @@ scan_more:
442 } 594 }
443 if (ofs == prevofs) { 595 if (ofs == prevofs) {
444 printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs); 596 printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
445 DIRTY_SPACE(4); 597 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
598 return err;
446 ofs += 4; 599 ofs += 4;
447 continue; 600 continue;
448 } 601 }
@@ -451,7 +604,8 @@ scan_more:
451 if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { 604 if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
452 D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), 605 D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
453 jeb->offset, c->sector_size, ofs, sizeof(*node))); 606 jeb->offset, c->sector_size, ofs, sizeof(*node)));
454 DIRTY_SPACE((jeb->offset + c->sector_size)-ofs); 607 if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
608 return err;
455 break; 609 break;
456 } 610 }
457 611
@@ -481,7 +635,8 @@ scan_more:
481 if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) { 635 if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff) {
482 printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", 636 printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n",
483 empty_start, ofs); 637 empty_start, ofs);
484 DIRTY_SPACE(ofs-empty_start); 638 if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
639 return err;
485 goto scan_more; 640 goto scan_more;
486 } 641 }
487 642
@@ -494,7 +649,7 @@ scan_more:
494 /* If we're only checking the beginning of a block with a cleanmarker, 649 /* If we're only checking the beginning of a block with a cleanmarker,
495 bail now */ 650 bail now */
496 if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && 651 if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
497 c->cleanmarker_size && !jeb->dirty_size && !jeb->first_node->next_phys) { 652 c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
498 D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); 653 D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size)));
499 return BLK_STATE_CLEANMARKER; 654 return BLK_STATE_CLEANMARKER;
500 } 655 }
@@ -518,20 +673,23 @@ scan_more:
518 673
519 if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { 674 if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
520 printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs); 675 printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
521 DIRTY_SPACE(4); 676 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
677 return err;
522 ofs += 4; 678 ofs += 4;
523 continue; 679 continue;
524 } 680 }
525 if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { 681 if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
526 D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs)); 682 D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs));
527 DIRTY_SPACE(4); 683 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
684 return err;
528 ofs += 4; 685 ofs += 4;
529 continue; 686 continue;
530 } 687 }
531 if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { 688 if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
532 printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs); 689 printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
533 printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n"); 690 printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
534 DIRTY_SPACE(4); 691 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
692 return err;
535 ofs += 4; 693 ofs += 4;
536 continue; 694 continue;
537 } 695 }
@@ -540,7 +698,8 @@ scan_more:
540 noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", 698 noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
541 JFFS2_MAGIC_BITMASK, ofs, 699 JFFS2_MAGIC_BITMASK, ofs,
542 je16_to_cpu(node->magic)); 700 je16_to_cpu(node->magic));
543 DIRTY_SPACE(4); 701 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
702 return err;
544 ofs += 4; 703 ofs += 4;
545 continue; 704 continue;
546 } 705 }
@@ -557,7 +716,8 @@ scan_more:
557 je32_to_cpu(node->totlen), 716 je32_to_cpu(node->totlen),
558 je32_to_cpu(node->hdr_crc), 717 je32_to_cpu(node->hdr_crc),
559 hdr_crc); 718 hdr_crc);
560 DIRTY_SPACE(4); 719 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
720 return err;
561 ofs += 4; 721 ofs += 4;
562 continue; 722 continue;
563 } 723 }
@@ -568,7 +728,8 @@ scan_more:
568 printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", 728 printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
569 ofs, je32_to_cpu(node->totlen)); 729 ofs, je32_to_cpu(node->totlen));
570 printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n"); 730 printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
571 DIRTY_SPACE(4); 731 if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
732 return err;
572 ofs += 4; 733 ofs += 4;
573 continue; 734 continue;
574 } 735 }
@@ -576,7 +737,8 @@ scan_more:
576 if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { 737 if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
577 /* Wheee. This is an obsoleted node */ 738 /* Wheee. This is an obsoleted node */
578 D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs)); 739 D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
579 DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); 740 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
741 return err;
580 ofs += PAD(je32_to_cpu(node->totlen)); 742 ofs += PAD(je32_to_cpu(node->totlen));
581 continue; 743 continue;
582 } 744 }
@@ -614,30 +776,59 @@ scan_more:
614 ofs += PAD(je32_to_cpu(node->totlen)); 776 ofs += PAD(je32_to_cpu(node->totlen));
615 break; 777 break;
616 778
779#ifdef CONFIG_JFFS2_FS_XATTR
780 case JFFS2_NODETYPE_XATTR:
781 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
782 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
783 D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)"
784 " left to end of buf. Reading 0x%x at 0x%08x\n",
785 je32_to_cpu(node->totlen), buf_len, ofs));
786 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
787 if (err)
788 return err;
789 buf_ofs = ofs;
790 node = (void *)buf;
791 }
792 err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
793 if (err)
794 return err;
795 ofs += PAD(je32_to_cpu(node->totlen));
796 break;
797 case JFFS2_NODETYPE_XREF:
798 if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
799 buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
800 D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)"
801 " left to end of buf. Reading 0x%x at 0x%08x\n",
802 je32_to_cpu(node->totlen), buf_len, ofs));
803 err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
804 if (err)
805 return err;
806 buf_ofs = ofs;
807 node = (void *)buf;
808 }
809 err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
810 if (err)
811 return err;
812 ofs += PAD(je32_to_cpu(node->totlen));
813 break;
814#endif /* CONFIG_JFFS2_FS_XATTR */
815
617 case JFFS2_NODETYPE_CLEANMARKER: 816 case JFFS2_NODETYPE_CLEANMARKER:
618 D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); 817 D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
619 if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { 818 if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
620 printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", 819 printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
621 ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); 820 ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
622 DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); 821 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
822 return err;
623 ofs += PAD(sizeof(struct jffs2_unknown_node)); 823 ofs += PAD(sizeof(struct jffs2_unknown_node));
624 } else if (jeb->first_node) { 824 } else if (jeb->first_node) {
625 printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset); 825 printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
626 DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node))); 826 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
827 return err;
627 ofs += PAD(sizeof(struct jffs2_unknown_node)); 828 ofs += PAD(sizeof(struct jffs2_unknown_node));
628 } else { 829 } else {
629 struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref(); 830 jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
630 if (!marker_ref) {
631 printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
632 return -ENOMEM;
633 }
634 marker_ref->next_in_ino = NULL;
635 marker_ref->next_phys = NULL;
636 marker_ref->flash_offset = ofs | REF_NORMAL;
637 marker_ref->__totlen = c->cleanmarker_size;
638 jeb->first_node = jeb->last_node = marker_ref;
639 831
640 USED_SPACE(PAD(c->cleanmarker_size));
641 ofs += PAD(c->cleanmarker_size); 832 ofs += PAD(c->cleanmarker_size);
642 } 833 }
643 break; 834 break;
@@ -645,7 +836,8 @@ scan_more:
645 case JFFS2_NODETYPE_PADDING: 836 case JFFS2_NODETYPE_PADDING:
646 if (jffs2_sum_active()) 837 if (jffs2_sum_active())
647 jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen)); 838 jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
648 DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); 839 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
840 return err;
649 ofs += PAD(je32_to_cpu(node->totlen)); 841 ofs += PAD(je32_to_cpu(node->totlen));
650 break; 842 break;
651 843
@@ -656,7 +848,8 @@ scan_more:
656 c->flags |= JFFS2_SB_FLAG_RO; 848 c->flags |= JFFS2_SB_FLAG_RO;
657 if (!(jffs2_is_readonly(c))) 849 if (!(jffs2_is_readonly(c)))
658 return -EROFS; 850 return -EROFS;
659 DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); 851 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
852 return err;
660 ofs += PAD(je32_to_cpu(node->totlen)); 853 ofs += PAD(je32_to_cpu(node->totlen));
661 break; 854 break;
662 855
@@ -666,15 +859,21 @@ scan_more:
666 859
667 case JFFS2_FEATURE_RWCOMPAT_DELETE: 860 case JFFS2_FEATURE_RWCOMPAT_DELETE:
668 D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); 861 D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
669 DIRTY_SPACE(PAD(je32_to_cpu(node->totlen))); 862 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
863 return err;
670 ofs += PAD(je32_to_cpu(node->totlen)); 864 ofs += PAD(je32_to_cpu(node->totlen));
671 break; 865 break;
672 866
673 case JFFS2_FEATURE_RWCOMPAT_COPY: 867 case JFFS2_FEATURE_RWCOMPAT_COPY: {
674 D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); 868 D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
675 USED_SPACE(PAD(je32_to_cpu(node->totlen))); 869
870 jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
871
872 /* We can't summarise nodes we don't grok */
873 jffs2_sum_disable_collecting(s);
676 ofs += PAD(je32_to_cpu(node->totlen)); 874 ofs += PAD(je32_to_cpu(node->totlen));
677 break; 875 break;
876 }
678 } 877 }
679 } 878 }
680 } 879 }
@@ -687,9 +886,9 @@ scan_more:
687 } 886 }
688 } 887 }
689 888
690 D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset, 889 D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
 691 jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size)); 890 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
692 891
693 /* mark_node_obsolete can add to wasted !! */ 892 /* mark_node_obsolete can add to wasted !! */
694 if (jeb->wasted_size) { 893 if (jeb->wasted_size) {
695 jeb->dirty_size += jeb->wasted_size; 894 jeb->dirty_size += jeb->wasted_size;
@@ -730,9 +929,9 @@ struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uin
730static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 929static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
731 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s) 930 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
732{ 931{
733 struct jffs2_raw_node_ref *raw;
734 struct jffs2_inode_cache *ic; 932 struct jffs2_inode_cache *ic;
735 uint32_t ino = je32_to_cpu(ri->ino); 933 uint32_t ino = je32_to_cpu(ri->ino);
934 int err;
736 935
737 D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); 936 D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));
738 937
@@ -745,12 +944,6 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
745 Which means that the _full_ amount of time to get to proper write mode with GC 944 Which means that the _full_ amount of time to get to proper write mode with GC
746 operational may actually be _longer_ than before. Sucks to be me. */ 945 operational may actually be _longer_ than before. Sucks to be me. */
747 946
748 raw = jffs2_alloc_raw_node_ref();
749 if (!raw) {
750 printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
751 return -ENOMEM;
752 }
753
754 ic = jffs2_get_ino_cache(c, ino); 947 ic = jffs2_get_ino_cache(c, ino);
755 if (!ic) { 948 if (!ic) {
756 /* Inocache get failed. Either we read a bogus ino# or it's just genuinely the 949 /* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
@@ -762,30 +955,17 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
762 printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 955 printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
763 ofs, je32_to_cpu(ri->node_crc), crc); 956 ofs, je32_to_cpu(ri->node_crc), crc);
764 /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ 957 /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
765 DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen))); 958 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(ri->totlen)))))
766 jffs2_free_raw_node_ref(raw); 959 return err;
767 return 0; 960 return 0;
768 } 961 }
769 ic = jffs2_scan_make_ino_cache(c, ino); 962 ic = jffs2_scan_make_ino_cache(c, ino);
770 if (!ic) { 963 if (!ic)
771 jffs2_free_raw_node_ref(raw);
772 return -ENOMEM; 964 return -ENOMEM;
773 }
774 } 965 }
775 966
776 /* Wheee. It worked */ 967 /* Wheee. It worked */
777 968 jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
778 raw->flash_offset = ofs | REF_UNCHECKED;
779 raw->__totlen = PAD(je32_to_cpu(ri->totlen));
780 raw->next_phys = NULL;
781 raw->next_in_ino = ic->nodes;
782
783 ic->nodes = raw;
784 if (!jeb->first_node)
785 jeb->first_node = raw;
786 if (jeb->last_node)
787 jeb->last_node->next_phys = raw;
788 jeb->last_node = raw;
789 969
790 D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", 970 D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
791 je32_to_cpu(ri->ino), je32_to_cpu(ri->version), 971 je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
@@ -794,8 +974,6 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
794 974
795 pseudo_random += je32_to_cpu(ri->version); 975 pseudo_random += je32_to_cpu(ri->version);
796 976
797 UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
798
799 if (jffs2_sum_active()) { 977 if (jffs2_sum_active()) {
800 jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset); 978 jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
801 } 979 }
@@ -806,10 +984,10 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
806static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 984static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
807 struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s) 985 struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
808{ 986{
809 struct jffs2_raw_node_ref *raw;
810 struct jffs2_full_dirent *fd; 987 struct jffs2_full_dirent *fd;
811 struct jffs2_inode_cache *ic; 988 struct jffs2_inode_cache *ic;
812 uint32_t crc; 989 uint32_t crc;
990 int err;
813 991
814 D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs)); 992 D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));
815 993
@@ -821,7 +999,8 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
821 printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", 999 printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
822 ofs, je32_to_cpu(rd->node_crc), crc); 1000 ofs, je32_to_cpu(rd->node_crc), crc);
823 /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ 1001 /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
824 DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen))); 1002 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
1003 return err;
825 return 0; 1004 return 0;
826 } 1005 }
827 1006
@@ -842,40 +1021,23 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
842 jffs2_free_full_dirent(fd); 1021 jffs2_free_full_dirent(fd);
843 /* FIXME: Why do we believe totlen? */ 1022 /* FIXME: Why do we believe totlen? */
844 /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ 1023 /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
845 DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen))); 1024 if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
1025 return err;
846 return 0; 1026 return 0;
847 } 1027 }
848 raw = jffs2_alloc_raw_node_ref();
849 if (!raw) {
850 jffs2_free_full_dirent(fd);
851 printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
852 return -ENOMEM;
853 }
854 ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino)); 1028 ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
855 if (!ic) { 1029 if (!ic) {
856 jffs2_free_full_dirent(fd); 1030 jffs2_free_full_dirent(fd);
857 jffs2_free_raw_node_ref(raw);
858 return -ENOMEM; 1031 return -ENOMEM;
859 } 1032 }
860 1033
861 raw->__totlen = PAD(je32_to_cpu(rd->totlen)); 1034 fd->raw = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rd->totlen)), ic);
862 raw->flash_offset = ofs | REF_PRISTINE;
863 raw->next_phys = NULL;
864 raw->next_in_ino = ic->nodes;
865 ic->nodes = raw;
866 if (!jeb->first_node)
867 jeb->first_node = raw;
868 if (jeb->last_node)
869 jeb->last_node->next_phys = raw;
870 jeb->last_node = raw;
871 1035
872 fd->raw = raw;
873 fd->next = NULL; 1036 fd->next = NULL;
874 fd->version = je32_to_cpu(rd->version); 1037 fd->version = je32_to_cpu(rd->version);
875 fd->ino = je32_to_cpu(rd->ino); 1038 fd->ino = je32_to_cpu(rd->ino);
876 fd->nhash = full_name_hash(fd->name, rd->nsize); 1039 fd->nhash = full_name_hash(fd->name, rd->nsize);
877 fd->type = rd->type; 1040 fd->type = rd->type;
878 USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
879 jffs2_add_fd_to_list(c, fd, &ic->scan_dents); 1041 jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
880 1042
881 if (jffs2_sum_active()) { 1043 if (jffs2_sum_active()) {
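Throughout scan.c the patch replaces two open-coded patterns: the DIRTY_SPACE()/USED_SPACE()/UNCHECKED_SPACE() accounting macros give way to jffs2_scan_dirty_space(), which can fail and is therefore error-checked, and the allocate-and-splice sequence for struct jffs2_raw_node_ref gives way to jffs2_link_node_ref(). A minimal sketch of what that helper consolidates, reconstructed from the open-coded sequences deleted above (the in-tree helper is assumed to also draw refs from a preallocated pool and to fold in the space accounting):

	/* Illustrative sketch only -- reconstructed from the removed open-coded linking */
	static struct jffs2_raw_node_ref *link_node_ref_sketch(struct jffs2_sb_info *c,
			struct jffs2_eraseblock *jeb, uint32_t ofs, uint32_t len,
			struct jffs2_inode_cache *ic)
	{
		struct jffs2_raw_node_ref *ref = jffs2_alloc_raw_node_ref();

		if (!ref)
			return NULL;

		ref->flash_offset = ofs;	/* offset | REF_UNCHECKED/REF_NORMAL/REF_PRISTINE */
		ref->__totlen = len;
		ref->next_phys = NULL;

		/* chain into the owning inode cache, if any */
		if (ic) {
			ref->next_in_ino = ic->nodes;
			ic->nodes = ref;
		} else {
			ref->next_in_ino = NULL;
		}

		/* append to the eraseblock's physical node list */
		if (!jeb->first_node)
			jeb->first_node = ref;
		if (jeb->last_node)
			jeb->last_node->next_phys = ref;
		jeb->last_node = ref;

		/* accounting formerly done by the USED_SPACE()/UNCHECKED_SPACE() macros */
		jeb->free_size -= len;
		c->free_size -= len;
		if (ref_flags(ref) == REF_UNCHECKED) {
			jeb->unchecked_size += len;
			c->unchecked_size += len;
		} else {
			jeb->used_size += len;
			c->used_size += len;
		}
		return ref;
	}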
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
new file mode 100644
index 000000000000..52a9894a6364
--- /dev/null
+++ b/fs/jffs2/security.c
@@ -0,0 +1,82 @@
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2006 NEC Corporation
5 *
6 * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/time.h>
15#include <linux/pagemap.h>
16#include <linux/highmem.h>
17#include <linux/crc32.h>
18#include <linux/jffs2.h>
19#include <linux/xattr.h>
20#include <linux/mtd/mtd.h>
21#include <linux/security.h>
22#include "nodelist.h"
23
24/* ---- Initial Security Label Attachment -------------- */
25int jffs2_init_security(struct inode *inode, struct inode *dir)
26{
27 int rc;
28 size_t len;
29 void *value;
30 char *name;
31
32 rc = security_inode_init_security(inode, dir, &name, &value, &len);
33 if (rc) {
34 if (rc == -EOPNOTSUPP)
35 return 0;
36 return rc;
37 }
38 rc = do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, value, len, 0);
39
40 kfree(name);
41 kfree(value);
42 return rc;
43}
44
45/* ---- XATTR Handler for "security.*" ----------------- */
46static int jffs2_security_getxattr(struct inode *inode, const char *name,
47 void *buffer, size_t size)
48{
49 if (!strcmp(name, ""))
50 return -EINVAL;
51
52 return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size);
53}
54
55static int jffs2_security_setxattr(struct inode *inode, const char *name, const void *buffer,
56 size_t size, int flags)
57{
58 if (!strcmp(name, ""))
59 return -EINVAL;
60
61 return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size, flags);
62}
63
64static size_t jffs2_security_listxattr(struct inode *inode, char *list, size_t list_size,
65 const char *name, size_t name_len)
66{
67 size_t retlen = XATTR_SECURITY_PREFIX_LEN + name_len + 1;
68
69 if (list && retlen <= list_size) {
70 strcpy(list, XATTR_SECURITY_PREFIX);
71 strcpy(list + XATTR_SECURITY_PREFIX_LEN, name);
72 }
73
74 return retlen;
75}
76
77struct xattr_handler jffs2_security_xattr_handler = {
78 .prefix = XATTR_SECURITY_PREFIX,
79 .list = jffs2_security_listxattr,
80 .set = jffs2_security_setxattr,
81 .get = jffs2_security_getxattr
82};
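The handler registered above serves the "security." xattr namespace through the ordinary VFS xattr calls. A minimal userspace sketch exercising it (the mount point and the "security.selinux" attribute name are illustrative; which attribute is actually present depends on the active security module):

	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>
	#include <sys/xattr.h>

	int main(void)
	{
		char label[256];
		ssize_t len;

		/* Read whatever label the LSM attached at create time
		   (served by jffs2_security_getxattr via do_jffs2_getxattr) */
		len = getxattr("/mnt/jffs2/file", "security.selinux",
			       label, sizeof(label) - 1);
		if (len < 0) {
			perror("getxattr");
			return 1;
		}
		label[len] = '\0';
		printf("security.selinux = %s\n", label);

		/* Store a new value; this reaches jffs2_security_setxattr */
		if (setxattr("/mnt/jffs2/file", "security.selinux",
			     "system_u:object_r:etc_t:s0",
			     strlen("system_u:object_r:etc_t:s0"), 0) < 0) {
			perror("setxattr");
			return 1;
		}
		return 0;
	}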
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index fb9cec61fcf2..0b02fc79e4d1 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -5,6 +5,7 @@
5 * Zoltan Sogor <weth@inf.u-szeged.hu>, 5 * Zoltan Sogor <weth@inf.u-szeged.hu>,
6 * Patrik Kluba <pajko@halom.u-szeged.hu>, 6 * Patrik Kluba <pajko@halom.u-szeged.hu>,
7 * University of Szeged, Hungary 7 * University of Szeged, Hungary
8 * 2005 KaiGai Kohei <kaigai@ak.jp.nec.com>
8 * 9 *
9 * For licensing information, see the file 'LICENCE' in this directory. 10 * For licensing information, see the file 'LICENCE' in this directory.
10 * 11 *
@@ -81,6 +82,19 @@ static int jffs2_sum_add_mem(struct jffs2_summary *s, union jffs2_sum_mem *item)
81 dbg_summary("dirent (%u) added to summary\n", 82 dbg_summary("dirent (%u) added to summary\n",
82 je32_to_cpu(item->d.ino)); 83 je32_to_cpu(item->d.ino));
83 break; 84 break;
85#ifdef CONFIG_JFFS2_FS_XATTR
86 case JFFS2_NODETYPE_XATTR:
87 s->sum_size += JFFS2_SUMMARY_XATTR_SIZE;
88 s->sum_num++;
89 dbg_summary("xattr (xid=%u, version=%u) added to summary\n",
90 je32_to_cpu(item->x.xid), je32_to_cpu(item->x.version));
91 break;
92 case JFFS2_NODETYPE_XREF:
93 s->sum_size += JFFS2_SUMMARY_XREF_SIZE;
94 s->sum_num++;
95 dbg_summary("xref added to summary\n");
96 break;
97#endif
84 default: 98 default:
85 JFFS2_WARNING("UNKNOWN node type %u\n", 99 JFFS2_WARNING("UNKNOWN node type %u\n",
86 je16_to_cpu(item->u.nodetype)); 100 je16_to_cpu(item->u.nodetype));
@@ -141,6 +155,40 @@ int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *r
141 return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp); 155 return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
142} 156}
143 157
158#ifdef CONFIG_JFFS2_FS_XATTR
159int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs)
160{
161 struct jffs2_sum_xattr_mem *temp;
162
163 temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
164 if (!temp)
165 return -ENOMEM;
166
167 temp->nodetype = rx->nodetype;
168 temp->xid = rx->xid;
169 temp->version = rx->version;
170 temp->offset = cpu_to_je32(ofs);
171 temp->totlen = rx->totlen;
172 temp->next = NULL;
173
174 return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
175}
176
177int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs)
178{
179 struct jffs2_sum_xref_mem *temp;
180
181 temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
182 if (!temp)
183 return -ENOMEM;
184
185 temp->nodetype = rr->nodetype;
186 temp->offset = cpu_to_je32(ofs);
187 temp->next = NULL;
188
189 return jffs2_sum_add_mem(s, (union jffs2_sum_mem *)temp);
190}
191#endif
144/* Cleanup every collected summary information */ 192/* Cleanup every collected summary information */
145 193
146static void jffs2_sum_clean_collected(struct jffs2_summary *s) 194static void jffs2_sum_clean_collected(struct jffs2_summary *s)
@@ -259,7 +307,40 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
259 307
260 return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp); 308 return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
261 } 309 }
310#ifdef CONFIG_JFFS2_FS_XATTR
311 case JFFS2_NODETYPE_XATTR: {
312 struct jffs2_sum_xattr_mem *temp;
313 if (je32_to_cpu(node->x.version) == 0xffffffff)
314 return 0;
315 temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
316 if (!temp)
317 goto no_mem;
318
319 temp->nodetype = node->x.nodetype;
320 temp->xid = node->x.xid;
321 temp->version = node->x.version;
322 temp->totlen = node->x.totlen;
323 temp->offset = cpu_to_je32(ofs);
324 temp->next = NULL;
325
326 return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
327 }
328 case JFFS2_NODETYPE_XREF: {
329 struct jffs2_sum_xref_mem *temp;
330
331 if (je32_to_cpu(node->r.ino) == 0xffffffff
332 && je32_to_cpu(node->r.xid) == 0xffffffff)
333 return 0;
334 temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
335 if (!temp)
336 goto no_mem;
337 temp->nodetype = node->r.nodetype;
338 temp->offset = cpu_to_je32(ofs);
339 temp->next = NULL;
262 340
341 return jffs2_sum_add_mem(c->summary, (union jffs2_sum_mem *)temp);
342 }
343#endif
263 case JFFS2_NODETYPE_PADDING: 344 case JFFS2_NODETYPE_PADDING:
264 dbg_summary("node PADDING\n"); 345 dbg_summary("node PADDING\n");
265 c->summary->sum_padded += je32_to_cpu(node->u.totlen); 346 c->summary->sum_padded += je32_to_cpu(node->u.totlen);
@@ -288,23 +369,41 @@ no_mem:
288 return -ENOMEM; 369 return -ENOMEM;
289} 370}
290 371
372static struct jffs2_raw_node_ref *sum_link_node_ref(struct jffs2_sb_info *c,
373 struct jffs2_eraseblock *jeb,
374 uint32_t ofs, uint32_t len,
375 struct jffs2_inode_cache *ic)
376{
377 /* If there was a gap, mark it dirty */
378 if ((ofs & ~3) > c->sector_size - jeb->free_size) {
379 /* Ew. Summary doesn't actually tell us explicitly about dirty space */
380 jffs2_scan_dirty_space(c, jeb, (ofs & ~3) - (c->sector_size - jeb->free_size));
381 }
382
383 return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic);
384}
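To put the gap check above into numbers (illustrative figures only): with a 0x10000-byte eraseblock and jeb->free_size at 0xd000, the scan's current write point is 0x10000 - 0xd000 = 0x3000 into the block. A summary entry whose 4-byte-aligned offset is 0x3200 therefore implies 0x200 bytes the summary never described, and that span is handed to jffs2_scan_dirty_space() before the node itself is linked:

	/* Illustrative values only */
	uint32_t sector_size = 0x10000;   /* c->sector_size */
	uint32_t free_size   = 0xd000;    /* jeb->free_size */
	uint32_t ofs         = 0x3200;    /* node offset taken from the summary entry */

	uint32_t write_point = sector_size - free_size;            /* 0x3000 */
	uint32_t gap = (ofs & ~3) > write_point ?
	               (ofs & ~3) - write_point : 0;               /* 0x200 */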
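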
291 385
292/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */ 386/* Process the stored summary information - helper function for jffs2_sum_scan_sumnode() */
293 387
294static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 388static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
295 struct jffs2_raw_summary *summary, uint32_t *pseudo_random) 389 struct jffs2_raw_summary *summary, uint32_t *pseudo_random)
296{ 390{
297 struct jffs2_raw_node_ref *raw;
298 struct jffs2_inode_cache *ic; 391 struct jffs2_inode_cache *ic;
299 struct jffs2_full_dirent *fd; 392 struct jffs2_full_dirent *fd;
300 void *sp; 393 void *sp;
301 int i, ino; 394 int i, ino;
395 int err;
302 396
303 sp = summary->sum; 397 sp = summary->sum;
304 398
305 for (i=0; i<je32_to_cpu(summary->sum_num); i++) { 399 for (i=0; i<je32_to_cpu(summary->sum_num); i++) {
306 dbg_summary("processing summary index %d\n", i); 400 dbg_summary("processing summary index %d\n", i);
307 401
402 /* Make sure there's a spare ref for dirty space */
403 err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
404 if (err)
405 return err;
406
308 switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) { 407 switch (je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype)) {
309 case JFFS2_NODETYPE_INODE: { 408 case JFFS2_NODETYPE_INODE: {
310 struct jffs2_sum_inode_flash *spi; 409 struct jffs2_sum_inode_flash *spi;
@@ -312,38 +411,20 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
312 411
313 ino = je32_to_cpu(spi->inode); 412 ino = je32_to_cpu(spi->inode);
314 413
315 dbg_summary("Inode at 0x%08x\n", 414 dbg_summary("Inode at 0x%08x-0x%08x\n",
316 jeb->offset + je32_to_cpu(spi->offset)); 415 jeb->offset + je32_to_cpu(spi->offset),
317 416 jeb->offset + je32_to_cpu(spi->offset) + je32_to_cpu(spi->totlen));
318 raw = jffs2_alloc_raw_node_ref();
319 if (!raw) {
320 JFFS2_NOTICE("allocation of node reference failed\n");
321 kfree(summary);
322 return -ENOMEM;
323 }
324 417
325 ic = jffs2_scan_make_ino_cache(c, ino); 418 ic = jffs2_scan_make_ino_cache(c, ino);
326 if (!ic) { 419 if (!ic) {
327 JFFS2_NOTICE("scan_make_ino_cache failed\n"); 420 JFFS2_NOTICE("scan_make_ino_cache failed\n");
328 jffs2_free_raw_node_ref(raw);
329 kfree(summary);
330 return -ENOMEM; 421 return -ENOMEM;
331 } 422 }
332 423
333 raw->flash_offset = (jeb->offset + je32_to_cpu(spi->offset)) | REF_UNCHECKED; 424 sum_link_node_ref(c, jeb, je32_to_cpu(spi->offset) | REF_UNCHECKED,
334 raw->__totlen = PAD(je32_to_cpu(spi->totlen)); 425 PAD(je32_to_cpu(spi->totlen)), ic);
335 raw->next_phys = NULL;
336 raw->next_in_ino = ic->nodes;
337
338 ic->nodes = raw;
339 if (!jeb->first_node)
340 jeb->first_node = raw;
341 if (jeb->last_node)
342 jeb->last_node->next_phys = raw;
343 jeb->last_node = raw;
344 *pseudo_random += je32_to_cpu(spi->version);
345 426
346 UNCHECKED_SPACE(PAD(je32_to_cpu(spi->totlen))); 427 *pseudo_random += je32_to_cpu(spi->version);
347 428
348 sp += JFFS2_SUMMARY_INODE_SIZE; 429 sp += JFFS2_SUMMARY_INODE_SIZE;
349 430
@@ -354,52 +435,33 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
354 struct jffs2_sum_dirent_flash *spd; 435 struct jffs2_sum_dirent_flash *spd;
355 spd = sp; 436 spd = sp;
356 437
357 dbg_summary("Dirent at 0x%08x\n", 438 dbg_summary("Dirent at 0x%08x-0x%08x\n",
358 jeb->offset + je32_to_cpu(spd->offset)); 439 jeb->offset + je32_to_cpu(spd->offset),
440 jeb->offset + je32_to_cpu(spd->offset) + je32_to_cpu(spd->totlen));
441
359 442
360 fd = jffs2_alloc_full_dirent(spd->nsize+1); 443 fd = jffs2_alloc_full_dirent(spd->nsize+1);
361 if (!fd) { 444 if (!fd)
362 kfree(summary);
363 return -ENOMEM; 445 return -ENOMEM;
364 }
365 446
366 memcpy(&fd->name, spd->name, spd->nsize); 447 memcpy(&fd->name, spd->name, spd->nsize);
367 fd->name[spd->nsize] = 0; 448 fd->name[spd->nsize] = 0;
368 449
369 raw = jffs2_alloc_raw_node_ref();
370 if (!raw) {
371 jffs2_free_full_dirent(fd);
372 JFFS2_NOTICE("allocation of node reference failed\n");
373 kfree(summary);
374 return -ENOMEM;
375 }
376
377 ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); 450 ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino));
378 if (!ic) { 451 if (!ic) {
379 jffs2_free_full_dirent(fd); 452 jffs2_free_full_dirent(fd);
380 jffs2_free_raw_node_ref(raw);
381 kfree(summary);
382 return -ENOMEM; 453 return -ENOMEM;
383 } 454 }
384 455
385 raw->__totlen = PAD(je32_to_cpu(spd->totlen)); 456 fd->raw = sum_link_node_ref(c, jeb, je32_to_cpu(spd->offset) | REF_UNCHECKED,
386 raw->flash_offset = (jeb->offset + je32_to_cpu(spd->offset)) | REF_PRISTINE; 457 PAD(je32_to_cpu(spd->totlen)), ic);
387 raw->next_phys = NULL; 458
388 raw->next_in_ino = ic->nodes;
389 ic->nodes = raw;
390 if (!jeb->first_node)
391 jeb->first_node = raw;
392 if (jeb->last_node)
393 jeb->last_node->next_phys = raw;
394 jeb->last_node = raw;
395
396 fd->raw = raw;
397 fd->next = NULL; 459 fd->next = NULL;
398 fd->version = je32_to_cpu(spd->version); 460 fd->version = je32_to_cpu(spd->version);
399 fd->ino = je32_to_cpu(spd->ino); 461 fd->ino = je32_to_cpu(spd->ino);
400 fd->nhash = full_name_hash(fd->name, spd->nsize); 462 fd->nhash = full_name_hash(fd->name, spd->nsize);
401 fd->type = spd->type; 463 fd->type = spd->type;
402 USED_SPACE(PAD(je32_to_cpu(spd->totlen))); 464
403 jffs2_add_fd_to_list(c, fd, &ic->scan_dents); 465 jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
404 466
405 *pseudo_random += je32_to_cpu(spd->version); 467 *pseudo_random += je32_to_cpu(spd->version);
@@ -408,48 +470,105 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
408 470
409 break; 471 break;
410 } 472 }
473#ifdef CONFIG_JFFS2_FS_XATTR
474 case JFFS2_NODETYPE_XATTR: {
475 struct jffs2_xattr_datum *xd;
476 struct jffs2_sum_xattr_flash *spx;
477
478 spx = (struct jffs2_sum_xattr_flash *)sp;
479 dbg_summary("xattr at %#08x-%#08x (xid=%u, version=%u)\n",
480 jeb->offset + je32_to_cpu(spx->offset),
481 jeb->offset + je32_to_cpu(spx->offset) + je32_to_cpu(spx->totlen),
482 je32_to_cpu(spx->xid), je32_to_cpu(spx->version));
483
484 xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
485 je32_to_cpu(spx->version));
486 if (IS_ERR(xd)) {
487 if (PTR_ERR(xd) == -EEXIST) {
488 /* a newer version of xd exists */
489 if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(spx->totlen))))
490 return err;
491 sp += JFFS2_SUMMARY_XATTR_SIZE;
492 break;
493 }
494 JFFS2_NOTICE("allocation of xattr_datum failed\n");
495 return PTR_ERR(xd);
496 }
497
498 xd->node = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
499 PAD(je32_to_cpu(spx->totlen)), NULL);
500 /* FIXME */ xd->node->next_in_ino = (void *)xd;
501
502 *pseudo_random += je32_to_cpu(spx->xid);
503 sp += JFFS2_SUMMARY_XATTR_SIZE;
504
505 break;
506 }
507 case JFFS2_NODETYPE_XREF: {
508 struct jffs2_xattr_ref *ref;
509 struct jffs2_sum_xref_flash *spr;
510
511 spr = (struct jffs2_sum_xref_flash *)sp;
512 dbg_summary("xref at %#08x-%#08x\n",
513 jeb->offset + je32_to_cpu(spr->offset),
514 jeb->offset + je32_to_cpu(spr->offset) +
515 (uint32_t)PAD(sizeof(struct jffs2_raw_xref)));
516
517 ref = jffs2_alloc_xattr_ref();
518 if (!ref) {
 519 JFFS2_NOTICE("allocation of xattr_ref failed\n");
520 return -ENOMEM;
521 }
522 ref->ino = 0xfffffffe;
523 ref->xid = 0xfffffffd;
524 ref->next = c->xref_temp;
525 c->xref_temp = ref;
411 526
527 ref->node = sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
528 PAD(sizeof(struct jffs2_raw_xref)), NULL);
529 /* FIXME */ ref->node->next_in_ino = (void *)ref;
530
531 *pseudo_random += ref->node->flash_offset;
532 sp += JFFS2_SUMMARY_XREF_SIZE;
533
534 break;
535 }
536#endif
412 default : { 537 default : {
413 JFFS2_WARNING("Unsupported node type found in summary! Exiting..."); 538 uint16_t nodetype = je16_to_cpu(((struct jffs2_sum_unknown_flash *)sp)->nodetype);
414 kfree(summary); 539 JFFS2_WARNING("Unsupported node type %x found in summary! Exiting...\n", nodetype);
415 return -EIO; 540 if ((nodetype & JFFS2_COMPAT_MASK) == JFFS2_FEATURE_INCOMPAT)
541 return -EIO;
542
543 /* For compatible node types, just fall back to the full scan */
544 c->wasted_size -= jeb->wasted_size;
545 c->free_size += c->sector_size - jeb->free_size;
546 c->used_size -= jeb->used_size;
547 c->dirty_size -= jeb->dirty_size;
548 jeb->wasted_size = jeb->used_size = jeb->dirty_size = 0;
549 jeb->free_size = c->sector_size;
550
551 jffs2_free_jeb_node_refs(c, jeb);
552 return -ENOTRECOVERABLE;
416 } 553 }
417 } 554 }
418 } 555 }
419
420 kfree(summary);
421 return 0; 556 return 0;
422} 557}
423 558
424/* Process the summary node - called from jffs2_scan_eraseblock() */ 559/* Process the summary node - called from jffs2_scan_eraseblock() */
425
426int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 560int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
427 uint32_t ofs, uint32_t *pseudo_random) 561 struct jffs2_raw_summary *summary, uint32_t sumsize,
562 uint32_t *pseudo_random)
428{ 563{
429 struct jffs2_unknown_node crcnode; 564 struct jffs2_unknown_node crcnode;
430 struct jffs2_raw_node_ref *cache_ref; 565 int ret, ofs;
431 struct jffs2_raw_summary *summary;
432 int ret, sumsize;
433 uint32_t crc; 566 uint32_t crc;
434 567
435 sumsize = c->sector_size - ofs; 568 ofs = c->sector_size - sumsize;
436 ofs += jeb->offset;
437 569
438 dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n", 570 dbg_summary("summary found for 0x%08x at 0x%08x (0x%x bytes)\n",
439 jeb->offset, ofs, sumsize); 571 jeb->offset, jeb->offset + ofs, sumsize);
440
441 summary = kmalloc(sumsize, GFP_KERNEL);
442
443 if (!summary) {
444 return -ENOMEM;
445 }
446
447 ret = jffs2_fill_scan_buf(c, (unsigned char *)summary, ofs, sumsize);
448
449 if (ret) {
450 kfree(summary);
451 return ret;
452 }
453 572
454 /* OK, now check for node validity and CRC */ 573 /* OK, now check for node validity and CRC */
455 crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 574 crcnode.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -486,66 +605,49 @@ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
486 605
487 dbg_summary("Summary : CLEANMARKER node \n"); 606 dbg_summary("Summary : CLEANMARKER node \n");
488 607
608 ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
609 if (ret)
610 return ret;
611
489 if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) { 612 if (je32_to_cpu(summary->cln_mkr) != c->cleanmarker_size) {
490 dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n", 613 dbg_summary("CLEANMARKER node has totlen 0x%x != normal 0x%x\n",
491 je32_to_cpu(summary->cln_mkr), c->cleanmarker_size); 614 je32_to_cpu(summary->cln_mkr), c->cleanmarker_size);
492 UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); 615 if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
616 return ret;
493 } else if (jeb->first_node) { 617 } else if (jeb->first_node) {
494 dbg_summary("CLEANMARKER node not first node in block " 618 dbg_summary("CLEANMARKER node not first node in block "
495 "(0x%08x)\n", jeb->offset); 619 "(0x%08x)\n", jeb->offset);
496 UNCHECKED_SPACE(PAD(je32_to_cpu(summary->cln_mkr))); 620 if ((ret = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(summary->cln_mkr)))))
621 return ret;
497 } else { 622 } else {
498 struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref(); 623 jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL,
499 624 je32_to_cpu(summary->cln_mkr), NULL);
500 if (!marker_ref) {
501 JFFS2_NOTICE("Failed to allocate node ref for clean marker\n");
502 kfree(summary);
503 return -ENOMEM;
504 }
505
506 marker_ref->next_in_ino = NULL;
507 marker_ref->next_phys = NULL;
508 marker_ref->flash_offset = jeb->offset | REF_NORMAL;
509 marker_ref->__totlen = je32_to_cpu(summary->cln_mkr);
510 jeb->first_node = jeb->last_node = marker_ref;
511
512 USED_SPACE( PAD(je32_to_cpu(summary->cln_mkr)) );
513 } 625 }
514 } 626 }
515 627
516 if (je32_to_cpu(summary->padded)) {
517 DIRTY_SPACE(je32_to_cpu(summary->padded));
518 }
519
520 ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random); 628 ret = jffs2_sum_process_sum_data(c, jeb, summary, pseudo_random);
629 /* -ENOTRECOVERABLE isn't a fatal error -- it means we should do a full
630 scan of this eraseblock. So return zero */
631 if (ret == -ENOTRECOVERABLE)
632 return 0;
521 if (ret) 633 if (ret)
522 return ret; 634 return ret; /* real error */
523 635
524 /* for PARANOIA_CHECK */ 636 /* for PARANOIA_CHECK */
525 cache_ref = jffs2_alloc_raw_node_ref(); 637 ret = jffs2_prealloc_raw_node_refs(c, jeb, 2);
526 638 if (ret)
527 if (!cache_ref) { 639 return ret;
528 JFFS2_NOTICE("Failed to allocate node ref for cache\n");
529 return -ENOMEM;
530 }
531
532 cache_ref->next_in_ino = NULL;
533 cache_ref->next_phys = NULL;
534 cache_ref->flash_offset = ofs | REF_NORMAL;
535 cache_ref->__totlen = sumsize;
536
537 if (!jeb->first_node)
538 jeb->first_node = cache_ref;
539 if (jeb->last_node)
540 jeb->last_node->next_phys = cache_ref;
541 jeb->last_node = cache_ref;
542 640
543 USED_SPACE(sumsize); 641 sum_link_node_ref(c, jeb, ofs | REF_NORMAL, sumsize, NULL);
544 642
545 jeb->wasted_size += jeb->free_size; 643 if (unlikely(jeb->free_size)) {
546 c->wasted_size += jeb->free_size; 644 JFFS2_WARNING("Free size 0x%x bytes in eraseblock @0x%08x with summary?\n",
547 c->free_size -= jeb->free_size; 645 jeb->free_size, jeb->offset);
548 jeb->free_size = 0; 646 jeb->wasted_size += jeb->free_size;
647 c->wasted_size += jeb->free_size;
648 c->free_size -= jeb->free_size;
649 jeb->free_size = 0;
650 }
549 651
550 return jffs2_scan_classify_jeb(c, jeb); 652 return jffs2_scan_classify_jeb(c, jeb);
551 653
@@ -564,6 +666,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
564 union jffs2_sum_mem *temp; 666 union jffs2_sum_mem *temp;
565 struct jffs2_sum_marker *sm; 667 struct jffs2_sum_marker *sm;
566 struct kvec vecs[2]; 668 struct kvec vecs[2];
669 uint32_t sum_ofs;
567 void *wpage; 670 void *wpage;
568 int ret; 671 int ret;
569 size_t retlen; 672 size_t retlen;
@@ -581,16 +684,17 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
581 wpage = c->summary->sum_buf; 684 wpage = c->summary->sum_buf;
582 685
583 while (c->summary->sum_num) { 686 while (c->summary->sum_num) {
687 temp = c->summary->sum_list_head;
584 688
585 switch (je16_to_cpu(c->summary->sum_list_head->u.nodetype)) { 689 switch (je16_to_cpu(temp->u.nodetype)) {
586 case JFFS2_NODETYPE_INODE: { 690 case JFFS2_NODETYPE_INODE: {
587 struct jffs2_sum_inode_flash *sino_ptr = wpage; 691 struct jffs2_sum_inode_flash *sino_ptr = wpage;
588 692
589 sino_ptr->nodetype = c->summary->sum_list_head->i.nodetype; 693 sino_ptr->nodetype = temp->i.nodetype;
590 sino_ptr->inode = c->summary->sum_list_head->i.inode; 694 sino_ptr->inode = temp->i.inode;
591 sino_ptr->version = c->summary->sum_list_head->i.version; 695 sino_ptr->version = temp->i.version;
592 sino_ptr->offset = c->summary->sum_list_head->i.offset; 696 sino_ptr->offset = temp->i.offset;
593 sino_ptr->totlen = c->summary->sum_list_head->i.totlen; 697 sino_ptr->totlen = temp->i.totlen;
594 698
595 wpage += JFFS2_SUMMARY_INODE_SIZE; 699 wpage += JFFS2_SUMMARY_INODE_SIZE;
596 700
@@ -600,30 +704,60 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
600 case JFFS2_NODETYPE_DIRENT: { 704 case JFFS2_NODETYPE_DIRENT: {
601 struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage; 705 struct jffs2_sum_dirent_flash *sdrnt_ptr = wpage;
602 706
603 sdrnt_ptr->nodetype = c->summary->sum_list_head->d.nodetype; 707 sdrnt_ptr->nodetype = temp->d.nodetype;
604 sdrnt_ptr->totlen = c->summary->sum_list_head->d.totlen; 708 sdrnt_ptr->totlen = temp->d.totlen;
605 sdrnt_ptr->offset = c->summary->sum_list_head->d.offset; 709 sdrnt_ptr->offset = temp->d.offset;
606 sdrnt_ptr->pino = c->summary->sum_list_head->d.pino; 710 sdrnt_ptr->pino = temp->d.pino;
607 sdrnt_ptr->version = c->summary->sum_list_head->d.version; 711 sdrnt_ptr->version = temp->d.version;
608 sdrnt_ptr->ino = c->summary->sum_list_head->d.ino; 712 sdrnt_ptr->ino = temp->d.ino;
609 sdrnt_ptr->nsize = c->summary->sum_list_head->d.nsize; 713 sdrnt_ptr->nsize = temp->d.nsize;
610 sdrnt_ptr->type = c->summary->sum_list_head->d.type; 714 sdrnt_ptr->type = temp->d.type;
611 715
612 memcpy(sdrnt_ptr->name, c->summary->sum_list_head->d.name, 716 memcpy(sdrnt_ptr->name, temp->d.name,
613 c->summary->sum_list_head->d.nsize); 717 temp->d.nsize);
614 718
615 wpage += JFFS2_SUMMARY_DIRENT_SIZE(c->summary->sum_list_head->d.nsize); 719 wpage += JFFS2_SUMMARY_DIRENT_SIZE(temp->d.nsize);
616 720
617 break; 721 break;
618 } 722 }
723#ifdef CONFIG_JFFS2_FS_XATTR
724 case JFFS2_NODETYPE_XATTR: {
725 struct jffs2_sum_xattr_flash *sxattr_ptr = wpage;
726
727 temp = c->summary->sum_list_head;
728 sxattr_ptr->nodetype = temp->x.nodetype;
729 sxattr_ptr->xid = temp->x.xid;
730 sxattr_ptr->version = temp->x.version;
731 sxattr_ptr->offset = temp->x.offset;
732 sxattr_ptr->totlen = temp->x.totlen;
733
734 wpage += JFFS2_SUMMARY_XATTR_SIZE;
735 break;
736 }
737 case JFFS2_NODETYPE_XREF: {
738 struct jffs2_sum_xref_flash *sxref_ptr = wpage;
619 739
740 temp = c->summary->sum_list_head;
741 sxref_ptr->nodetype = temp->r.nodetype;
742 sxref_ptr->offset = temp->r.offset;
743
744 wpage += JFFS2_SUMMARY_XREF_SIZE;
745 break;
746 }
747#endif
620 default : { 748 default : {
621 BUG(); /* unknown node in summary information */ 749 if ((je16_to_cpu(temp->u.nodetype) & JFFS2_COMPAT_MASK)
750 == JFFS2_FEATURE_RWCOMPAT_COPY) {
751 dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
752 je16_to_cpu(temp->u.nodetype));
753 jffs2_sum_disable_collecting(c->summary);
754 } else {
755 BUG(); /* unknown node in summary information */
756 }
622 } 757 }
623 } 758 }
624 759
625 temp = c->summary->sum_list_head; 760 c->summary->sum_list_head = temp->u.next;
626 c->summary->sum_list_head = c->summary->sum_list_head->u.next;
627 kfree(temp); 761 kfree(temp);
628 762
629 c->summary->sum_num--; 763 c->summary->sum_num--;
@@ -645,25 +779,34 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
645 vecs[1].iov_base = c->summary->sum_buf; 779 vecs[1].iov_base = c->summary->sum_buf;
646 vecs[1].iov_len = datasize; 780 vecs[1].iov_len = datasize;
647 781
648 dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n", 782 sum_ofs = jeb->offset + c->sector_size - jeb->free_size;
649 jeb->offset + c->sector_size - jeb->free_size);
650 783
651 spin_unlock(&c->erase_completion_lock); 784 dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n",
652 ret = jffs2_flash_writev(c, vecs, 2, jeb->offset + c->sector_size - 785 sum_ofs);
653 jeb->free_size, &retlen, 0);
654 spin_lock(&c->erase_completion_lock);
655 786
787 ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0);
656 788
657 if (ret || (retlen != infosize)) { 789 if (ret || (retlen != infosize)) {
658 JFFS2_WARNING("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", 790
659 infosize, jeb->offset + c->sector_size - jeb->free_size, ret, retlen); 791 JFFS2_WARNING("Write of %u bytes at 0x%08x failed. returned %d, retlen %zd\n",
792 infosize, sum_ofs, ret, retlen);
793
794 if (retlen) {
795 /* Waste remaining space */
796 spin_lock(&c->erase_completion_lock);
797 jffs2_link_node_ref(c, jeb, sum_ofs | REF_OBSOLETE, infosize, NULL);
798 spin_unlock(&c->erase_completion_lock);
799 }
660 800
661 c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE; 801 c->summary->sum_size = JFFS2_SUMMARY_NOSUM_SIZE;
662 WASTED_SPACE(infosize);
663 802
664 return 1; 803 return 0;
665 } 804 }
666 805
806 spin_lock(&c->erase_completion_lock);
807 jffs2_link_node_ref(c, jeb, sum_ofs | REF_NORMAL, infosize, NULL);
808 spin_unlock(&c->erase_completion_lock);
809
667 return 0; 810 return 0;
668} 811}
669 812
@@ -671,13 +814,16 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
671 814
672int jffs2_sum_write_sumnode(struct jffs2_sb_info *c) 815int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
673{ 816{
674 struct jffs2_raw_node_ref *summary_ref; 817 int datasize, infosize, padsize;
675 int datasize, infosize, padsize, ret;
676 struct jffs2_eraseblock *jeb; 818 struct jffs2_eraseblock *jeb;
819 int ret;
677 820
678 dbg_summary("called\n"); 821 dbg_summary("called\n");
679 822
823 spin_unlock(&c->erase_completion_lock);
824
680 jeb = c->nextblock; 825 jeb = c->nextblock;
826 jffs2_prealloc_raw_node_refs(c, jeb, 1);
681 827
682 if (!c->summary->sum_num || !c->summary->sum_list_head) { 828 if (!c->summary->sum_num || !c->summary->sum_list_head) {
683 JFFS2_WARNING("Empty summary info!!!\n"); 829 JFFS2_WARNING("Empty summary info!!!\n");
@@ -696,35 +842,11 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c)
696 jffs2_sum_disable_collecting(c->summary); 842 jffs2_sum_disable_collecting(c->summary);
697 843
698 JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize); 844 JFFS2_WARNING("Not enough space for summary, padsize = %d\n", padsize);
845 spin_lock(&c->erase_completion_lock);
699 return 0; 846 return 0;
700 } 847 }
701 848
702 ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize); 849 ret = jffs2_sum_write_data(c, jeb, infosize, datasize, padsize);
703 if (ret)
704 return 0; /* can't write out summary, block is marked as NOSUM_SIZE */
705
706 /* for ACCT_PARANOIA_CHECK */
707 spin_unlock(&c->erase_completion_lock);
708 summary_ref = jffs2_alloc_raw_node_ref();
709 spin_lock(&c->erase_completion_lock); 850 spin_lock(&c->erase_completion_lock);
710 851 return ret;
711 if (!summary_ref) {
712 JFFS2_NOTICE("Failed to allocate node ref for summary\n");
713 return -ENOMEM;
714 }
715
716 summary_ref->next_in_ino = NULL;
717 summary_ref->next_phys = NULL;
718 summary_ref->flash_offset = (jeb->offset + c->sector_size - jeb->free_size) | REF_NORMAL;
719 summary_ref->__totlen = infosize;
720
721 if (!jeb->first_node)
722 jeb->first_node = summary_ref;
723 if (jeb->last_node)
724 jeb->last_node->next_phys = summary_ref;
725 jeb->last_node = summary_ref;
726
727 USED_SPACE(infosize);
728
729 return 0;
730} 852}
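The position chosen for the summary record above falls straight out of the block accounting: every byte already committed to c->nextblock has been subtracted from jeb->free_size, so the record is written at the current write point and is sized to run exactly to the end of the eraseblock. With illustrative figures (a 64KiB eraseblock at 0x00200000 with 0x400 bytes reserved for the summary):

	/* Illustrative figures only */
	uint32_t sector_size = 0x10000;      /* c->sector_size */
	uint32_t free_size   = 0x400;        /* jeb->free_size when the summary is written */
	uint32_t jeb_offset  = 0x00200000;   /* jeb->offset */

	uint32_t sum_ofs = jeb_offset + sector_size - free_size;   /* 0x0020fc00 */
	/* vecs[1] carries the collected records (sum_buf, datasize bytes);
	   together the two kvecs cover infosize bytes written at sum_ofs,
	   after which jeb->free_size reaches zero. */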
diff --git a/fs/jffs2/summary.h b/fs/jffs2/summary.h
index b7a678be1709..6bf1f6aa4552 100644
--- a/fs/jffs2/summary.h
+++ b/fs/jffs2/summary.h
@@ -18,23 +18,6 @@
18#include <linux/uio.h> 18#include <linux/uio.h>
19#include <linux/jffs2.h> 19#include <linux/jffs2.h>
20 20
21#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
22 c->free_size -= _x; c->dirty_size += _x; \
23 jeb->free_size -= _x ; jeb->dirty_size += _x; \
24 }while(0)
25#define USED_SPACE(x) do { typeof(x) _x = (x); \
26 c->free_size -= _x; c->used_size += _x; \
27 jeb->free_size -= _x ; jeb->used_size += _x; \
28 }while(0)
29#define WASTED_SPACE(x) do { typeof(x) _x = (x); \
30 c->free_size -= _x; c->wasted_size += _x; \
31 jeb->free_size -= _x ; jeb->wasted_size += _x; \
32 }while(0)
33#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
34 c->free_size -= _x; c->unchecked_size += _x; \
35 jeb->free_size -= _x ; jeb->unchecked_size += _x; \
36 }while(0)
37
38#define BLK_STATE_ALLFF 0 21#define BLK_STATE_ALLFF 0
39#define BLK_STATE_CLEAN 1 22#define BLK_STATE_CLEAN 1
40#define BLK_STATE_PARTDIRTY 2 23#define BLK_STATE_PARTDIRTY 2
@@ -45,6 +28,8 @@
45#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff 28#define JFFS2_SUMMARY_NOSUM_SIZE 0xffffffff
46#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash)) 29#define JFFS2_SUMMARY_INODE_SIZE (sizeof(struct jffs2_sum_inode_flash))
47#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x)) 30#define JFFS2_SUMMARY_DIRENT_SIZE(x) (sizeof(struct jffs2_sum_dirent_flash) + (x))
31#define JFFS2_SUMMARY_XATTR_SIZE (sizeof(struct jffs2_sum_xattr_flash))
32#define JFFS2_SUMMARY_XREF_SIZE (sizeof(struct jffs2_sum_xref_flash))
48 33
49/* Summary structures used on flash */ 34/* Summary structures used on flash */
50 35
@@ -75,11 +60,28 @@ struct jffs2_sum_dirent_flash
75 uint8_t name[0]; /* dirent name */ 60 uint8_t name[0]; /* dirent name */
76} __attribute__((packed)); 61} __attribute__((packed));
77 62
63struct jffs2_sum_xattr_flash
64{
 65 jint16_t nodetype; /* == JFFS2_NODETYPE_XATTR */
66 jint32_t xid; /* xattr identifier */
67 jint32_t version; /* version number */
68 jint32_t offset; /* offset on jeb */
69 jint32_t totlen; /* node length */
70} __attribute__((packed));
71
72struct jffs2_sum_xref_flash
73{
74 jint16_t nodetype; /* == JFFS2_NODETYPE_XREF */
75 jint32_t offset; /* offset on jeb */
76} __attribute__((packed));
77
78union jffs2_sum_flash 78union jffs2_sum_flash
79{ 79{
80 struct jffs2_sum_unknown_flash u; 80 struct jffs2_sum_unknown_flash u;
81 struct jffs2_sum_inode_flash i; 81 struct jffs2_sum_inode_flash i;
82 struct jffs2_sum_dirent_flash d; 82 struct jffs2_sum_dirent_flash d;
83 struct jffs2_sum_xattr_flash x;
84 struct jffs2_sum_xref_flash r;
83}; 85};
84 86
85/* Summary structures used in the memory */ 87/* Summary structures used in the memory */
@@ -114,11 +116,30 @@ struct jffs2_sum_dirent_mem
114 uint8_t name[0]; /* dirent name */ 116 uint8_t name[0]; /* dirent name */
115} __attribute__((packed)); 117} __attribute__((packed));
116 118
119struct jffs2_sum_xattr_mem
120{
121 union jffs2_sum_mem *next;
122 jint16_t nodetype;
123 jint32_t xid;
124 jint32_t version;
125 jint32_t offset;
126 jint32_t totlen;
127} __attribute__((packed));
128
129struct jffs2_sum_xref_mem
130{
131 union jffs2_sum_mem *next;
132 jint16_t nodetype;
133 jint32_t offset;
134} __attribute__((packed));
135
117union jffs2_sum_mem 136union jffs2_sum_mem
118{ 137{
119 struct jffs2_sum_unknown_mem u; 138 struct jffs2_sum_unknown_mem u;
120 struct jffs2_sum_inode_mem i; 139 struct jffs2_sum_inode_mem i;
121 struct jffs2_sum_dirent_mem d; 140 struct jffs2_sum_dirent_mem d;
141 struct jffs2_sum_xattr_mem x;
142 struct jffs2_sum_xref_mem r;
122}; 143};
123 144
124/* Summary related information stored in superblock */ 145/* Summary related information stored in superblock */
@@ -159,8 +180,11 @@ int jffs2_sum_write_sumnode(struct jffs2_sb_info *c);
159int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size); 180int jffs2_sum_add_padding_mem(struct jffs2_summary *s, uint32_t size);
160int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs); 181int jffs2_sum_add_inode_mem(struct jffs2_summary *s, struct jffs2_raw_inode *ri, uint32_t ofs);
161int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs); 182int jffs2_sum_add_dirent_mem(struct jffs2_summary *s, struct jffs2_raw_dirent *rd, uint32_t ofs);
183int jffs2_sum_add_xattr_mem(struct jffs2_summary *s, struct jffs2_raw_xattr *rx, uint32_t ofs);
184int jffs2_sum_add_xref_mem(struct jffs2_summary *s, struct jffs2_raw_xref *rr, uint32_t ofs);
162int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, 185int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
163 uint32_t ofs, uint32_t *pseudo_random); 186 struct jffs2_raw_summary *summary, uint32_t sumlen,
187 uint32_t *pseudo_random);
164 188
165#else /* SUMMARY DISABLED */ 189#else /* SUMMARY DISABLED */
166 190
@@ -176,7 +200,9 @@ int jffs2_sum_scan_sumnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
176#define jffs2_sum_add_padding_mem(a,b) 200#define jffs2_sum_add_padding_mem(a,b)
177#define jffs2_sum_add_inode_mem(a,b,c) 201#define jffs2_sum_add_inode_mem(a,b,c)
178#define jffs2_sum_add_dirent_mem(a,b,c) 202#define jffs2_sum_add_dirent_mem(a,b,c)
179#define jffs2_sum_scan_sumnode(a,b,c,d) (0) 203#define jffs2_sum_add_xattr_mem(a,b,c)
204#define jffs2_sum_add_xref_mem(a,b,c)
205#define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
180 206
181#endif /* CONFIG_JFFS2_SUMMARY */ 207#endif /* CONFIG_JFFS2_SUMMARY */
182 208
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index ffd8e84b22cc..9d0521451f59 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -151,7 +151,10 @@ static struct super_block *jffs2_get_sb_mtd(struct file_system_type *fs_type,
151 151
152 sb->s_op = &jffs2_super_operations; 152 sb->s_op = &jffs2_super_operations;
153 sb->s_flags = flags | MS_NOATIME; 153 sb->s_flags = flags | MS_NOATIME;
154 154 sb->s_xattr = jffs2_xattr_handlers;
155#ifdef CONFIG_JFFS2_FS_POSIX_ACL
156 sb->s_flags |= MS_POSIXACL;
157#endif
155 ret = jffs2_do_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); 158 ret = jffs2_do_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
156 159
157 if (ret) { 160 if (ret) {
@@ -293,6 +296,7 @@ static void jffs2_put_super (struct super_block *sb)
293 kfree(c->blocks); 296 kfree(c->blocks);
294 jffs2_flash_cleanup(c); 297 jffs2_flash_cleanup(c);
295 kfree(c->inocache_list); 298 kfree(c->inocache_list);
299 jffs2_clear_xattr_subsystem(c);
296 if (c->mtd->sync) 300 if (c->mtd->sync)
297 c->mtd->sync(c->mtd); 301 c->mtd->sync(c->mtd);
298 302
@@ -320,6 +324,18 @@ static int __init init_jffs2_fs(void)
320{ 324{
321 int ret; 325 int ret;
322 326
327 /* Paranoia checks for on-medium structures. If we ask GCC
328 to pack them with __attribute__((packed)) then it _also_
329 assumes that they're not aligned -- so it emits crappy
330 code on some architectures. Ideally we want an attribute
331 which means just 'no padding', without the alignment
332 thing. But GCC doesn't have that -- we have to just
333 hope the structs are the right sizes, instead. */
334 BUG_ON(sizeof(struct jffs2_unknown_node) != 12);
335 BUG_ON(sizeof(struct jffs2_raw_dirent) != 40);
336 BUG_ON(sizeof(struct jffs2_raw_inode) != 68);
337 BUG_ON(sizeof(struct jffs2_raw_summary) != 32);
338
323 printk(KERN_INFO "JFFS2 version 2.2." 339 printk(KERN_INFO "JFFS2 version 2.2."
324#ifdef CONFIG_JFFS2_FS_WRITEBUFFER 340#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
325 " (NAND)" 341 " (NAND)"
@@ -327,7 +343,7 @@ static int __init init_jffs2_fs(void)
327#ifdef CONFIG_JFFS2_SUMMARY 343#ifdef CONFIG_JFFS2_SUMMARY
328 " (SUMMARY) " 344 " (SUMMARY) "
329#endif 345#endif
330 " (C) 2001-2003 Red Hat, Inc.\n"); 346 " (C) 2001-2006 Red Hat, Inc.\n");
331 347
332 jffs2_inode_cachep = kmem_cache_create("jffs2_i", 348 jffs2_inode_cachep = kmem_cache_create("jffs2_i",
333 sizeof(struct jffs2_inode_info), 349 sizeof(struct jffs2_inode_info),
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c
index d55754fe8925..fc211b6e9b03 100644
--- a/fs/jffs2/symlink.c
+++ b/fs/jffs2/symlink.c
@@ -24,7 +24,12 @@ struct inode_operations jffs2_symlink_inode_operations =
24{ 24{
25 .readlink = generic_readlink, 25 .readlink = generic_readlink,
26 .follow_link = jffs2_follow_link, 26 .follow_link = jffs2_follow_link,
27 .setattr = jffs2_setattr 27 .permission = jffs2_permission,
28 .setattr = jffs2_setattr,
29 .setxattr = jffs2_setxattr,
30 .getxattr = jffs2_getxattr,
31 .listxattr = jffs2_listxattr,
32 .removexattr = jffs2_removexattr
28}; 33};
29 34
30static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) 35static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 4cebf0e57c46..a7f153f79ecb 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -156,69 +156,130 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
156 jffs2_erase_pending_trigger(c); 156 jffs2_erase_pending_trigger(c);
157 } 157 }
158 158
159 /* Adjust its size counts accordingly */ 159 if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
160 c->wasted_size += jeb->free_size; 160 uint32_t oldfree = jeb->free_size;
161 c->free_size -= jeb->free_size; 161
162 jeb->wasted_size += jeb->free_size; 162 jffs2_link_node_ref(c, jeb,
163 jeb->free_size = 0; 163 (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
164 oldfree, NULL);
165 /* convert to wasted */
166 c->wasted_size += oldfree;
167 jeb->wasted_size += oldfree;
168 c->dirty_size -= oldfree;
169 jeb->dirty_size -= oldfree;
170 }
164 171
165 jffs2_dbg_dump_block_lists_nolock(c); 172 jffs2_dbg_dump_block_lists_nolock(c);
166 jffs2_dbg_acct_sanity_check_nolock(c,jeb); 173 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
167 jffs2_dbg_acct_paranoia_check_nolock(c, jeb); 174 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
168} 175}
169 176
177static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
178 struct jffs2_inode_info *f,
179 struct jffs2_raw_node_ref *raw,
180 union jffs2_node_union *node)
181{
182 struct jffs2_node_frag *frag;
183 struct jffs2_full_dirent *fd;
184
185 dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
186 node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));
187
188 BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
189 je16_to_cpu(node->u.magic) != 0);
190
191 switch (je16_to_cpu(node->u.nodetype)) {
192 case JFFS2_NODETYPE_INODE:
193 if (f->metadata && f->metadata->raw == raw) {
194 dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
195 return &f->metadata->raw;
196 }
197 frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
198 BUG_ON(!frag);
199 /* Find a frag which refers to the full_dnode we want to modify */
200 while (!frag->node || frag->node->raw != raw) {
201 frag = frag_next(frag);
202 BUG_ON(!frag);
203 }
204 dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
205 return &frag->node->raw;
206
207 case JFFS2_NODETYPE_DIRENT:
208 for (fd = f->dents; fd; fd = fd->next) {
209 if (fd->raw == raw) {
210 dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
211 return &fd->raw;
212 }
213 }
214 BUG();
215
216 default:
217 dbg_noderef("Don't care about replacing raw for nodetype %x\n",
218 je16_to_cpu(node->u.nodetype));
219 break;
220 }
221 return NULL;
222}
223
170/* Recover from failure to write wbuf. Recover the nodes up to the 224/* Recover from failure to write wbuf. Recover the nodes up to the
171 * wbuf, not the one which we were starting to try to write. */ 225 * wbuf, not the one which we were starting to try to write. */
172 226
173static void jffs2_wbuf_recover(struct jffs2_sb_info *c) 227static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
174{ 228{
175 struct jffs2_eraseblock *jeb, *new_jeb; 229 struct jffs2_eraseblock *jeb, *new_jeb;
176 struct jffs2_raw_node_ref **first_raw, **raw; 230 struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
177 size_t retlen; 231 size_t retlen;
178 int ret; 232 int ret;
233 int nr_refile = 0;
179 unsigned char *buf; 234 unsigned char *buf;
180 uint32_t start, end, ofs, len; 235 uint32_t start, end, ofs, len;
181 236
182 spin_lock(&c->erase_completion_lock);
183
184 jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; 237 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
185 238
239 spin_lock(&c->erase_completion_lock);
186 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); 240 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
241 spin_unlock(&c->erase_completion_lock);
242
243 BUG_ON(!ref_obsolete(jeb->last_node));
187 244
188 /* Find the first node to be recovered, by skipping over every 245 /* Find the first node to be recovered, by skipping over every
189 node which ends before the wbuf starts, or which is obsolete. */ 246 node which ends before the wbuf starts, or which is obsolete. */
190 first_raw = &jeb->first_node; 247 for (next = raw = jeb->first_node; next; raw = next) {
191 while (*first_raw && 248 next = ref_next(raw);
192 (ref_obsolete(*first_raw) || 249
193 (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) { 250 if (ref_obsolete(raw) ||
194 D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", 251 (next && ref_offset(next) <= c->wbuf_ofs)) {
195 ref_offset(*first_raw), ref_flags(*first_raw), 252 dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
196 (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)), 253 ref_offset(raw), ref_flags(raw),
197 c->wbuf_ofs)); 254 (ref_offset(raw) + ref_totlen(c, jeb, raw)),
198 first_raw = &(*first_raw)->next_phys; 255 c->wbuf_ofs);
256 continue;
257 }
258 dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
259 ref_offset(raw), ref_flags(raw),
260 (ref_offset(raw) + ref_totlen(c, jeb, raw)));
261
262 first_raw = raw;
263 break;
199 } 264 }
200 265
201 if (!*first_raw) { 266 if (!first_raw) {
202 /* All nodes were obsolete. Nothing to recover. */ 267 /* All nodes were obsolete. Nothing to recover. */
203 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); 268 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
204 spin_unlock(&c->erase_completion_lock); 269 c->wbuf_len = 0;
205 return; 270 return;
206 } 271 }
207 272
208 start = ref_offset(*first_raw); 273 start = ref_offset(first_raw);
209 end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw); 274 end = ref_offset(jeb->last_node);
210 275 nr_refile = 1;
211 /* Find the last node to be recovered */
212 raw = first_raw;
213 while ((*raw)) {
214 if (!ref_obsolete(*raw))
215 end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
216 276
217 raw = &(*raw)->next_phys; 277 /* Count the number of refs which need to be copied */
218 } 278 while ((raw = ref_next(raw)) != jeb->last_node)
219 spin_unlock(&c->erase_completion_lock); 279 nr_refile++;
220 280
221 D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end)); 281 dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
282 start, end, end - start, nr_refile);
222 283
223 buf = NULL; 284 buf = NULL;
224 if (start < c->wbuf_ofs) { 285 if (start < c->wbuf_ofs) {
@@ -233,28 +294,37 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
233 } 294 }
234 295
235 /* Do the read... */ 296 /* Do the read... */
236 if (jffs2_cleanmarker_oob(c)) 297 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
237 ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
238 else
239 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
240 298
241 if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) { 299 /* ECC recovered ? */
242 /* ECC recovered */ 300 if ((ret == -EUCLEAN || ret == -EBADMSG) &&
301 (retlen == c->wbuf_ofs - start))
243 ret = 0; 302 ret = 0;
244 } 303
245 if (ret || retlen != c->wbuf_ofs - start) { 304 if (ret || retlen != c->wbuf_ofs - start) {
246 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n"); 305 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
247 306
248 kfree(buf); 307 kfree(buf);
249 buf = NULL; 308 buf = NULL;
250 read_failed: 309 read_failed:
251 first_raw = &(*first_raw)->next_phys; 310 first_raw = ref_next(first_raw);
311 nr_refile--;
312 while (first_raw && ref_obsolete(first_raw)) {
313 first_raw = ref_next(first_raw);
314 nr_refile--;
315 }
316
252 /* If this was the only node to be recovered, give up */ 317 /* If this was the only node to be recovered, give up */
253 if (!(*first_raw)) 318 if (!first_raw) {
319 c->wbuf_len = 0;
254 return; 320 return;
321 }
255 322
256 /* It wasn't. Go on and try to recover nodes complete in the wbuf */ 323 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
257 start = ref_offset(*first_raw); 324 start = ref_offset(first_raw);
325 dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
326 start, end, end - start, nr_refile);
327
258 } else { 328 } else {
259 /* Read succeeded. Copy the remaining data from the wbuf */ 329 /* Read succeeded. Copy the remaining data from the wbuf */
260 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); 330 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
@@ -263,14 +333,23 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
263 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards. 333 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
264 Either 'buf' contains the data, or we find it in the wbuf */ 334 Either 'buf' contains the data, or we find it in the wbuf */
265 335
266
267 /* ... and get an allocation of space from a shiny new block instead */ 336 /* ... and get an allocation of space from a shiny new block instead */
268 ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len, JFFS2_SUMMARY_NOSUM_SIZE); 337 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
269 if (ret) { 338 if (ret) {
270 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); 339 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
271 kfree(buf); 340 kfree(buf);
272 return; 341 return;
273 } 342 }
343
344 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
345 if (ret) {
346 printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
347 kfree(buf);
348 return;
349 }
350
351 ofs = write_ofs(c);
352
274 if (end-start >= c->wbuf_pagesize) { 353 if (end-start >= c->wbuf_pagesize) {
275 /* Need to do another write immediately, but it's possible 354 /* Need to do another write immediately, but it's possible
276 that this is just because the wbuf itself is completely 355 that this is just because the wbuf itself is completely
@@ -288,36 +367,22 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
288 if (breakme++ == 20) { 367 if (breakme++ == 20) {
289 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); 368 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
290 breakme = 0; 369 breakme = 0;
291 c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen, 370 c->mtd->write(c->mtd, ofs, towrite, &retlen,
292 brokenbuf, NULL, c->oobinfo); 371 brokenbuf);
293 ret = -EIO; 372 ret = -EIO;
294 } else 373 } else
295#endif 374#endif
296 if (jffs2_cleanmarker_oob(c)) 375 ret = c->mtd->write(c->mtd, ofs, towrite, &retlen,
297 ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen, 376 rewrite_buf);
298 rewrite_buf, NULL, c->oobinfo);
299 else
300 ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);
301 377
302 if (ret || retlen != towrite) { 378 if (ret || retlen != towrite) {
303 /* Argh. We tried. Really we did. */ 379 /* Argh. We tried. Really we did. */
304 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n"); 380 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
305 kfree(buf); 381 kfree(buf);
306 382
307 if (retlen) { 383 if (retlen)
308 struct jffs2_raw_node_ref *raw2; 384 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
309
310 raw2 = jffs2_alloc_raw_node_ref();
311 if (!raw2)
312 return;
313 385
314 raw2->flash_offset = ofs | REF_OBSOLETE;
315 raw2->__totlen = ref_totlen(c, jeb, *first_raw);
316 raw2->next_phys = NULL;
317 raw2->next_in_ino = NULL;
318
319 jffs2_add_physical_node_ref(c, raw2);
320 }
321 return; 386 return;
322 } 387 }
323 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs); 388 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
@@ -326,12 +391,10 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
326 c->wbuf_ofs = ofs + towrite; 391 c->wbuf_ofs = ofs + towrite;
327 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); 392 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
328 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ 393 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
329 kfree(buf);
330 } else { 394 } else {
331 /* OK, now we're left with the dregs in whichever buffer we're using */ 395 /* OK, now we're left with the dregs in whichever buffer we're using */
332 if (buf) { 396 if (buf) {
333 memcpy(c->wbuf, buf, end-start); 397 memcpy(c->wbuf, buf, end-start);
334 kfree(buf);
335 } else { 398 } else {
336 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); 399 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
337 } 400 }
@@ -343,62 +406,111 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
343 new_jeb = &c->blocks[ofs / c->sector_size]; 406 new_jeb = &c->blocks[ofs / c->sector_size];
344 407
345 spin_lock(&c->erase_completion_lock); 408 spin_lock(&c->erase_completion_lock);
346 if (new_jeb->first_node) { 409 for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
347 /* Odd, but possible with ST flash later maybe */ 410 uint32_t rawlen = ref_totlen(c, jeb, raw);
348 new_jeb->last_node->next_phys = *first_raw; 411 struct jffs2_inode_cache *ic;
349 } else { 412 struct jffs2_raw_node_ref *new_ref;
350 new_jeb->first_node = *first_raw; 413 struct jffs2_raw_node_ref **adjust_ref = NULL;
351 } 414 struct jffs2_inode_info *f = NULL;
352
353 raw = first_raw;
354 while (*raw) {
355 uint32_t rawlen = ref_totlen(c, jeb, *raw);
356 415
357 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", 416 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
358 rawlen, ref_offset(*raw), ref_flags(*raw), ofs)); 417 rawlen, ref_offset(raw), ref_flags(raw), ofs));
418
419 ic = jffs2_raw_ref_to_ic(raw);
420
421 /* Ick. This XATTR mess should be fixed shortly... */
422 if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
423 struct jffs2_xattr_datum *xd = (void *)ic;
424 BUG_ON(xd->node != raw);
425 adjust_ref = &xd->node;
426 raw->next_in_ino = NULL;
427 ic = NULL;
428 } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
429 struct jffs2_xattr_datum *xr = (void *)ic;
430 BUG_ON(xr->node != raw);
431 adjust_ref = &xr->node;
432 raw->next_in_ino = NULL;
433 ic = NULL;
434 } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
435 struct jffs2_raw_node_ref **p = &ic->nodes;
436
437 /* Remove the old node from the per-inode list */
438 while (*p && *p != (void *)ic) {
439 if (*p == raw) {
440 (*p) = (raw->next_in_ino);
441 raw->next_in_ino = NULL;
442 break;
443 }
444 p = &((*p)->next_in_ino);
445 }
359 446
360 if (ref_obsolete(*raw)) { 447 if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
361 /* Shouldn't really happen much */ 448 /* If it's an in-core inode, then we have to adjust any
362 new_jeb->dirty_size += rawlen; 449 full_dirent or full_dnode structure to point to the
363 new_jeb->free_size -= rawlen; 450 new version instead of the old */
364 c->dirty_size += rawlen; 451 f = jffs2_gc_fetch_inode(c, ic->ino, ic->nlink);
365 } else { 452 if (IS_ERR(f)) {
366 new_jeb->used_size += rawlen; 453 /* Should never happen; it _must_ be present */
367 new_jeb->free_size -= rawlen; 454 JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
455 ic->ino, PTR_ERR(f));
456 BUG();
457 }
458 /* We don't lock f->sem. There's a number of ways we could
459 end up in here with it already being locked, and nobody's
460 going to modify it on us anyway because we hold the
461 alloc_sem. We're only changing one ->raw pointer too,
462 which we can get away with without upsetting readers. */
463 adjust_ref = jffs2_incore_replace_raw(c, f, raw,
464 (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
465 } else if (unlikely(ic->state != INO_STATE_PRESENT &&
466 ic->state != INO_STATE_CHECKEDABSENT &&
467 ic->state != INO_STATE_GC)) {
468 JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
469 BUG();
470 }
471 }
472
473 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
474
475 if (adjust_ref) {
476 BUG_ON(*adjust_ref != raw);
477 *adjust_ref = new_ref;
478 }
479 if (f)
480 jffs2_gc_release_inode(c, f);
481
482 if (!ref_obsolete(raw)) {
368 jeb->dirty_size += rawlen; 483 jeb->dirty_size += rawlen;
369 jeb->used_size -= rawlen; 484 jeb->used_size -= rawlen;
370 c->dirty_size += rawlen; 485 c->dirty_size += rawlen;
486 c->used_size -= rawlen;
487 raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
488 BUG_ON(raw->next_in_ino);
371 } 489 }
372 c->free_size -= rawlen;
373 (*raw)->flash_offset = ofs | ref_flags(*raw);
374 ofs += rawlen; 490 ofs += rawlen;
375 new_jeb->last_node = *raw;
376
377 raw = &(*raw)->next_phys;
378 } 491 }
379 492
493 kfree(buf);
494
380 /* Fix up the original jeb now it's on the bad_list */ 495 /* Fix up the original jeb now it's on the bad_list */
381 *first_raw = NULL; 496 if (first_raw == jeb->first_node) {
382 if (first_raw == &jeb->first_node) {
383 jeb->last_node = NULL;
384 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); 497 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
385 list_del(&jeb->list); 498 list_del(&jeb->list);
386 list_add(&jeb->list, &c->erase_pending_list); 499 list_add(&jeb->list, &c->erase_pending_list);
387 c->nr_erasing_blocks++; 500 c->nr_erasing_blocks++;
388 jffs2_erase_pending_trigger(c); 501 jffs2_erase_pending_trigger(c);
389 } 502 }
390 else
391 jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
392 503
393 jffs2_dbg_acct_sanity_check_nolock(c, jeb); 504 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
394 jffs2_dbg_acct_paranoia_check_nolock(c, jeb); 505 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
395 506
396 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); 507 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
397 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); 508 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
398 509
399 spin_unlock(&c->erase_completion_lock); 510 spin_unlock(&c->erase_completion_lock);
400 511
401 D1(printk(KERN_DEBUG "wbuf recovery completed OK\n")); 512 D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len));
513
402} 514}
403 515
404/* Meaning of pad argument: 516/* Meaning of pad argument:
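
The rewritten jffs2_wbuf_recover() above boils down to: find the first non-obsolete node in the failing block, copy everything from that offset up to the end of the last node into a freshly reserved block, and obsolete the originals so the old block can be refiled. A rough, self-contained C sketch of that idea (user-space only; struct node, struct block and the counters are invented for the illustration, not JFFS2 types):

    #include <stdint.h>
    #include <string.h>

    struct node  { uint32_t ofs, len; int obsolete; };
    struct block { uint8_t data[4096]; uint32_t used, dirty; };

    /* Copy the recoverable span from 'bad' into 'fresh' and mark the
     * originals obsolete.  Returns the number of bytes refiled, or 0 if
     * every node was already obsolete and nothing needs recovery. */
    static uint32_t recover_block(struct block *bad, struct block *fresh,
                                  struct node *nodes, int nr_nodes)
    {
            int first = -1, i;
            uint32_t start, end;

            for (i = 0; i < nr_nodes; i++)
                    if (!nodes[i].obsolete) {
                            first = i;
                            break;
                    }
            if (first < 0)
                    return 0;                       /* all obsolete */

            start = nodes[first].ofs;
            end = nodes[nr_nodes - 1].ofs + nodes[nr_nodes - 1].len;

            memcpy(fresh->data, bad->data + start, end - start);
            fresh->used += end - start;

            for (i = first; i < nr_nodes; i++) {
                    if (nodes[i].obsolete)
                            continue;
                    bad->used -= nodes[i].len;      /* space becomes dirty */
                    bad->dirty += nodes[i].len;
                    nodes[i].obsolete = 1;
            }
            return end - start;
    }
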
@@ -412,6 +524,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
412 524
413static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) 525static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
414{ 526{
527 struct jffs2_eraseblock *wbuf_jeb;
415 int ret; 528 int ret;
416 size_t retlen; 529 size_t retlen;
417 530
@@ -429,6 +542,10 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
429 if (!c->wbuf_len) /* already checked c->wbuf above */ 542 if (!c->wbuf_len) /* already checked c->wbuf above */
430 return 0; 543 return 0;
431 544
545 wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
546 if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
547 return -ENOMEM;
548
432 /* claim remaining space on the page 549 /* claim remaining space on the page
433 this happens, if we have a change to a new block, 550 this happens, if we have a change to a new block,
434 or if fsync forces us to flush the writebuffer. 551 or if fsync forces us to flush the writebuffer.
@@ -458,15 +575,12 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
458 if (breakme++ == 20) { 575 if (breakme++ == 20) {
459 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); 576 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
460 breakme = 0; 577 breakme = 0;
461 c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, 578 c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
462 &retlen, brokenbuf, NULL, c->oobinfo); 579 brokenbuf);
463 ret = -EIO; 580 ret = -EIO;
464 } else 581 } else
465#endif 582#endif
466 583
467 if (jffs2_cleanmarker_oob(c))
468 ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
469 else
470 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); 584 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
471 585
472 if (ret || retlen != c->wbuf_pagesize) { 586 if (ret || retlen != c->wbuf_pagesize) {
@@ -483,32 +597,34 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
483 return ret; 597 return ret;
484 } 598 }
485 599
486 spin_lock(&c->erase_completion_lock);
487
488 /* Adjust free size of the block if we padded. */ 600 /* Adjust free size of the block if we padded. */
489 if (pad) { 601 if (pad) {
490 struct jffs2_eraseblock *jeb; 602 uint32_t waste = c->wbuf_pagesize - c->wbuf_len;
491
492 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
493 603
494 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", 604 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
495 (jeb==c->nextblock)?"next":"", jeb->offset)); 605 (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset));
496 606
497 /* wbuf_pagesize - wbuf_len is the amount of space that's to be 607 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
498 padded. If there is less free space in the block than that, 608 padded. If there is less free space in the block than that,
499 something screwed up */ 609 something screwed up */
500 if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) { 610 if (wbuf_jeb->free_size < waste) {
501 printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", 611 printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
502 c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len); 612 c->wbuf_ofs, c->wbuf_len, waste);
503 printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", 613 printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
504 jeb->offset, jeb->free_size); 614 wbuf_jeb->offset, wbuf_jeb->free_size);
505 BUG(); 615 BUG();
506 } 616 }
507 jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len); 617
508 c->free_size -= (c->wbuf_pagesize - c->wbuf_len); 618 spin_lock(&c->erase_completion_lock);
509 jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len); 619
510 c->wasted_size += (c->wbuf_pagesize - c->wbuf_len); 620 jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
511 } 621 /* FIXME: that made it count as dirty. Convert to wasted */
622 wbuf_jeb->dirty_size -= waste;
623 c->dirty_size -= waste;
624 wbuf_jeb->wasted_size += waste;
625 c->wasted_size += waste;
626 } else
627 spin_lock(&c->erase_completion_lock);
512 628
513 /* Stick any now-obsoleted blocks on the erase_pending_list */ 629 /* Stick any now-obsoleted blocks on the erase_pending_list */
514 jffs2_refile_wbuf_blocks(c); 630 jffs2_refile_wbuf_blocks(c);
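
The padding path above now links a REF_OBSOLETE ref for the pad bytes and then converts the accounting from dirty to wasted. The net effect on the counters, as a hedged stand-alone sketch (field names invented):

    #include <stdint.h>
    #include <string.h>

    struct acct { uint32_t free, dirty, wasted; };

    /* Fill the rest of the page with 0xFF and book it as wasted space. */
    static uint32_t pad_and_account(uint8_t *wbuf, uint32_t wbuf_len,
                                    uint32_t pagesize, struct acct *a)
    {
            uint32_t waste = pagesize - wbuf_len;

            memset(wbuf + wbuf_len, 0xFF, waste);
            a->free -= waste;       /* the obsolete ref consumes free space */
            a->wasted += waste;     /* ...but it is wasted, not dirty       */
            return waste;
    }
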
@@ -603,20 +719,30 @@ int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
603 719
604 return ret; 720 return ret;
605} 721}
606int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino) 722
723static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
724 size_t len)
607{ 725{
608 struct kvec outvecs[3]; 726 if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
609 uint32_t totlen = 0; 727 return 0;
610 uint32_t split_ofs = 0; 728
611 uint32_t old_totlen; 729 if (len > (c->wbuf_pagesize - c->wbuf_len))
612 int ret, splitvec = -1; 730 len = c->wbuf_pagesize - c->wbuf_len;
613 int invec, outvec; 731 memcpy(c->wbuf + c->wbuf_len, buf, len);
614 size_t wbuf_retlen; 732 c->wbuf_len += (uint32_t) len;
615 unsigned char *wbuf_ptr; 733 return len;
616 size_t donelen = 0; 734}
735
736int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
737 unsigned long count, loff_t to, size_t *retlen,
738 uint32_t ino)
739{
740 struct jffs2_eraseblock *jeb;
741 size_t wbuf_retlen, donelen = 0;
617 uint32_t outvec_to = to; 742 uint32_t outvec_to = to;
743 int ret, invec;
618 744
619 /* If not NAND flash, don't bother */ 745 /* If not writebuffered flash, don't bother */
620 if (!jffs2_is_writebuffered(c)) 746 if (!jffs2_is_writebuffered(c))
621 return jffs2_flash_direct_writev(c, invecs, count, to, retlen); 747 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
622 748
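
The new jffs2_fill_wbuf() helper above has three behaviours: bypass (buffer empty and the request is at least a full page, so the caller should write directly), a plain copy, and a copy clamped to the space left in the page. A user-space sketch with a tiny driver exercising all three (PAGESIZE and the wb_* names are invented):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGESIZE 16

    static uint8_t wb[PAGESIZE];
    static size_t  wb_len;

    static size_t fill_wbuf(const uint8_t *buf, size_t len)
    {
            /* Empty buffer and a page-sized (or larger) request: tell the
             * caller to write straight to flash instead of buffering. */
            if (len && !wb_len && len >= PAGESIZE)
                    return 0;

            if (len > PAGESIZE - wb_len)
                    len = PAGESIZE - wb_len;        /* only what still fits */
            memcpy(wb + wb_len, buf, len);
            wb_len += len;
            return len;
    }

    int main(void)
    {
            uint8_t junk[32] = { 0 };

            printf("%zu\n", fill_wbuf(junk, 20));   /* 0: bypass            */
            printf("%zu\n", fill_wbuf(junk, 10));   /* 10: plain copy       */
            printf("%zu\n", fill_wbuf(junk, 10));   /* 6: clamped to page   */
            return 0;
    }
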
@@ -629,34 +755,22 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
629 memset(c->wbuf,0xff,c->wbuf_pagesize); 755 memset(c->wbuf,0xff,c->wbuf_pagesize);
630 } 756 }
631 757
632 /* Fixup the wbuf if we are moving to a new eraseblock. The checks below 758 /*
633 fail for ECC'd NOR because cleanmarker == 16, so a block starts at 759 * Sanity checks on target address. It's permitted to write
634 xxx0010. */ 760 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
635 if (jffs2_nor_ecc(c)) { 761 * write at the beginning of a new erase block. Anything else,
636 if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) { 762 * and you die. New block starts at xxx000c (0-b = block
637 c->wbuf_ofs = PAGE_DIV(to); 763 * header)
638 c->wbuf_len = PAGE_MOD(to); 764 */
639 memset(c->wbuf,0xff,c->wbuf_pagesize);
640 }
641 }
642
643 /* Sanity checks on target address.
644 It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
645 and it's permitted to write at the beginning of a new
646 erase block. Anything else, and you die.
647 New block starts at xxx000c (0-b = block header)
648 */
649 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { 765 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
650 /* It's a write to a new block */ 766 /* It's a write to a new block */
651 if (c->wbuf_len) { 767 if (c->wbuf_len) {
652 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs)); 768 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx "
769 "causes flush of wbuf at 0x%08x\n",
770 (unsigned long)to, c->wbuf_ofs));
653 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); 771 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
654 if (ret) { 772 if (ret)
655 /* the underlying layer has to check wbuf_len to do the cleanup */ 773 goto outerr;
656 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
657 *retlen = 0;
658 goto exit;
659 }
660 } 774 }
661 /* set pointer to new block */ 775 /* set pointer to new block */
662 c->wbuf_ofs = PAGE_DIV(to); 776 c->wbuf_ofs = PAGE_DIV(to);
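
The comment rewritten above states the invariant the write path relies on: a write either continues directly behind the buffered data (rounded up to a 4-byte boundary, which is what JFFS2's PAD() does) or starts a brand-new eraseblock; anything else is a bug. A small sketch of that check, with an invented eraseblock size:

    #include <stdint.h>

    #define SECTOR_SIZE 65536u                 /* eraseblock size, illustrative */
    #define PAD(x)      (((x) + 3) & ~3u)      /* 4-byte padding as in JFFS2    */

    static int write_addr_ok(uint32_t to, uint32_t wbuf_ofs, uint32_t wbuf_len)
    {
            if (to / SECTOR_SIZE != wbuf_ofs / SECTOR_SIZE)
                    return 1;                      /* new eraseblock: always ok */
            return to == PAD(wbuf_ofs + wbuf_len); /* else must be contiguous   */
    }
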
@@ -665,165 +779,70 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsig
665 779
666 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { 780 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
667 /* We're not writing immediately after the writebuffer. Bad. */ 781 /* We're not writing immediately after the writebuffer. Bad. */
668 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to); 782 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write "
783 "to %08lx\n", (unsigned long)to);
669 if (c->wbuf_len) 784 if (c->wbuf_len)
670 printk(KERN_CRIT "wbuf was previously %08x-%08x\n", 785 printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
671 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); 786 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
672 BUG(); 787 BUG();
673 } 788 }
674 789
675 /* Note outvecs[3] above. We know count is never greater than 2 */ 790 /* adjust alignment offset */
676 if (count > 2) { 791 if (c->wbuf_len != PAGE_MOD(to)) {
677 printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count); 792 c->wbuf_len = PAGE_MOD(to);
678 BUG(); 793 /* take care of alignment to next page */
679 } 794 if (!c->wbuf_len) {
680 795 c->wbuf_len = c->wbuf_pagesize;
681 invec = 0; 796 ret = __jffs2_flush_wbuf(c, NOPAD);
682 outvec = 0; 797 if (ret)
683 798 goto outerr;
684 /* Fill writebuffer first, if already in use */
685 if (c->wbuf_len) {
686 uint32_t invec_ofs = 0;
687
688 /* adjust alignment offset */
689 if (c->wbuf_len != PAGE_MOD(to)) {
690 c->wbuf_len = PAGE_MOD(to);
691 /* take care of alignment to next page */
692 if (!c->wbuf_len)
693 c->wbuf_len = c->wbuf_pagesize;
694 }
695
696 while(c->wbuf_len < c->wbuf_pagesize) {
697 uint32_t thislen;
698
699 if (invec == count)
700 goto alldone;
701
702 thislen = c->wbuf_pagesize - c->wbuf_len;
703
704 if (thislen >= invecs[invec].iov_len)
705 thislen = invecs[invec].iov_len;
706
707 invec_ofs = thislen;
708
709 memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
710 c->wbuf_len += thislen;
711 donelen += thislen;
712 /* Get next invec, if actual did not fill the buffer */
713 if (c->wbuf_len < c->wbuf_pagesize)
714 invec++;
715 }
716
717 /* write buffer is full, flush buffer */
718 ret = __jffs2_flush_wbuf(c, NOPAD);
719 if (ret) {
720 /* the underlying layer has to check wbuf_len to do the cleanup */
721 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
722 /* Retlen zero to make sure our caller doesn't mark the space dirty.
723 We've already done everything that's necessary */
724 *retlen = 0;
725 goto exit;
726 }
727 outvec_to += donelen;
728 c->wbuf_ofs = outvec_to;
729
730 /* All invecs done ? */
731 if (invec == count)
732 goto alldone;
733
734 /* Set up the first outvec, containing the remainder of the
735 invec we partially used */
736 if (invecs[invec].iov_len > invec_ofs) {
737 outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
738 totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
739 if (totlen > c->wbuf_pagesize) {
740 splitvec = outvec;
741 split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
742 }
743 outvec++;
744 }
745 invec++;
746 }
747
748 /* OK, now we've flushed the wbuf and the start of the bits
749 we have been asked to write, now to write the rest.... */
750
751 /* totlen holds the amount of data still to be written */
752 old_totlen = totlen;
753 for ( ; invec < count; invec++,outvec++ ) {
754 outvecs[outvec].iov_base = invecs[invec].iov_base;
755 totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
756 if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
757 splitvec = outvec;
758 split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
759 old_totlen = totlen;
760 } 799 }
761 } 800 }
762 801
763 /* Now the outvecs array holds all the remaining data to write */ 802 for (invec = 0; invec < count; invec++) {
764 /* Up to splitvec,split_ofs is to be written immediately. The rest 803 int vlen = invecs[invec].iov_len;
765 goes into the (now-empty) wbuf */ 804 uint8_t *v = invecs[invec].iov_base;
766
767 if (splitvec != -1) {
768 uint32_t remainder;
769
770 remainder = outvecs[splitvec].iov_len - split_ofs;
771 outvecs[splitvec].iov_len = split_ofs;
772
773 /* We did cross a page boundary, so we write some now */
774 if (jffs2_cleanmarker_oob(c))
775 ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
776 else
777 ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
778
779 if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
780 /* At this point we have no problem,
781 c->wbuf is empty. However refile nextblock to avoid
782 writing again to same address.
783 */
784 struct jffs2_eraseblock *jeb;
785 805
786 spin_lock(&c->erase_completion_lock); 806 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
787 807
788 jeb = &c->blocks[outvec_to / c->sector_size]; 808 if (c->wbuf_len == c->wbuf_pagesize) {
789 jffs2_block_refile(c, jeb, REFILE_ANYWAY); 809 ret = __jffs2_flush_wbuf(c, NOPAD);
790 810 if (ret)
791 *retlen = 0; 811 goto outerr;
792 spin_unlock(&c->erase_completion_lock);
793 goto exit;
794 } 812 }
795 813 vlen -= wbuf_retlen;
814 outvec_to += wbuf_retlen;
796 donelen += wbuf_retlen; 815 donelen += wbuf_retlen;
797 c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen); 816 v += wbuf_retlen;
798 817
799 if (remainder) { 818 if (vlen >= c->wbuf_pagesize) {
800 outvecs[splitvec].iov_base += split_ofs; 819 ret = c->mtd->write(c->mtd, outvec_to, PAGE_DIV(vlen),
801 outvecs[splitvec].iov_len = remainder; 820 &wbuf_retlen, v);
802 } else { 821 if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
803 splitvec++; 822 goto outfile;
823
824 vlen -= wbuf_retlen;
825 outvec_to += wbuf_retlen;
826 c->wbuf_ofs = outvec_to;
827 donelen += wbuf_retlen;
828 v += wbuf_retlen;
804 } 829 }
805 830
806 } else { 831 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
807 splitvec = 0; 832 if (c->wbuf_len == c->wbuf_pagesize) {
808 } 833 ret = __jffs2_flush_wbuf(c, NOPAD);
809 834 if (ret)
810 /* Now splitvec points to the start of the bits we have to copy 835 goto outerr;
811 into the wbuf */ 836 }
812 wbuf_ptr = c->wbuf;
813 837
814 for ( ; splitvec < outvec; splitvec++) { 838 outvec_to += wbuf_retlen;
815 /* Don't copy the wbuf into itself */ 839 donelen += wbuf_retlen;
816 if (outvecs[splitvec].iov_base == c->wbuf)
817 continue;
818 memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
819 wbuf_ptr += outvecs[splitvec].iov_len;
820 donelen += outvecs[splitvec].iov_len;
821 } 840 }
822 c->wbuf_len = wbuf_ptr - c->wbuf;
823 841
824 /* If there's a remainder in the wbuf and it's a non-GC write, 842 /*
825 remember that the wbuf affects this ino */ 843 * If there's a remainder in the wbuf and it's a non-GC write,
826alldone: 844 * remember that the wbuf affects this ino
845 */
827 *retlen = donelen; 846 *retlen = donelen;
828 847
829 if (jffs2_sum_active()) { 848 if (jffs2_sum_active()) {
@@ -836,8 +855,24 @@ alldone:
836 jffs2_wbuf_dirties_inode(c, ino); 855 jffs2_wbuf_dirties_inode(c, ino);
837 856
838 ret = 0; 857 ret = 0;
858 up_write(&c->wbuf_sem);
859 return ret;
839 860
840exit: 861outfile:
862 /*
863 * At this point we have no problem, c->wbuf is empty. However
864 * refile nextblock to avoid writing again to same address.
865 */
866
867 spin_lock(&c->erase_completion_lock);
868
869 jeb = &c->blocks[outvec_to / c->sector_size];
870 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
871
872 spin_unlock(&c->erase_completion_lock);
873
874outerr:
875 *retlen = 0;
841 up_write(&c->wbuf_sem); 876 up_write(&c->wbuf_sem);
842 return ret; 877 return ret;
843} 878}
@@ -846,7 +881,8 @@ exit:
846 * This is the entry for flash write. 881 * This is the entry for flash write.
847 * Check, if we work on NAND FLASH, if so build a kvec and write it via writev 882
848*/ 883*/
849int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) 884int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
885 size_t *retlen, const u_char *buf)
850{ 886{
851 struct kvec vecs[1]; 887 struct kvec vecs[1];
852 888
@@ -871,25 +907,23 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
871 907
872 /* Read flash */ 908 /* Read flash */
873 down_read(&c->wbuf_sem); 909 down_read(&c->wbuf_sem);
874 if (jffs2_cleanmarker_oob(c)) 910 ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
875 ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo); 911
876 else 912 if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
877 ret = c->mtd->read(c->mtd, ofs, len, retlen, buf); 913 if (ret == -EBADMSG)
878 914 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)"
879 if ( (ret == -EBADMSG) && (*retlen == len) ) { 915 " returned ECC error\n", len, ofs);
880 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
881 len, ofs);
882 /* 916 /*
883 * We have the raw data without ECC correction in the buffer, maybe 917 * We have the raw data without ECC correction in the buffer,
884 * we are lucky and all data or parts are correct. We check the node. 918 * maybe we are lucky and all data or parts are correct. We
885 * If data are corrupted node check will sort it out. 919 * check the node. If data are corrupted node check will sort
886 * We keep this block, it will fail on write or erase and then we 920 it out. We keep this block, it will fail on write or erase
887 * mark it bad. Or should we do that now? But we should give him a chance. 921 and then we mark it bad. Or should we do that now? But we
888 * Maybe we had a system crash or power loss before the ecc write or 922 * should give him a chance. Maybe we had a system crash or
889 * an erase was completed. 923 power loss before the ecc write or an erase was completed.
890 * So we return success. :) 924 * So we return success. :)
891 */ 925 */
892 ret = 0; 926 ret = 0;
893 } 927 }
894 928
895 /* if no writebuffer available or write buffer empty, return */ 929 /* if no writebuffer available or write buffer empty, return */
@@ -911,7 +945,7 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re
911 orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ 945 orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
912 if (orbf > len) /* is write beyond write buffer ? */ 946 if (orbf > len) /* is write beyond write buffer ? */
913 goto exit; 947 goto exit;
914 lwbf = len - orbf; /* number of bytes to copy */ 948 lwbf = len - orbf; /* number of bytes to copy */
915 if (lwbf > c->wbuf_len) 949 if (lwbf > c->wbuf_len)
916 lwbf = c->wbuf_len; 950 lwbf = c->wbuf_len;
917 } 951 }
@@ -923,158 +957,159 @@ exit:
923 return ret; 957 return ret;
924} 958}
925 959
960#define NR_OOB_SCAN_PAGES 4
961
926/* 962/*
927 * Check, if the out of band area is empty 963 * Check, if the out of band area is empty
928 */ 964 */
929int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode) 965int jffs2_check_oob_empty(struct jffs2_sb_info *c,
966 struct jffs2_eraseblock *jeb, int mode)
930{ 967{
931 unsigned char *buf; 968 int i, page, ret;
932 int ret = 0; 969 int oobsize = c->mtd->oobsize;
933 int i,len,page; 970 struct mtd_oob_ops ops;
934 size_t retlen; 971
935 int oob_size; 972 ops.len = NR_OOB_SCAN_PAGES * oobsize;
936 973 ops.ooblen = oobsize;
937 /* allocate a buffer for all oob data in this sector */ 974 ops.oobbuf = c->oobbuf;
938 oob_size = c->mtd->oobsize; 975 ops.ooboffs = 0;
939 len = 4 * oob_size; 976 ops.datbuf = NULL;
940 buf = kmalloc(len, GFP_KERNEL); 977 ops.mode = MTD_OOB_PLACE;
941 if (!buf) { 978
942 printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n"); 979 ret = c->mtd->read_oob(c->mtd, jeb->offset, &ops);
943 return -ENOMEM;
944 }
945 /*
946 * if mode = 0, we scan for a total empty oob area, else we have
947 * to take care of the cleanmarker in the first page of the block
948 */
949 ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
950 if (ret) { 980 if (ret) {
951 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); 981 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB "
952 goto out; 982 "failed %d for block at %08x\n", ret, jeb->offset));
983 return ret;
953 } 984 }
954 985
955 if (retlen < len) { 986 if (ops.retlen < ops.len) {
956 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read " 987 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB "
957 "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset)); 988 "returned short read (%zd bytes not %d) for block "
958 ret = -EIO; 989 "at %08x\n", ops.retlen, ops.len, jeb->offset));
959 goto out; 990 return -EIO;
960 } 991 }
961 992
962 /* Special check for first page */ 993 /* Special check for first page */
963 for(i = 0; i < oob_size ; i++) { 994 for(i = 0; i < oobsize ; i++) {
964 /* Yeah, we know about the cleanmarker. */ 995 /* Yeah, we know about the cleanmarker. */
965 if (mode && i >= c->fsdata_pos && 996 if (mode && i >= c->fsdata_pos &&
966 i < c->fsdata_pos + c->fsdata_len) 997 i < c->fsdata_pos + c->fsdata_len)
967 continue; 998 continue;
968 999
969 if (buf[i] != 0xFF) { 1000 if (ops.oobbuf[i] != 0xFF) {
970 D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n", 1001 D2(printk(KERN_DEBUG "Found %02x at %x in OOB for "
971 buf[i], i, jeb->offset)); 1002 "%08x\n", ops.oobbuf[i], i, jeb->offset));
972 ret = 1; 1003 return 1;
973 goto out;
974 } 1004 }
975 } 1005 }
976 1006
977 /* we know, we are aligned :) */ 1007 /* we know, we are aligned :) */
978 for (page = oob_size; page < len; page += sizeof(long)) { 1008 for (page = oobsize; page < ops.len; page += sizeof(long)) {
979 unsigned long dat = *(unsigned long *)(&buf[page]); 1009 long dat = *(long *)(&ops.oobbuf[page]);
980 if(dat != -1) { 1010 if(dat != -1)
981 ret = 1; 1011 return 1;
982 goto out;
983 }
984 } 1012 }
985 1013 return 0;
986out:
987 kfree(buf);
988
989 return ret;
990} 1014}
991 1015
992/* 1016/*
993* Scan for a valid cleanmarker and for bad blocks 1017 * Scan for a valid cleanmarker and for bad blocks
994* For virtual blocks (concatenated physical blocks) check the cleanmarker 1018 */
995* only in the first page of the first physical block, but scan for bad blocks in all 1019int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c,
996* physical blocks 1020 struct jffs2_eraseblock *jeb)
997*/
998int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
999{ 1021{
1000 struct jffs2_unknown_node n; 1022 struct jffs2_unknown_node n;
1001 unsigned char buf[2 * NAND_MAX_OOBSIZE]; 1023 struct mtd_oob_ops ops;
1002 unsigned char *p; 1024 int oobsize = c->mtd->oobsize;
1003 int ret, i, cnt, retval = 0; 1025 unsigned char *p,*b;
1004 size_t retlen, offset; 1026 int i, ret;
1005 int oob_size; 1027 size_t offset = jeb->offset;
1006 1028
1007 offset = jeb->offset; 1029 /* Check first if the block is bad. */
1008 oob_size = c->mtd->oobsize; 1030 if (c->mtd->block_isbad(c->mtd, offset)) {
1009 1031 D1 (printk(KERN_WARNING "jffs2_check_nand_cleanmarker()"
1010 /* Loop through the physical blocks */ 1032 ": Bad block at %08x\n", jeb->offset));
1011 for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) { 1033 return 2;
1012 /* Check first if the block is bad. */ 1034 }
1013 if (c->mtd->block_isbad (c->mtd, offset)) {
1014 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
1015 return 2;
1016 }
1017 /*
1018 * We read oob data from page 0 and 1 of the block.
1019 * page 0 contains cleanmarker and badblock info
1020 * page 1 contains failure count of this block
1021 */
1022 ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
1023 1035
1024 if (ret) { 1036 ops.len = oobsize;
1025 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset)); 1037 ops.ooblen = oobsize;
1026 return ret; 1038 ops.oobbuf = c->oobbuf;
1027 } 1039 ops.ooboffs = 0;
1028 if (retlen < (oob_size << 1)) { 1040 ops.datbuf = NULL;
1029 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset)); 1041 ops.mode = MTD_OOB_PLACE;
1030 return -EIO;
1031 }
1032 1042
1033 /* Check cleanmarker only on the first physical block */ 1043 ret = c->mtd->read_oob(c->mtd, offset, &ops);
1034 if (!cnt) { 1044 if (ret) {
1035 n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK); 1045 D1 (printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): "
1036 n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER); 1046 "Read OOB failed %d for block at %08x\n",
1037 n.totlen = cpu_to_je32 (8); 1047 ret, jeb->offset));
1038 p = (unsigned char *) &n; 1048 return ret;
1049 }
1039 1050
1040 for (i = 0; i < c->fsdata_len; i++) { 1051 if (ops.retlen < ops.len) {
1041 if (buf[c->fsdata_pos + i] != p[i]) { 1052 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): "
1042 retval = 1; 1053 "Read OOB return short read (%zd bytes not %d) "
1043 } 1054 "for block at %08x\n", ops.retlen, ops.len,
1044 } 1055 jeb->offset));
1045 D1(if (retval == 1) { 1056 return -EIO;
1046 printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
1047 printk(KERN_WARNING "OOB at %08x was ", offset);
1048 for (i=0; i < oob_size; i++) {
1049 printk("%02x ", buf[i]);
1050 }
1051 printk("\n");
1052 })
1053 }
1054 offset += c->mtd->erasesize;
1055 } 1057 }
1056 return retval; 1058
1059 n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
1060 n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
1061 n.totlen = cpu_to_je32 (8);
1062 p = (unsigned char *) &n;
1063 b = c->oobbuf + c->fsdata_pos;
1064
1065 for (i = c->fsdata_len; i; i--) {
1066 if (*b++ != *p++)
1067 ret = 1;
1068 }
1069
1070 D1(if (ret == 1) {
1071 printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): "
1072 "Cleanmarker node not detected in block at %08x\n",
1073 offset);
1074 printk(KERN_WARNING "OOB at %08zx was ", offset);
1075 for (i=0; i < oobsize; i++)
1076 printk("%02x ", c->oobbuf[i]);
1077 printk("\n");
1078 });
1079 return ret;
1057} 1080}
1058 1081
1059int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) 1082int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1083 struct jffs2_eraseblock *jeb)
1060{ 1084{
1061 struct jffs2_unknown_node n; 1085 struct jffs2_unknown_node n;
1062 int ret; 1086 int ret;
1063 size_t retlen; 1087 struct mtd_oob_ops ops;
1064 1088
1065 n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); 1089 n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
1066 n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER); 1090 n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
1067 n.totlen = cpu_to_je32(8); 1091 n.totlen = cpu_to_je32(8);
1068 1092
1069 ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n); 1093 ops.len = c->fsdata_len;
1094 ops.ooblen = c->fsdata_len;;
1095 ops.oobbuf = (uint8_t *)&n;
1096 ops.ooboffs = c->fsdata_pos;
1097 ops.datbuf = NULL;
1098 ops.mode = MTD_OOB_PLACE;
1099
1100 ret = c->mtd->write_oob(c->mtd, jeb->offset, &ops);
1070 1101
1071 if (ret) { 1102 if (ret) {
1072 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); 1103 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): "
1104 "Write failed for block at %08x: error %d\n",
1105 jeb->offset, ret));
1073 return ret; 1106 return ret;
1074 } 1107 }
1075 if (retlen != c->fsdata_len) { 1108 if (ops.retlen != ops.len) {
1076 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len)); 1109 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): "
1077 return ret; 1110 "Short write for block at %08x: %zd not %d\n",
1111 jeb->offset, ops.retlen, ops.len));
1112 return -EIO;
1078 } 1113 }
1079 return 0; 1114 return 0;
1080} 1115}
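
All three OOB helpers above now go through a struct mtd_oob_ops and mtd->read_oob()/write_oob() instead of the old length/retlen calls. The emptiness test itself is simple; here is a hedged user-space version of that scan (sizes and the cleanmarker window are invented, and the MTD plumbing is left out):

    #include <stdint.h>
    #include <stddef.h>

    /* Return 1 if the OOB bytes of 'pages' pages are all 0xFF, ignoring
     * the cleanmarker window in page 0; return 0 on the first programmed
     * byte found. */
    static int oob_is_empty(const uint8_t *oob, size_t oobsize, int pages,
                            size_t marker_pos, size_t marker_len)
    {
            int page;
            size_t i;

            for (page = 0; page < pages; page++) {
                    for (i = 0; i < oobsize; i++) {
                            if (page == 0 && i >= marker_pos &&
                                i < marker_pos + marker_len)
                                    continue;       /* cleanmarker is allowed */
                            if (oob[page * oobsize + i] != 0xFF)
                                    return 0;
                    }
            }
            return 1;
    }
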
@@ -1108,18 +1143,9 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
1108 return 1; 1143 return 1;
1109} 1144}
1110 1145
1111#define NAND_JFFS2_OOB16_FSDALEN 8
1112
1113static struct nand_oobinfo jffs2_oobinfo_docecc = {
1114 .useecc = MTD_NANDECC_PLACE,
1115 .eccbytes = 6,
1116 .eccpos = {0,1,2,3,4,5}
1117};
1118
1119
1120static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c) 1146static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1121{ 1147{
1122 struct nand_oobinfo *oinfo = &c->mtd->oobinfo; 1148 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1123 1149
1124 /* Do this only, if we have an oob buffer */ 1150 /* Do this only, if we have an oob buffer */
1125 if (!c->mtd->oobsize) 1151 if (!c->mtd->oobsize)
@@ -1129,33 +1155,23 @@ static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1129 c->cleanmarker_size = 0; 1155 c->cleanmarker_size = 0;
1130 1156
1131 /* Should we use autoplacement ? */ 1157 /* Should we use autoplacement ? */
1132 if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) { 1158 if (!oinfo) {
1133 D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n")); 1159 D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
1134 /* Get the position of the free bytes */ 1160 return -EINVAL;
1135 if (!oinfo->oobfree[0][1]) { 1161 }
1136 printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
1137 return -ENOSPC;
1138 }
1139 c->fsdata_pos = oinfo->oobfree[0][0];
1140 c->fsdata_len = oinfo->oobfree[0][1];
1141 if (c->fsdata_len > 8)
1142 c->fsdata_len = 8;
1143 } else {
1144 /* This is just a legacy fallback and should go away soon */
1145 switch(c->mtd->ecctype) {
1146 case MTD_ECC_RS_DiskOnChip:
1147 printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
1148 c->oobinfo = &jffs2_oobinfo_docecc;
1149 c->fsdata_pos = 6;
1150 c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
1151 c->badblock_pos = 15;
1152 break;
1153 1162
1154 default: 1163 D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
1155 D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacment info found\n")); 1164 /* Get the position of the free bytes */
1156 return -EINVAL; 1165 if (!oinfo->oobfree[0].length) {
1157 } 1166 printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep."
1167 " Autoplacement selected and no empty space in oob\n");
1168 return -ENOSPC;
1158 } 1169 }
1170 c->fsdata_pos = oinfo->oobfree[0].offset;
1171 c->fsdata_len = oinfo->oobfree[0].length;
1172 if (c->fsdata_len > 8)
1173 c->fsdata_len = 8;
1174
1159 return 0; 1175 return 0;
1160} 1176}
1161 1177
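
jffs2_nand_set_oobinfo() above now takes the cleanmarker position straight from the ECC layout's first free OOB slot and clamps it to the 8 bytes JFFS2 uses. A sketch of just that placement step (struct free_region stands in for the kernel layout entry and is an assumption of the example):

    #include <stdint.h>

    struct free_region { uint32_t offset, length; };

    static int place_fsdata(const struct free_region *freelist,
                            uint32_t *pos, uint32_t *len)
    {
            if (!freelist || !freelist[0].length)
                    return -1;              /* no free OOB space at all    */

            *pos = freelist[0].offset;
            *len = freelist[0].length;
            if (*len > 8)
                    *len = 8;               /* cleanmarker needs at most 8 */
            return 0;
    }
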
@@ -1165,13 +1181,17 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1165 1181
1166 /* Initialise write buffer */ 1182 /* Initialise write buffer */
1167 init_rwsem(&c->wbuf_sem); 1183 init_rwsem(&c->wbuf_sem);
1168 c->wbuf_pagesize = c->mtd->oobblock; 1184 c->wbuf_pagesize = c->mtd->writesize;
1169 c->wbuf_ofs = 0xFFFFFFFF; 1185 c->wbuf_ofs = 0xFFFFFFFF;
1170 1186
1171 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); 1187 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1172 if (!c->wbuf) 1188 if (!c->wbuf)
1173 return -ENOMEM; 1189 return -ENOMEM;
1174 1190
1191 c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->mtd->oobsize, GFP_KERNEL);
1192 if (!c->oobbuf)
1193 return -ENOMEM;
1194
1175 res = jffs2_nand_set_oobinfo(c); 1195 res = jffs2_nand_set_oobinfo(c);
1176 1196
1177#ifdef BREAKME 1197#ifdef BREAKME
@@ -1189,6 +1209,7 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1189void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) 1209void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1190{ 1210{
1191 kfree(c->wbuf); 1211 kfree(c->wbuf);
1212 kfree(c->oobbuf);
1192} 1213}
1193 1214
1194int jffs2_dataflash_setup(struct jffs2_sb_info *c) { 1215int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
@@ -1236,33 +1257,14 @@ void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1236 kfree(c->wbuf); 1257 kfree(c->wbuf);
1237} 1258}
1238 1259
1239int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
1240 /* Cleanmarker is actually larger on the flashes */
1241 c->cleanmarker_size = 16;
1242
1243 /* Initialize write buffer */
1244 init_rwsem(&c->wbuf_sem);
1245 c->wbuf_pagesize = c->mtd->eccsize;
1246 c->wbuf_ofs = 0xFFFFFFFF;
1247
1248 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1249 if (!c->wbuf)
1250 return -ENOMEM;
1251
1252 return 0;
1253}
1254
1255void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {
1256 kfree(c->wbuf);
1257}
1258
1259int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { 1260int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1260 /* Cleanmarker currently occupies a whole programming region */ 1261 /* Cleanmarker currently occupies whole programming regions,
1261 c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd); 1262 * either one or 2 for 8Byte STMicro flashes. */
1261 c->cleanmarker_size = MTD_PROGREGION_SIZE(c->mtd); 1262 * either one or two for 8-byte STMicro flashes. */
1262 1264
1263 /* Initialize write buffer */ 1265 /* Initialize write buffer */
1264 init_rwsem(&c->wbuf_sem); 1266 init_rwsem(&c->wbuf_sem);
1265 c->wbuf_pagesize = MTD_PROGREGION_SIZE(c->mtd); 1267 c->wbuf_pagesize = c->mtd->writesize;
1266 c->wbuf_ofs = 0xFFFFFFFF; 1268 c->wbuf_ofs = 0xFFFFFFFF;
1267 1269
1268 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); 1270 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 1342f0158e9b..67176792e138 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -37,7 +37,6 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint
37 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache; 37 f->inocache->nodes = (struct jffs2_raw_node_ref *)f->inocache;
38 f->inocache->state = INO_STATE_PRESENT; 38 f->inocache->state = INO_STATE_PRESENT;
39 39
40
41 jffs2_add_ino_cache(c, f->inocache); 40 jffs2_add_ino_cache(c, f->inocache);
42 D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); 41 D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino));
43 ri->ino = cpu_to_je32(f->inocache->ino); 42 ri->ino = cpu_to_je32(f->inocache->ino);
@@ -57,12 +56,14 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, uint
57/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it, 56/* jffs2_write_dnode - given a raw_inode, allocate a full_dnode for it,
58 write it to the flash, link it into the existing inode/fragment list */ 57 write it to the flash, link it into the existing inode/fragment list */
59 58
60struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_inode *ri, const unsigned char *data, uint32_t datalen, uint32_t flash_ofs, int alloc_mode) 59struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
60 struct jffs2_raw_inode *ri, const unsigned char *data,
61 uint32_t datalen, int alloc_mode)
61 62
62{ 63{
63 struct jffs2_raw_node_ref *raw;
64 struct jffs2_full_dnode *fn; 64 struct jffs2_full_dnode *fn;
65 size_t retlen; 65 size_t retlen;
66 uint32_t flash_ofs;
66 struct kvec vecs[2]; 67 struct kvec vecs[2];
67 int ret; 68 int ret;
68 int retried = 0; 69 int retried = 0;
@@ -78,34 +79,21 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
78 vecs[1].iov_base = (unsigned char *)data; 79 vecs[1].iov_base = (unsigned char *)data;
79 vecs[1].iov_len = datalen; 80 vecs[1].iov_len = datalen;
80 81
81 jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
82
83 if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { 82 if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) {
84 printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); 83 printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen);
85 } 84 }
86 raw = jffs2_alloc_raw_node_ref();
87 if (!raw)
88 return ERR_PTR(-ENOMEM);
89 85
90 fn = jffs2_alloc_full_dnode(); 86 fn = jffs2_alloc_full_dnode();
91 if (!fn) { 87 if (!fn)
92 jffs2_free_raw_node_ref(raw);
93 return ERR_PTR(-ENOMEM); 88 return ERR_PTR(-ENOMEM);
94 }
95
96 fn->ofs = je32_to_cpu(ri->offset);
97 fn->size = je32_to_cpu(ri->dsize);
98 fn->frags = 0;
99 89
100 /* check number of valid vecs */ 90 /* check number of valid vecs */
101 if (!datalen || !data) 91 if (!datalen || !data)
102 cnt = 1; 92 cnt = 1;
103 retry: 93 retry:
104 fn->raw = raw; 94 flash_ofs = write_ofs(c);
105 95
106 raw->flash_offset = flash_ofs; 96 jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
107 raw->__totlen = PAD(sizeof(*ri)+datalen);
108 raw->next_phys = NULL;
109 97
110 if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { 98 if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) {
111 BUG_ON(!retried); 99 BUG_ON(!retried);
@@ -125,22 +113,16 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
125 113
126 /* Mark the space as dirtied */ 114 /* Mark the space as dirtied */
127 if (retlen) { 115 if (retlen) {
128 /* Doesn't belong to any inode */
129 raw->next_in_ino = NULL;
130
131 /* Don't change raw->size to match retlen. We may have 116 /* Don't change raw->size to match retlen. We may have
132 written the node header already, and only the data will 117 written the node header already, and only the data will
133 seem corrupted, in which case the scan would skip over 118 seem corrupted, in which case the scan would skip over
134 any node we write before the original intended end of 119 any node we write before the original intended end of
135 this node */ 120 this node */
136 raw->flash_offset |= REF_OBSOLETE; 121 jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL);
137 jffs2_add_physical_node_ref(c, raw);
138 jffs2_mark_node_obsolete(c, raw);
139 } else { 122 } else {
140 printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset); 123 printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
141 jffs2_free_raw_node_ref(raw);
142 } 124 }
143 if (!retried && alloc_mode != ALLOC_NORETRY && (raw = jffs2_alloc_raw_node_ref())) { 125 if (!retried && alloc_mode != ALLOC_NORETRY) {
144 /* Try to reallocate space and retry */ 126 /* Try to reallocate space and retry */
145 uint32_t dummy; 127 uint32_t dummy;
146 struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size]; 128 struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
@@ -153,19 +135,20 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
153 jffs2_dbg_acct_paranoia_check(c, jeb); 135 jffs2_dbg_acct_paranoia_check(c, jeb);
154 136
155 if (alloc_mode == ALLOC_GC) { 137 if (alloc_mode == ALLOC_GC) {
156 ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &flash_ofs, 138 ret = jffs2_reserve_space_gc(c, sizeof(*ri) + datalen, &dummy,
157 &dummy, JFFS2_SUMMARY_INODE_SIZE); 139 JFFS2_SUMMARY_INODE_SIZE);
158 } else { 140 } else {
159 /* Locking pain */ 141 /* Locking pain */
160 up(&f->sem); 142 up(&f->sem);
161 jffs2_complete_reservation(c); 143 jffs2_complete_reservation(c);
162 144
163 ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &flash_ofs, 145 ret = jffs2_reserve_space(c, sizeof(*ri) + datalen, &dummy,
164 &dummy, alloc_mode, JFFS2_SUMMARY_INODE_SIZE); 146 alloc_mode, JFFS2_SUMMARY_INODE_SIZE);
165 down(&f->sem); 147 down(&f->sem);
166 } 148 }
167 149
168 if (!ret) { 150 if (!ret) {
151 flash_ofs = write_ofs(c);
169 D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); 152 D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
170 153
171 jffs2_dbg_acct_sanity_check(c,jeb); 154 jffs2_dbg_acct_sanity_check(c,jeb);
@@ -174,7 +157,6 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
174 goto retry; 157 goto retry;
175 } 158 }
176 D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); 159 D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
177 jffs2_free_raw_node_ref(raw);
178 } 160 }
179 /* Release the full_dnode which is now useless, and return */ 161 /* Release the full_dnode which is now useless, and return */
180 jffs2_free_full_dnode(fn); 162 jffs2_free_full_dnode(fn);
@@ -188,20 +170,17 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
188 if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || 170 if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) ||
189 ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && 171 ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) &&
190 (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { 172 (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) {
191 raw->flash_offset |= REF_PRISTINE; 173 flash_ofs |= REF_PRISTINE;
192 } else { 174 } else {
193 raw->flash_offset |= REF_NORMAL; 175 flash_ofs |= REF_NORMAL;
194 } 176 }
195 jffs2_add_physical_node_ref(c, raw); 177 fn->raw = jffs2_add_physical_node_ref(c, flash_ofs, PAD(sizeof(*ri)+datalen), f->inocache);
196 178 fn->ofs = je32_to_cpu(ri->offset);
197 /* Link into per-inode list */ 179 fn->size = je32_to_cpu(ri->dsize);
198 spin_lock(&c->erase_completion_lock); 180 fn->frags = 0;
199 raw->next_in_ino = f->inocache->nodes;
200 f->inocache->nodes = raw;
201 spin_unlock(&c->erase_completion_lock);
202 181
203 D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", 182 D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n",
204 flash_ofs, ref_flags(raw), je32_to_cpu(ri->dsize), 183 flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize),
205 je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), 184 je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc),
206 je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); 185 je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)));
207 186
@@ -212,12 +191,14 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2
212 return fn; 191 return fn;
213} 192}
214 193
-struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_dirent *rd, const unsigned char *name, uint32_t namelen, uint32_t flash_ofs, int alloc_mode)
+struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
+                                             struct jffs2_raw_dirent *rd, const unsigned char *name,
+                                             uint32_t namelen, int alloc_mode)
 {
-        struct jffs2_raw_node_ref *raw;
         struct jffs2_full_dirent *fd;
         size_t retlen;
         struct kvec vecs[2];
+        uint32_t flash_ofs;
         int retried = 0;
         int ret;
 
@@ -228,26 +209,16 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
         D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) {
                 printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n");
                 BUG();
-           }
-        );
+           });
 
         vecs[0].iov_base = rd;
         vecs[0].iov_len = sizeof(*rd);
         vecs[1].iov_base = (unsigned char *)name;
         vecs[1].iov_len = namelen;
 
-        jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
-
-        raw = jffs2_alloc_raw_node_ref();
-
-        if (!raw)
-                return ERR_PTR(-ENOMEM);
-
         fd = jffs2_alloc_full_dirent(namelen+1);
-        if (!fd) {
-                jffs2_free_raw_node_ref(raw);
+        if (!fd)
                 return ERR_PTR(-ENOMEM);
-        }
 
         fd->version = je32_to_cpu(rd->version);
         fd->ino = je32_to_cpu(rd->ino);
@@ -257,11 +228,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
         fd->name[namelen]=0;
 
 retry:
-        fd->raw = raw;
+        flash_ofs = write_ofs(c);
 
-        raw->flash_offset = flash_ofs;
-        raw->__totlen = PAD(sizeof(*rd)+namelen);
-        raw->next_phys = NULL;
+        jffs2_dbg_prewrite_paranoia_check(c, flash_ofs, vecs[0].iov_len + vecs[1].iov_len);
 
         if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) {
                 BUG_ON(!retried);
@@ -280,15 +249,11 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
                        sizeof(*rd)+namelen, flash_ofs, ret, retlen);
                 /* Mark the space as dirtied */
                 if (retlen) {
-                        raw->next_in_ino = NULL;
-                        raw->flash_offset |= REF_OBSOLETE;
-                        jffs2_add_physical_node_ref(c, raw);
-                        jffs2_mark_node_obsolete(c, raw);
+                        jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL);
                 } else {
-                        printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", raw->flash_offset);
-                        jffs2_free_raw_node_ref(raw);
+                        printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs);
                 }
-                if (!retried && (raw = jffs2_alloc_raw_node_ref())) {
+                if (!retried) {
                         /* Try to reallocate space and retry */
                         uint32_t dummy;
                         struct jffs2_eraseblock *jeb = &c->blocks[flash_ofs / c->sector_size];
@@ -301,39 +266,33 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
                         jffs2_dbg_acct_paranoia_check(c, jeb);
 
                         if (alloc_mode == ALLOC_GC) {
-                                ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &flash_ofs,
-                                                             &dummy, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+                                ret = jffs2_reserve_space_gc(c, sizeof(*rd) + namelen, &dummy,
+                                                             JFFS2_SUMMARY_DIRENT_SIZE(namelen));
                         } else {
                                 /* Locking pain */
                                 up(&f->sem);
                                 jffs2_complete_reservation(c);
 
-                                ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &flash_ofs,
-                                                          &dummy, alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
+                                ret = jffs2_reserve_space(c, sizeof(*rd) + namelen, &dummy,
+                                                          alloc_mode, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
                                 down(&f->sem);
                         }
 
                         if (!ret) {
+                                flash_ofs = write_ofs(c);
                                 D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs));
                                 jffs2_dbg_acct_sanity_check(c,jeb);
                                 jffs2_dbg_acct_paranoia_check(c, jeb);
                                 goto retry;
                         }
                         D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret));
-                        jffs2_free_raw_node_ref(raw);
                 }
                 /* Release the full_dnode which is now useless, and return */
                 jffs2_free_full_dirent(fd);
                 return ERR_PTR(ret?ret:-EIO);
         }
         /* Mark the space used */
-        raw->flash_offset |= REF_PRISTINE;
-        jffs2_add_physical_node_ref(c, raw);
-
-        spin_lock(&c->erase_completion_lock);
-        raw->next_in_ino = f->inocache->nodes;
-        f->inocache->nodes = raw;
-        spin_unlock(&c->erase_completion_lock);
+        fd->raw = jffs2_add_physical_node_ref(c, flash_ofs | REF_PRISTINE, PAD(sizeof(*rd)+namelen), f->inocache);
 
         if (retried) {
                 jffs2_dbg_acct_sanity_check(c,NULL);
@@ -359,14 +318,14 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
         struct jffs2_full_dnode *fn;
         unsigned char *comprbuf = NULL;
         uint16_t comprtype = JFFS2_COMPR_NONE;
-        uint32_t phys_ofs, alloclen;
+        uint32_t alloclen;
         uint32_t datalen, cdatalen;
         int retried = 0;
 
 retry:
         D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset));
 
-        ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, &phys_ofs,
+        ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN,
                                   &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
         if (ret) {
                 D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret));
@@ -394,7 +353,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
         ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
         ri->data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));
 
-        fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, phys_ofs, ALLOC_NORETRY);
+        fn = jffs2_write_dnode(c, f, ri, comprbuf, cdatalen, ALLOC_NORETRY);
 
         jffs2_free_comprbuf(comprbuf, buf);
 
@@ -448,13 +407,13 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
         struct jffs2_raw_dirent *rd;
         struct jffs2_full_dnode *fn;
         struct jffs2_full_dirent *fd;
-        uint32_t alloclen, phys_ofs;
+        uint32_t alloclen;
         int ret;
 
         /* Try to reserve enough space for both node and dirent.
          * Just the node will do for now, though
          */
-        ret = jffs2_reserve_space(c, sizeof(*ri), &phys_ofs, &alloclen, ALLOC_NORMAL,
+        ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL,
                                   JFFS2_SUMMARY_INODE_SIZE);
         D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen));
         if (ret) {
@@ -465,7 +424,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
         ri->data_crc = cpu_to_je32(0);
         ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
 
-        fn = jffs2_write_dnode(c, f, ri, NULL, 0, phys_ofs, ALLOC_NORMAL);
+        fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL);
 
         D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n",
                   jemode_to_cpu(ri->mode)));
@@ -484,7 +443,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
 
         up(&f->sem);
         jffs2_complete_reservation(c);
-        ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+        ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
                                   ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
 
         if (ret) {
@@ -516,7 +475,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, str
         rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
         rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
 
-        fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+        fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL);
 
         jffs2_free_raw_dirent(rd);
 
@@ -545,7 +504,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
 {
         struct jffs2_raw_dirent *rd;
         struct jffs2_full_dirent *fd;
-        uint32_t alloclen, phys_ofs;
+        uint32_t alloclen;
         int ret;
 
         if (1 /* alternative branch needs testing */ ||
@@ -556,7 +515,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
         if (!rd)
                 return -ENOMEM;
 
-        ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+        ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
                                   ALLOC_DELETION, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
         if (ret) {
                 jffs2_free_raw_dirent(rd);
@@ -580,7 +539,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
         rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
         rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
 
-        fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_DELETION);
+        fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_DELETION);
 
         jffs2_free_raw_dirent(rd);
 
@@ -659,14 +618,14 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
 {
         struct jffs2_raw_dirent *rd;
         struct jffs2_full_dirent *fd;
-        uint32_t alloclen, phys_ofs;
+        uint32_t alloclen;
         int ret;
 
         rd = jffs2_alloc_raw_dirent();
         if (!rd)
                 return -ENOMEM;
 
-        ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &phys_ofs, &alloclen,
+        ret = jffs2_reserve_space(c, sizeof(*rd)+namelen, &alloclen,
                                   ALLOC_NORMAL, JFFS2_SUMMARY_DIRENT_SIZE(namelen));
         if (ret) {
                 jffs2_free_raw_dirent(rd);
@@ -692,7 +651,7 @@ int jffs2_do_link (struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, uint
         rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8));
         rd->name_crc = cpu_to_je32(crc32(0, name, namelen));
 
-        fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, phys_ofs, ALLOC_NORMAL);
+        fd = jffs2_write_dirent(c, dir_f, rd, name, namelen, ALLOC_NORMAL);
 
         jffs2_free_raw_dirent(rd);
 
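The hunks above change the JFFS2 writer calling convention: jffs2_reserve_space() and jffs2_reserve_space_gc() no longer hand back a physical offset, the dnode/dirent writers derive it themselves with write_ofs(c) immediately before each attempt, and jffs2_add_physical_node_ref() now takes the offset (or'd with the REF_* state), the padded length and the inode cache instead of a caller-allocated raw node ref. A minimal user-space sketch of that reserve/write/retry shape follows; every name in it is an illustrative stand-in, not the kernel API.

#include <stdio.h>
#include <stdint.h>

/* Toy model: a reservation simply validates the request, and the "flash"
 * offset is read from a cursor at write time, as write_ofs() does above. */
static uint32_t next_free = 0x1000;

static int reserve_space(uint32_t len, uint32_t *alloclen)
{
        *alloclen = len;
        return 0;
}

static uint32_t write_ofs_model(void)
{
        return next_free;
}

/* Fail the first write so the retry path is exercised. */
static int flash_write_model(uint32_t ofs, uint32_t len)
{
        static int failed_once;

        if (!failed_once++)
                return -1;
        next_free += len;
        return 0;
}

static int write_node(uint32_t len)
{
        uint32_t alloclen, ofs;
        int retried = 0;

        if (reserve_space(len, &alloclen))
                return -1;
retry:
        ofs = write_ofs_model();        /* offset picked at write time, not by the caller */
        if (flash_write_model(ofs, len)) {
                printf("write at 0x%08x failed, marking the space dirty\n", (unsigned)ofs);
                if (!retried && !reserve_space(len, &alloclen)) {
                        retried = 1;
                        goto retry;     /* the second attempt lands on the new reservation */
                }
                return -1;
        }
        printf("node written at 0x%08x\n", (unsigned)ofs);
        return 0;
}

int main(void)
{
        return write_node(68);
}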
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
new file mode 100644
index 000000000000..2d82e250be34
--- /dev/null
+++ b/fs/jffs2/xattr.c
@@ -0,0 +1,1238 @@
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2006 NEC Corporation
5 *
6 * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/slab.h>
13#include <linux/fs.h>
14#include <linux/time.h>
15#include <linux/pagemap.h>
16#include <linux/highmem.h>
17#include <linux/crc32.h>
18#include <linux/jffs2.h>
19#include <linux/xattr.h>
20#include <linux/mtd/mtd.h>
21#include "nodelist.h"
22/* -------- xdatum related functions ----------------
23 * xattr_datum_hashkey(xprefix, xname, xvalue, xsize)
24 * is used to calculate the xdatum hashkey. The remainder of the hashkey modulo XATTRINDEX_HASHSIZE is
25 * the index of the xattr name/value pair cache (c->xattrindex).
26 * unload_xattr_datum(c, xd)
27 * is used to release the xattr name/value pair and detach it from c->xattrindex.
28 * reclaim_xattr_datum(c)
29 * is used to reclaim xattr name/value pairs from the xattr name/value pair cache when
30 * memory usage by the cache exceeds c->xdatum_mem_threshold. Currently, this threshold
31 * is hard-coded to 32KiB.
32 * delete_xattr_datum_node(c, xd)
33 * is used to delete the jffs2 node that belongs to an xdatum. When EBS (Erase Block Summary) is
34 * enabled, it overwrites the obsolete node itself.
35 * delete_xattr_datum(c, xd)
36 * is used to delete a jffs2_xattr_datum object. It must be called with a reference
37 * counter of 0. (The counter records how many jffs2_xattr_ref objects refer to this xdatum.)
38 * do_verify_xattr_datum(c, xd)
39 * is used to load the xdatum information, without the name/value pair, from the medium.
40 * This is necessary once, because that information is not collected during the mounting
41 * process when EBS is enabled.
42 * 0 is returned on success. A negative return value means a recoverable error, and a
43 * positive return value means an unrecoverable error. Thus, the caller must remove this xdatum
44 * and its xrefs when a positive value is returned.
45 * do_load_xattr_datum(c, xd)
46 * is used to load the name/value pair from the medium.
47 * The meaning of the return value is the same as for do_verify_xattr_datum().
48 * load_xattr_datum(c, xd)
49 * is a wrapper around do_verify_xattr_datum() and do_load_xattr_datum().
50 * If xd needs do_verify_xattr_datum() first, it is called before do_load_xattr_datum().
51 * The meaning of the return value is the same as for do_verify_xattr_datum().
52 * save_xattr_datum(c, xd)
53 * is used to write the xdatum to the medium. xd->version is incremented.
54 * create_xattr_datum(c, xprefix, xname, xvalue, xsize)
55 * is used to create a new xdatum and write it to the medium.
56 * -------------------------------------------------- */
57
58static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
59{
60 int name_len = strlen(xname);
61
62 return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize);
63}
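As the block comment above describes, the hashkey mixes CRCs of the name and the value, both seeded with the prefix id, and its remainder modulo XATTRINDEX_HASHSIZE picks the cache bucket. A stand-alone user-space illustration, using zlib's crc32() in place of the kernel helper; the bucket count below is a placeholder, not the kernel's constant.

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <zlib.h>                       /* link with -lz */

#define BUCKETS 57                      /* stand-in for XATTRINDEX_HASHSIZE */

static uint32_t hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
{
        /* same shape as xattr_datum_hashkey(): two CRCs xor'ed together */
        return crc32(xprefix, (const unsigned char *)xname, strlen(xname)) ^
               crc32(xprefix, (const unsigned char *)xvalue, xsize);
}

int main(void)
{
        const char *value = "0x1234";
        uint32_t key = hashkey(1 /* "user." */, "mykey", value, strlen(value));

        printf("hashkey=%#010x bucket=%u\n", key, key % BUCKETS);
        return 0;
}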
64
65static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
66{
67 /* must be called under down_write(xattr_sem) */
68 D1(dbg_xattr("%s: xid=%u, version=%u\n", __FUNCTION__, xd->xid, xd->version));
69 if (xd->xname) {
70 c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len);
71 kfree(xd->xname);
72 }
73
74 list_del_init(&xd->xindex);
75 xd->hashkey = 0;
76 xd->xname = NULL;
77 xd->xvalue = NULL;
78}
79
80static void reclaim_xattr_datum(struct jffs2_sb_info *c)
81{
82 /* must be called under down_write(xattr_sem) */
83 struct jffs2_xattr_datum *xd, *_xd;
84 uint32_t target, before;
85 static int index = 0;
86 int count;
87
88 if (c->xdatum_mem_threshold > c->xdatum_mem_usage)
89 return;
90
91 before = c->xdatum_mem_usage;
92 target = c->xdatum_mem_usage * 4 / 5; /* 20% reduction */
93 for (count = 0; count < XATTRINDEX_HASHSIZE; count++) {
94 list_for_each_entry_safe(xd, _xd, &c->xattrindex[index], xindex) {
95 if (xd->flags & JFFS2_XFLAGS_HOT) {
96 xd->flags &= ~JFFS2_XFLAGS_HOT;
97 } else if (!(xd->flags & JFFS2_XFLAGS_BIND)) {
98 unload_xattr_datum(c, xd);
99 }
100 if (c->xdatum_mem_usage <= target)
101 goto out;
102 }
103 index = (index+1) % XATTRINDEX_HASHSIZE;
104 }
105 out:
106 JFFS2_NOTICE("xdatum_mem_usage from %u bytes to %u bytes (%u bytes reclaimed)\n",
107 before, c->xdatum_mem_usage, before - c->xdatum_mem_usage);
108}
109
110static void delete_xattr_datum_node(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
111{
112 /* must be called under down_write(xattr_sem) */
113 struct jffs2_raw_xattr rx;
114 size_t length;
115 int rc;
116
117 if (!xd->node) {
118 JFFS2_WARNING("xdatum (xid=%u) is removed twice.\n", xd->xid);
119 return;
120 }
121 if (jffs2_sum_active()) {
122 memset(&rx, 0xff, sizeof(struct jffs2_raw_xattr));
123 rc = jffs2_flash_read(c, ref_offset(xd->node),
124 sizeof(struct jffs2_unknown_node),
125 &length, (char *)&rx);
126 if (rc || length != sizeof(struct jffs2_unknown_node)) {
127 JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
128 rc, sizeof(struct jffs2_unknown_node),
129 length, ref_offset(xd->node));
130 }
131 rc = jffs2_flash_write(c, ref_offset(xd->node), sizeof(rx),
132 &length, (char *)&rx);
133 if (rc || length != sizeof(struct jffs2_raw_xattr)) {
134 JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n",
135 rc, sizeof(rx), length, ref_offset(xd->node));
136 }
137 }
138 spin_lock(&c->erase_completion_lock);
139 xd->node->next_in_ino = NULL;
140 spin_unlock(&c->erase_completion_lock);
141 jffs2_mark_node_obsolete(c, xd->node);
142 xd->node = NULL;
143}
144
145static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
146{
147 /* must be called under down_write(xattr_sem) */
148 BUG_ON(xd->refcnt);
149
150 unload_xattr_datum(c, xd);
151 if (xd->node) {
152 delete_xattr_datum_node(c, xd);
153 xd->node = NULL;
154 }
155 jffs2_free_xattr_datum(xd);
156}
157
158static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
159{
160 /* must be called under down_write(xattr_sem) */
161 struct jffs2_eraseblock *jeb;
162 struct jffs2_raw_xattr rx;
163 size_t readlen;
164 uint32_t crc, totlen;
165 int rc;
166
167 BUG_ON(!xd->node);
168 BUG_ON(ref_flags(xd->node) != REF_UNCHECKED);
169
170 rc = jffs2_flash_read(c, ref_offset(xd->node), sizeof(rx), &readlen, (char *)&rx);
171 if (rc || readlen != sizeof(rx)) {
172 JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
173 rc, sizeof(rx), readlen, ref_offset(xd->node));
174 return rc ? rc : -EIO;
175 }
176 crc = crc32(0, &rx, sizeof(rx) - 4);
177 if (crc != je32_to_cpu(rx.node_crc)) {
178 if (je32_to_cpu(rx.node_crc) != 0xffffffff)
179 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
180 ref_offset(xd->node), je32_to_cpu(rx.node_crc), crc);
181 return EIO;
182 }
183 totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
184 if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
185 || je16_to_cpu(rx.nodetype) != JFFS2_NODETYPE_XATTR
186 || je32_to_cpu(rx.totlen) != totlen
187 || je32_to_cpu(rx.xid) != xd->xid
188 || je32_to_cpu(rx.version) != xd->version) {
189 JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, "
190 "nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n",
191 ref_offset(xd->node), je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
192 je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR,
193 je32_to_cpu(rx.totlen), totlen,
194 je32_to_cpu(rx.xid), xd->xid,
195 je32_to_cpu(rx.version), xd->version);
196 return EIO;
197 }
198 xd->xprefix = rx.xprefix;
199 xd->name_len = rx.name_len;
200 xd->value_len = je16_to_cpu(rx.value_len);
201 xd->data_crc = je32_to_cpu(rx.data_crc);
202
203 /* This JFFS2_NODETYPE_XATTR node is checked */
204 jeb = &c->blocks[ref_offset(xd->node) / c->sector_size];
205 totlen = PAD(je32_to_cpu(rx.totlen));
206
207 spin_lock(&c->erase_completion_lock);
208 c->unchecked_size -= totlen; c->used_size += totlen;
209 jeb->unchecked_size -= totlen; jeb->used_size += totlen;
210 xd->node->flash_offset = ref_offset(xd->node) | REF_PRISTINE;
211 spin_unlock(&c->erase_completion_lock);
212
213 /* unchecked xdatum is chained with c->xattr_unchecked */
214 list_del_init(&xd->xindex);
215
216 dbg_xattr("success on verifying xdatum (xid=%u, version=%u)\n",
217 xd->xid, xd->version);
218
219 return 0;
220}
221
222static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
223{
224 /* must be called under down_write(xattr_sem) */
225 char *data;
226 size_t readlen;
227 uint32_t crc, length;
228 int i, ret, retry = 0;
229
230 BUG_ON(!xd->node);
231 BUG_ON(ref_flags(xd->node) != REF_PRISTINE);
232 BUG_ON(!list_empty(&xd->xindex));
233 retry:
234 length = xd->name_len + 1 + xd->value_len;
235 data = kmalloc(length, GFP_KERNEL);
236 if (!data)
237 return -ENOMEM;
238
239 ret = jffs2_flash_read(c, ref_offset(xd->node)+sizeof(struct jffs2_raw_xattr),
240 length, &readlen, data);
241
242 if (ret || length!=readlen) {
243 JFFS2_WARNING("jffs2_flash_read() returned %d, request=%d, readlen=%zu, at %#08x\n",
244 ret, length, readlen, ref_offset(xd->node));
245 kfree(data);
246 return ret ? ret : -EIO;
247 }
248
249 data[xd->name_len] = '\0';
250 crc = crc32(0, data, length);
251 if (crc != xd->data_crc) {
252 JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)"
253 " at %#08x, read: 0x%08x calculated: 0x%08x\n",
254 ref_offset(xd->node), xd->data_crc, crc);
255 kfree(data);
256 return EIO;
257 }
258
259 xd->flags |= JFFS2_XFLAGS_HOT;
260 xd->xname = data;
261 xd->xvalue = data + xd->name_len+1;
262
263 c->xdatum_mem_usage += length;
264
265 xd->hashkey = xattr_datum_hashkey(xd->xprefix, xd->xname, xd->xvalue, xd->value_len);
266 i = xd->hashkey % XATTRINDEX_HASHSIZE;
267 list_add(&xd->xindex, &c->xattrindex[i]);
268 if (!retry) {
269 retry = 1;
270 reclaim_xattr_datum(c);
271 if (!xd->xname)
272 goto retry;
273 }
274
275 dbg_xattr("success on loading xdatum (xid=%u, xprefix=%u, xname='%s')\n",
276 xd->xid, xd->xprefix, xd->xname);
277
278 return 0;
279}
280
281static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
282{
283 /* must be called under down_write(xattr_sem);
284 * rc < 0 : recoverable error, try again
285 * rc = 0 : success
286 * rc > 0 : Unrecoverable error, this node should be deleted.
287 */
288 int rc = 0;
289 BUG_ON(xd->xname);
290 if (!xd->node)
291 return EIO;
292 if (unlikely(ref_flags(xd->node) != REF_PRISTINE)) {
293 rc = do_verify_xattr_datum(c, xd);
294 if (rc > 0) {
295 list_del_init(&xd->xindex);
296 delete_xattr_datum_node(c, xd);
297 }
298 }
299 if (!rc)
300 rc = do_load_xattr_datum(c, xd);
301 return rc;
302}
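The callers below all branch the same way on the result of load_xattr_datum(): a negative value is a recoverable error to retry later, zero means the name/value pair is now cached, and a positive value means the on-flash node is bad and the xdatum (and its xrefs) should be dropped. A trivial stand-alone illustration of that three-way dispatch, for orientation only.

#include <stdio.h>

static const char *classify(int rc)
{
        if (rc < 0)
                return "recoverable error: try again later";
        if (rc > 0)
                return "unrecoverable: delete this node";
        return "loaded";
}

int main(void)
{
        int samples[] = { -5, 0, 1 };

        for (unsigned i = 0; i < 3; i++)
                printf("rc=%d -> %s\n", samples[i], classify(samples[i]));
        return 0;
}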
303
304static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
305{
306 /* must be called under down_write(xattr_sem) */
307 struct jffs2_raw_node_ref *raw;
308 struct jffs2_raw_xattr rx;
309 struct kvec vecs[2];
310 size_t length;
311 int rc, totlen;
312 uint32_t phys_ofs = write_ofs(c);
313
314 BUG_ON(!xd->xname);
315
316 vecs[0].iov_base = &rx;
317 vecs[0].iov_len = PAD(sizeof(rx));
318 vecs[1].iov_base = xd->xname;
319 vecs[1].iov_len = xd->name_len + 1 + xd->value_len;
320 totlen = vecs[0].iov_len + vecs[1].iov_len;
321
322 /* Setup raw-xattr */
323 rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
324 rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR);
325 rx.totlen = cpu_to_je32(PAD(totlen));
326 rx.hdr_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_unknown_node) - 4));
327
328 rx.xid = cpu_to_je32(xd->xid);
329 rx.version = cpu_to_je32(++xd->version);
330 rx.xprefix = xd->xprefix;
331 rx.name_len = xd->name_len;
332 rx.value_len = cpu_to_je16(xd->value_len);
333 rx.data_crc = cpu_to_je32(crc32(0, vecs[1].iov_base, vecs[1].iov_len));
334 rx.node_crc = cpu_to_je32(crc32(0, &rx, sizeof(struct jffs2_raw_xattr) - 4));
335
336 rc = jffs2_flash_writev(c, vecs, 2, phys_ofs, &length, 0);
337 if (rc || totlen != length) {
338 JFFS2_WARNING("jffs2_flash_writev()=%d, req=%u, wrote=%zu, at %#08x\n",
339 rc, totlen, length, phys_ofs);
340 rc = rc ? rc : -EIO;
341 if (length)
342 jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(totlen), NULL);
343
344 return rc;
345 }
346
347 /* success */
348 raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), NULL);
349 /* FIXME */ raw->next_in_ino = (void *)xd;
350
351 if (xd->node)
352 delete_xattr_datum_node(c, xd);
353 xd->node = raw;
354
355 dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n",
356 xd->xid, xd->version, xd->xprefix, xd->xname);
357
358 return 0;
359}
360
361static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
362 int xprefix, const char *xname,
363 const char *xvalue, int xsize)
364{
365 /* must be called under down_write(xattr_sem) */
366 struct jffs2_xattr_datum *xd;
367 uint32_t hashkey, name_len;
368 char *data;
369 int i, rc;
370
371 /* Search xattr_datum has same xname/xvalue by index */
372 hashkey = xattr_datum_hashkey(xprefix, xname, xvalue, xsize);
373 i = hashkey % XATTRINDEX_HASHSIZE;
374 list_for_each_entry(xd, &c->xattrindex[i], xindex) {
375 if (xd->hashkey==hashkey
376 && xd->xprefix==xprefix
377 && xd->value_len==xsize
378 && !strcmp(xd->xname, xname)
379 && !memcmp(xd->xvalue, xvalue, xsize)) {
380 xd->refcnt++;
381 return xd;
382 }
383 }
384
385 /* Not found, Create NEW XATTR-Cache */
386 name_len = strlen(xname);
387
388 xd = jffs2_alloc_xattr_datum();
389 if (!xd)
390 return ERR_PTR(-ENOMEM);
391
392 data = kmalloc(name_len + 1 + xsize, GFP_KERNEL);
393 if (!data) {
394 jffs2_free_xattr_datum(xd);
395 return ERR_PTR(-ENOMEM);
396 }
397 strcpy(data, xname);
398 memcpy(data + name_len + 1, xvalue, xsize);
399
400 xd->refcnt = 1;
401 xd->xid = ++c->highest_xid;
402 xd->flags |= JFFS2_XFLAGS_HOT;
403 xd->xprefix = xprefix;
404
405 xd->hashkey = hashkey;
406 xd->xname = data;
407 xd->xvalue = data + name_len + 1;
408 xd->name_len = name_len;
409 xd->value_len = xsize;
410 xd->data_crc = crc32(0, data, xd->name_len + 1 + xd->value_len);
411
412 rc = save_xattr_datum(c, xd);
413 if (rc) {
414 kfree(xd->xname);
415 jffs2_free_xattr_datum(xd);
416 return ERR_PTR(rc);
417 }
418
419 /* Insert Hash Index */
420 i = hashkey % XATTRINDEX_HASHSIZE;
421 list_add(&xd->xindex, &c->xattrindex[i]);
422
423 c->xdatum_mem_usage += (xd->name_len + 1 + xd->value_len);
424 reclaim_xattr_datum(c);
425
426 return xd;
427}
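create_xattr_datum() first searches the hash bucket for an existing xdatum with the same prefix, name and value and merely bumps its refcount, so identical attribute payloads written to many inodes end up sharing one on-flash xdatum. A toy user-space model of that share-or-create decision; the names here are illustrative and not the kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct datum {
        char *name, *value;
        unsigned refcnt;
};

/* one-slot "cache": enough to show the share-or-create decision */
static struct datum *cache;

static struct datum *get_datum(const char *name, const char *value)
{
        if (cache && !strcmp(cache->name, name) && !strcmp(cache->value, value)) {
                cache->refcnt++;                /* share the existing datum */
                return cache;
        }
        cache = calloc(1, sizeof(*cache));
        cache->name = strdup(name);
        cache->value = strdup(value);
        cache->refcnt = 1;
        return cache;
}

static void put_datum(struct datum *d)
{
        if (--d->refcnt)
                return;
        free(d->name);
        free(d->value);
        free(d);
        cache = NULL;
}

int main(void)
{
        struct datum *a = get_datum("user.colour", "blue");
        struct datum *b = get_datum("user.colour", "blue");    /* same pair: shared */

        printf("shared=%d refcnt=%u\n", a == b, a->refcnt);
        put_datum(a);
        put_datum(b);
        return 0;
}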
428
429/* -------- xref related functions ------------------
430 * verify_xattr_ref(c, ref)
431 * is used to load xref information from the medium. Because the summary data does not
432 * contain xid/ino, it is necessary to verify it once during the mounting process.
433 * delete_xattr_ref_node(c, ref)
434 * is used to delete the jffs2 node that belongs to an xref. When EBS is enabled,
435 * it overwrites the obsolete node itself.
436 * delete_xattr_ref(c, ref)
437 * is used to delete a jffs2_xattr_ref object. If the reference counter of the xdatum
438 * referred to by this xref drops to 0, delete_xattr_datum() is called as well.
439 * save_xattr_ref(c, ref)
440 * is used to write the xref to the medium.
441 * create_xattr_ref(c, ic, xd)
442 * is used to create a new xref and write it to the medium.
443 * jffs2_xattr_delete_inode(c, ic)
444 * is called to remove the xrefs related to an obsolete inode when the inode is unlinked.
445 * jffs2_xattr_free_inode(c, ic)
446 * is called to release xattr-related objects when unmounting.
447 * check_xattr_ref_inode(c, ic)
448 * is used to confirm that the inode does not have duplicate xattr name/value pairs.
449 * -------------------------------------------------- */
450static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
451{
452 struct jffs2_eraseblock *jeb;
453 struct jffs2_raw_xref rr;
454 size_t readlen;
455 uint32_t crc, totlen;
456 int rc;
457
458 BUG_ON(ref_flags(ref->node) != REF_UNCHECKED);
459
460 rc = jffs2_flash_read(c, ref_offset(ref->node), sizeof(rr), &readlen, (char *)&rr);
461 if (rc || sizeof(rr) != readlen) {
462 JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n",
463 rc, sizeof(rr), readlen, ref_offset(ref->node));
464 return rc ? rc : -EIO;
465 }
466 /* obsolete node */
467 crc = crc32(0, &rr, sizeof(rr) - 4);
468 if (crc != je32_to_cpu(rr.node_crc)) {
469 if (je32_to_cpu(rr.node_crc) != 0xffffffff)
470 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
471 ref_offset(ref->node), je32_to_cpu(rr.node_crc), crc);
472 return EIO;
473 }
474 if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
475 || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
476 || je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
477 JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, "
478 "nodetype=%#04x/%#04x, totlen=%u/%zu\n",
479 ref_offset(ref->node), je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
480 je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
481 je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
482 return EIO;
483 }
484 ref->ino = je32_to_cpu(rr.ino);
485 ref->xid = je32_to_cpu(rr.xid);
486
487 /* fixup superblock/eraseblock info */
488 jeb = &c->blocks[ref_offset(ref->node) / c->sector_size];
489 totlen = PAD(sizeof(rr));
490
491 spin_lock(&c->erase_completion_lock);
492 c->unchecked_size -= totlen; c->used_size += totlen;
493 jeb->unchecked_size -= totlen; jeb->used_size += totlen;
494 ref->node->flash_offset = ref_offset(ref->node) | REF_PRISTINE;
495 spin_unlock(&c->erase_completion_lock);
496
497 dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n",
498 ref->ino, ref->xid, ref_offset(ref->node));
499 return 0;
500}
501
502static void delete_xattr_ref_node(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
503{
504 struct jffs2_raw_xref rr;
505 size_t length;
506 int rc;
507
508 if (jffs2_sum_active()) {
509 memset(&rr, 0xff, sizeof(rr));
510 rc = jffs2_flash_read(c, ref_offset(ref->node),
511 sizeof(struct jffs2_unknown_node),
512 &length, (char *)&rr);
513 if (rc || length != sizeof(struct jffs2_unknown_node)) {
514 JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
515 rc, sizeof(struct jffs2_unknown_node),
516 length, ref_offset(ref->node));
517 }
518 rc = jffs2_flash_write(c, ref_offset(ref->node), sizeof(rr),
519 &length, (char *)&rr);
520 if (rc || length != sizeof(struct jffs2_raw_xref)) {
521 JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n",
522 rc, sizeof(rr), length, ref_offset(ref->node));
523 }
524 }
525 spin_lock(&c->erase_completion_lock);
526 ref->node->next_in_ino = NULL;
527 spin_unlock(&c->erase_completion_lock);
528 jffs2_mark_node_obsolete(c, ref->node);
529 ref->node = NULL;
530}
531
532static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
533{
534 /* must be called under down_write(xattr_sem) */
535 struct jffs2_xattr_datum *xd;
536
537 BUG_ON(!ref->node);
538 delete_xattr_ref_node(c, ref);
539
540 xd = ref->xd;
541 xd->refcnt--;
542 if (!xd->refcnt)
543 delete_xattr_datum(c, xd);
544 jffs2_free_xattr_ref(ref);
545}
546
547static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
548{
549 /* must be called under down_write(xattr_sem) */
550 struct jffs2_raw_node_ref *raw;
551 struct jffs2_raw_xref rr;
552 size_t length;
553 uint32_t phys_ofs = write_ofs(c);
554 int ret;
555
556 rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
557 rr.nodetype = cpu_to_je16(JFFS2_NODETYPE_XREF);
558 rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
559 rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));
560
561 rr.ino = cpu_to_je32(ref->ic->ino);
562 rr.xid = cpu_to_je32(ref->xd->xid);
563 rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));
564
565 ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
566 if (ret || sizeof(rr) != length) {
567 JFFS2_WARNING("jffs2_flash_write() returned %d, request=%zu, retlen=%zu, at %#08x\n",
568 ret, sizeof(rr), length, phys_ofs);
569 ret = ret ? ret : -EIO;
570 if (length)
571 jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, PAD(sizeof(rr)), NULL);
572
573 return ret;
574 }
575
576 raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), NULL);
577 /* FIXME */ raw->next_in_ino = (void *)ref;
578 if (ref->node)
579 delete_xattr_ref_node(c, ref);
580 ref->node = raw;
581
582 dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid);
583
584 return 0;
585}
586
587static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic,
588 struct jffs2_xattr_datum *xd)
589{
590 /* must be called under down_write(xattr_sem) */
591 struct jffs2_xattr_ref *ref;
592 int ret;
593
594 ref = jffs2_alloc_xattr_ref();
595 if (!ref)
596 return ERR_PTR(-ENOMEM);
597 ref->ic = ic;
598 ref->xd = xd;
599
600 ret = save_xattr_ref(c, ref);
601 if (ret) {
602 jffs2_free_xattr_ref(ref);
603 return ERR_PTR(ret);
604 }
605
606 /* Chain to inode */
607 ref->next = ic->xref;
608 ic->xref = ref;
609
610 return ref; /* success */
611}
612
613void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
614{
615 /* It's called from jffs2_clear_inode() when an inode is being removed.
616 When an inode with XATTRs is removed, those XATTRs must be removed as well. */
617 struct jffs2_xattr_ref *ref, *_ref;
618
619 if (!ic || ic->nlink > 0)
620 return;
621
622 down_write(&c->xattr_sem);
623 for (ref = ic->xref; ref; ref = _ref) {
624 _ref = ref->next;
625 delete_xattr_ref(c, ref);
626 }
627 ic->xref = NULL;
628 up_write(&c->xattr_sem);
629}
630
631void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
632{
633 /* It's called from jffs2_free_ino_caches() when unmounting the FS. */
634 struct jffs2_xattr_datum *xd;
635 struct jffs2_xattr_ref *ref, *_ref;
636
637 down_write(&c->xattr_sem);
638 for (ref = ic->xref; ref; ref = _ref) {
639 _ref = ref->next;
640 xd = ref->xd;
641 xd->refcnt--;
642 if (!xd->refcnt) {
643 unload_xattr_datum(c, xd);
644 jffs2_free_xattr_datum(xd);
645 }
646 jffs2_free_xattr_ref(ref);
647 }
648 ic->xref = NULL;
649 up_write(&c->xattr_sem);
650}
651
652static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
653{
654 /* success of check_xattr_ref_inode() means that the inode (ic) does not have
655 * duplicate xattr name/value pairs. If a duplicate name/value pair is found,
656 * one of them is removed.
657 */
658 struct jffs2_xattr_ref *ref, *cmp, **pref;
659 int rc = 0;
660
661 if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED))
662 return 0;
663 down_write(&c->xattr_sem);
664 retry:
665 rc = 0;
666 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
667 if (!ref->xd->xname) {
668 rc = load_xattr_datum(c, ref->xd);
669 if (unlikely(rc > 0)) {
670 *pref = ref->next;
671 delete_xattr_ref(c, ref);
672 goto retry;
673 } else if (unlikely(rc < 0))
674 goto out;
675 }
676 for (cmp=ref->next, pref=&ref->next; cmp; pref=&cmp->next, cmp=cmp->next) {
677 if (!cmp->xd->xname) {
678 ref->xd->flags |= JFFS2_XFLAGS_BIND;
679 rc = load_xattr_datum(c, cmp->xd);
680 ref->xd->flags &= ~JFFS2_XFLAGS_BIND;
681 if (unlikely(rc > 0)) {
682 *pref = cmp->next;
683 delete_xattr_ref(c, cmp);
684 goto retry;
685 } else if (unlikely(rc < 0))
686 goto out;
687 }
688 if (ref->xd->xprefix == cmp->xd->xprefix
689 && !strcmp(ref->xd->xname, cmp->xd->xname)) {
690 *pref = cmp->next;
691 delete_xattr_ref(c, cmp);
692 goto retry;
693 }
694 }
695 }
696 ic->flags |= INO_FLAGS_XATTR_CHECKED;
697 out:
698 up_write(&c->xattr_sem);
699
700 return rc;
701}
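The loop above, like the similar ones in the listxattr/getxattr/setxattr paths further down, walks the inode's xref chain through a pointer-to-pointer so an entry can be spliced out without keeping a separate previous pointer. A self-contained illustration of that unlink idiom; it is illustrative code, not part of the patch.

#include <stdio.h>
#include <stdlib.h>

struct node {
        int v;
        struct node *next;
};

/* *pp always names the link that points at the current node,
 * so unlinking is a single assignment and needs no "prev" */
static void remove_matching(struct node **pp, int bad)
{
        struct node *n;

        while ((n = *pp) != NULL) {
                if (n->v == bad) {
                        *pp = n->next;          /* splice out, keep pp in place */
                        free(n);
                } else {
                        pp = &n->next;          /* advance only when the node is kept */
                }
        }
}

int main(void)
{
        struct node *head = NULL, **pp = &head;
        int vals[] = { 1, 2, 2, 3 };

        for (unsigned i = 0; i < sizeof(vals)/sizeof(vals[0]); i++) {
                *pp = calloc(1, sizeof(**pp));
                (*pp)->v = vals[i];
                pp = &(*pp)->next;
        }
        remove_matching(&head, 2);
        for (struct node *n = head; n; n = n->next)
                printf("%d ", n->v);            /* prints: 1 3 */
        printf("\n");
        return 0;
}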
702
703/* -------- xattr subsystem functions ---------------
704 * jffs2_init_xattr_subsystem(c)
705 * is used to initialize the semaphore, the list heads and some other variables.
706 * jffs2_find_xattr_datum(c, xid)
707 * is used to look up an xdatum during the scanning process.
708 * jffs2_clear_xattr_subsystem(c)
709 * is used to release any xattr-related objects.
710 * jffs2_build_xattr_subsystem(c)
711 * is used to associate xdatums and xrefs during the superblock building process.
712 * jffs2_setup_xattr_datum(c, xid, version)
713 * is used to insert an xdatum during the scanning process.
714 * -------------------------------------------------- */
715void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c)
716{
717 int i;
718
719 for (i=0; i < XATTRINDEX_HASHSIZE; i++)
720 INIT_LIST_HEAD(&c->xattrindex[i]);
721 INIT_LIST_HEAD(&c->xattr_unchecked);
722 c->xref_temp = NULL;
723
724 init_rwsem(&c->xattr_sem);
725 c->xdatum_mem_usage = 0;
726 c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */
727}
728
729static struct jffs2_xattr_datum *jffs2_find_xattr_datum(struct jffs2_sb_info *c, uint32_t xid)
730{
731 struct jffs2_xattr_datum *xd;
732 int i = xid % XATTRINDEX_HASHSIZE;
733
734 /* It's only used in scanning/building process. */
735 BUG_ON(!(c->flags & (JFFS2_SB_FLAG_SCANNING|JFFS2_SB_FLAG_BUILDING)));
736
737 list_for_each_entry(xd, &c->xattrindex[i], xindex) {
738 if (xd->xid==xid)
739 return xd;
740 }
741 return NULL;
742}
743
744void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
745{
746 struct jffs2_xattr_datum *xd, *_xd;
747 struct jffs2_xattr_ref *ref, *_ref;
748 int i;
749
750 for (ref=c->xref_temp; ref; ref = _ref) {
751 _ref = ref->next;
752 jffs2_free_xattr_ref(ref);
753 }
754 c->xref_temp = NULL;
755
756 for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
757 list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
758 list_del(&xd->xindex);
759 if (xd->xname)
760 kfree(xd->xname);
761 jffs2_free_xattr_datum(xd);
762 }
763 }
764}
765
766void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
767{
768 struct jffs2_xattr_ref *ref, *_ref;
769 struct jffs2_xattr_datum *xd, *_xd;
770 struct jffs2_inode_cache *ic;
771 int i, xdatum_count =0, xdatum_unchecked_count = 0, xref_count = 0;
772
773 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
774
775 /* Phase.1 */
776 for (ref=c->xref_temp; ref; ref=_ref) {
777 _ref = ref->next;
778 /* checking REF_UNCHECKED nodes */
779 if (ref_flags(ref->node) != REF_PRISTINE) {
780 if (verify_xattr_ref(c, ref)) {
781 delete_xattr_ref_node(c, ref);
782 jffs2_free_xattr_ref(ref);
783 continue;
784 }
785 }
786 /* At this point, ref->xid and ref->ino contain XID and inode number.
787 ref->xd and ref->ic are not valid yet. */
788 xd = jffs2_find_xattr_datum(c, ref->xid);
789 ic = jffs2_get_ino_cache(c, ref->ino);
790 if (!xd || !ic) {
791 if (ref_flags(ref->node) != REF_UNCHECKED)
792 JFFS2_WARNING("xref(ino=%u, xid=%u) is an orphan.\n",
793 ref->ino, ref->xid);
794 delete_xattr_ref_node(c, ref);
795 jffs2_free_xattr_ref(ref);
796 continue;
797 }
798 ref->xd = xd;
799 ref->ic = ic;
800 xd->refcnt++;
801 ref->next = ic->xref;
802 ic->xref = ref;
803 xref_count++;
804 }
805 c->xref_temp = NULL;
806 /* After this, ref->xid/ino are NEVER used. */
807
808 /* Phase.2 */
809 for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
810 list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
811 list_del_init(&xd->xindex);
812 if (!xd->refcnt) {
813 if (ref_flags(xd->node) != REF_UNCHECKED)
814 JFFS2_WARNING("orphan xdatum(xid=%u, version=%u) at %#08x\n",
815 xd->xid, xd->version, ref_offset(xd->node));
816 delete_xattr_datum(c, xd);
817 continue;
818 }
819 if (ref_flags(xd->node) != REF_PRISTINE) {
820 dbg_xattr("unchecked xdatum(xid=%u) at %#08x\n",
821 xd->xid, ref_offset(xd->node));
822 list_add(&xd->xindex, &c->xattr_unchecked);
823 xdatum_unchecked_count++;
824 }
825 xdatum_count++;
826 }
827 }
828 /* build complete */
829 JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum (%u unchecked) and "
830 "%u of xref found.\n", xdatum_count, xdatum_unchecked_count, xref_count);
831}
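jffs2_build_xattr_subsystem() runs in two phases: the temporary xrefs collected while scanning only carry numeric (ino, xid) pairs, so phase 1 resolves them to inode caches and xdatums and drops anything that does not resolve, and phase 2 discards unreferenced xdatums and queues unchecked ones for later verification. A compact user-space sketch of the resolve-or-drop step; the data and names are made up for illustration.

#include <stdio.h>

struct datum {
        unsigned xid;
        unsigned refcnt;
};

static struct datum data[] = { { 7, 0 }, { 9, 0 } };

static struct datum *find_datum(unsigned xid)
{
        for (unsigned i = 0; i < sizeof(data)/sizeof(data[0]); i++)
                if (data[i].xid == xid)
                        return &data[i];
        return NULL;
}

int main(void)
{
        /* (ino, xid) pairs as a scan would leave them */
        unsigned scanned_refs[][2] = { { 10, 7 }, { 11, 12 } };

        for (unsigned i = 0; i < 2; i++) {
                struct datum *xd = find_datum(scanned_refs[i][1]);

                if (!xd) {                      /* orphan: no such xdatum */
                        printf("dropping orphan xref ino=%u xid=%u\n",
                               scanned_refs[i][0], scanned_refs[i][1]);
                        continue;
                }
                xd->refcnt++;
                printf("ino=%u now refers to xid=%u (refcnt=%u)\n",
                       scanned_refs[i][0], xd->xid, xd->refcnt);
        }
        return 0;
}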
832
833struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
834 uint32_t xid, uint32_t version)
835{
836 struct jffs2_xattr_datum *xd, *_xd;
837
838 _xd = jffs2_find_xattr_datum(c, xid);
839 if (_xd) {
840 dbg_xattr("duplicate xdatum (xid=%u, version=%u/%u) at %#08x\n",
841 xid, version, _xd->version, ref_offset(_xd->node));
842 if (version < _xd->version)
843 return ERR_PTR(-EEXIST);
844 }
845 xd = jffs2_alloc_xattr_datum();
846 if (!xd)
847 return ERR_PTR(-ENOMEM);
848 xd->xid = xid;
849 xd->version = version;
850 if (xd->xid > c->highest_xid)
851 c->highest_xid = xd->xid;
852 list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
853
854 if (_xd) {
855 list_del_init(&_xd->xindex);
856 delete_xattr_datum_node(c, _xd);
857 jffs2_free_xattr_datum(_xd);
858 }
859 return xd;
860}
861
862/* -------- xattr handler functions ------------------
863 * xprefix_to_handler(xprefix)
864 * is used to translate an xprefix into the corresponding xattr_handler.
865 * jffs2_listxattr(dentry, buffer, size)
866 * is the implementation of the listxattr handler on jffs2.
867 * do_jffs2_getxattr(inode, xprefix, xname, buffer, size)
868 * is the implementation of the getxattr handler on jffs2.
869 * do_jffs2_setxattr(inode, xprefix, xname, buffer, size, flags)
870 * is the implementation of the setxattr handler on jffs2.
871 * -------------------------------------------------- */
872struct xattr_handler *jffs2_xattr_handlers[] = {
873 &jffs2_user_xattr_handler,
874#ifdef CONFIG_JFFS2_FS_SECURITY
875 &jffs2_security_xattr_handler,
876#endif
877#ifdef CONFIG_JFFS2_FS_POSIX_ACL
878 &jffs2_acl_access_xattr_handler,
879 &jffs2_acl_default_xattr_handler,
880#endif
881 &jffs2_trusted_xattr_handler,
882 NULL
883};
884
885static struct xattr_handler *xprefix_to_handler(int xprefix) {
886 struct xattr_handler *ret;
887
888 switch (xprefix) {
889 case JFFS2_XPREFIX_USER:
890 ret = &jffs2_user_xattr_handler;
891 break;
892#ifdef CONFIG_JFFS2_FS_SECURITY
893 case JFFS2_XPREFIX_SECURITY:
894 ret = &jffs2_security_xattr_handler;
895 break;
896#endif
897#ifdef CONFIG_JFFS2_FS_POSIX_ACL
898 case JFFS2_XPREFIX_ACL_ACCESS:
899 ret = &jffs2_acl_access_xattr_handler;
900 break;
901 case JFFS2_XPREFIX_ACL_DEFAULT:
902 ret = &jffs2_acl_default_xattr_handler;
903 break;
904#endif
905 case JFFS2_XPREFIX_TRUSTED:
906 ret = &jffs2_trusted_xattr_handler;
907 break;
908 default:
909 ret = NULL;
910 break;
911 }
912 return ret;
913}
914
915ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
916{
917 struct inode *inode = dentry->d_inode;
918 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
919 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
920 struct jffs2_inode_cache *ic = f->inocache;
921 struct jffs2_xattr_ref *ref, **pref;
922 struct jffs2_xattr_datum *xd;
923 struct xattr_handler *xhandle;
924 ssize_t len, rc;
925 int retry = 0;
926
927 rc = check_xattr_ref_inode(c, ic);
928 if (unlikely(rc))
929 return rc;
930
931 down_read(&c->xattr_sem);
932 retry:
933 len = 0;
934 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
935 BUG_ON(ref->ic != ic);
936 xd = ref->xd;
937 if (!xd->xname) {
938 /* xdatum is uncached */
939 if (!retry) {
940 retry = 1;
941 up_read(&c->xattr_sem);
942 down_write(&c->xattr_sem);
943 goto retry;
944 } else {
945 rc = load_xattr_datum(c, xd);
946 if (unlikely(rc > 0)) {
947 *pref = ref->next;
948 delete_xattr_ref(c, ref);
949 goto retry;
950 } else if (unlikely(rc < 0))
951 goto out;
952 }
953 }
954 xhandle = xprefix_to_handler(xd->xprefix);
955 if (!xhandle)
956 continue;
957 if (buffer) {
958 rc = xhandle->list(inode, buffer+len, size-len, xd->xname, xd->name_len);
959 } else {
960 rc = xhandle->list(inode, NULL, 0, xd->xname, xd->name_len);
961 }
962 if (rc < 0)
963 goto out;
964 len += rc;
965 }
966 rc = len;
967 out:
968 if (!retry) {
969 up_read(&c->xattr_sem);
970 } else {
971 up_write(&c->xattr_sem);
972 }
973 return rc;
974}
975
976int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
977 char *buffer, size_t size)
978{
979 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
980 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
981 struct jffs2_inode_cache *ic = f->inocache;
982 struct jffs2_xattr_datum *xd;
983 struct jffs2_xattr_ref *ref, **pref;
984 int rc, retry = 0;
985
986 rc = check_xattr_ref_inode(c, ic);
987 if (unlikely(rc))
988 return rc;
989
990 down_read(&c->xattr_sem);
991 retry:
992 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
993 BUG_ON(ref->ic!=ic);
994
995 xd = ref->xd;
996 if (xd->xprefix != xprefix)
997 continue;
998 if (!xd->xname) {
999 /* xdatum is uncached */
1000 if (!retry) {
1001 retry = 1;
1002 up_read(&c->xattr_sem);
1003 down_write(&c->xattr_sem);
1004 goto retry;
1005 } else {
1006 rc = load_xattr_datum(c, xd);
1007 if (unlikely(rc > 0)) {
1008 *pref = ref->next;
1009 delete_xattr_ref(c, ref);
1010 goto retry;
1011 } else if (unlikely(rc < 0)) {
1012 goto out;
1013 }
1014 }
1015 }
1016 if (!strcmp(xname, xd->xname)) {
1017 rc = xd->value_len;
1018 if (buffer) {
1019 if (size < rc) {
1020 rc = -ERANGE;
1021 } else {
1022 memcpy(buffer, xd->xvalue, rc);
1023 }
1024 }
1025 goto out;
1026 }
1027 }
1028 rc = -ENODATA;
1029 out:
1030 if (!retry) {
1031 up_read(&c->xattr_sem);
1032 } else {
1033 up_write(&c->xattr_sem);
1034 }
1035 return rc;
1036}
1037
1038int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
1039 const char *buffer, size_t size, int flags)
1040{
1041 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
1042 struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
1043 struct jffs2_inode_cache *ic = f->inocache;
1044 struct jffs2_xattr_datum *xd;
1045 struct jffs2_xattr_ref *ref, *newref, **pref;
1046 uint32_t length, request;
1047 int rc;
1048
1049 rc = check_xattr_ref_inode(c, ic);
1050 if (unlikely(rc))
1051 return rc;
1052
1053 request = PAD(sizeof(struct jffs2_raw_xattr) + strlen(xname) + 1 + size);
1054 rc = jffs2_reserve_space(c, request, &length,
1055 ALLOC_NORMAL, JFFS2_SUMMARY_XATTR_SIZE);
1056 if (rc) {
1057 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
1058 return rc;
1059 }
1060
1061 /* Find existing xattr */
1062 down_write(&c->xattr_sem);
1063 retry:
1064 for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) {
1065 xd = ref->xd;
1066 if (xd->xprefix != xprefix)
1067 continue;
1068 if (!xd->xname) {
1069 rc = load_xattr_datum(c, xd);
1070 if (unlikely(rc > 0)) {
1071 *pref = ref->next;
1072 delete_xattr_ref(c, ref);
1073 goto retry;
1074 } else if (unlikely(rc < 0))
1075 goto out;
1076 }
1077 if (!strcmp(xd->xname, xname)) {
1078 if (flags & XATTR_CREATE) {
1079 rc = -EEXIST;
1080 goto out;
1081 }
1082 if (!buffer) {
1083 *pref = ref->next;
1084 delete_xattr_ref(c, ref);
1085 rc = 0;
1086 goto out;
1087 }
1088 goto found;
1089 }
1090 }
1091 /* not found */
1092 if (flags & XATTR_REPLACE) {
1093 rc = -ENODATA;
1094 goto out;
1095 }
1096 if (!buffer) {
1097 rc = -EINVAL;
1098 goto out;
1099 }
1100 found:
1101 xd = create_xattr_datum(c, xprefix, xname, buffer, size);
1102 if (IS_ERR(xd)) {
1103 rc = PTR_ERR(xd);
1104 goto out;
1105 }
1106 up_write(&c->xattr_sem);
1107 jffs2_complete_reservation(c);
1108
1109 /* create xattr_ref */
1110 request = PAD(sizeof(struct jffs2_raw_xref));
1111 rc = jffs2_reserve_space(c, request, &length,
1112 ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE);
1113 if (rc) {
1114 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
1115 down_write(&c->xattr_sem);
1116 xd->refcnt--;
1117 if (!xd->refcnt)
1118 delete_xattr_datum(c, xd);
1119 up_write(&c->xattr_sem);
1120 return rc;
1121 }
1122 down_write(&c->xattr_sem);
1123 if (ref)
1124 *pref = ref->next;
1125 newref = create_xattr_ref(c, ic, xd);
1126 if (IS_ERR(newref)) {
1127 if (ref) {
1128 ref->next = ic->xref;
1129 ic->xref = ref;
1130 }
1131 rc = PTR_ERR(newref);
1132 xd->refcnt--;
1133 if (!xd->refcnt)
1134 delete_xattr_datum(c, xd);
1135 } else if (ref) {
1136 delete_xattr_ref(c, ref);
1137 }
1138 out:
1139 up_write(&c->xattr_sem);
1140 jffs2_complete_reservation(c);
1141 return rc;
1142}
1143
1144/* -------- garbage collector functions -------------
1145 * jffs2_garbage_collect_xattr_datum(c, xd)
1146 * is used to move an xdatum into a new node.
1147 * jffs2_garbage_collect_xattr_ref(c, ref)
1148 * is used to move an xref into a new node.
1149 * jffs2_verify_xattr(c)
1150 * is used to call do_verify_xattr_datum() on unchecked xdatums before garbage collecting.
1151 * -------------------------------------------------- */
1152int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
1153{
1154 uint32_t totlen, length, old_ofs;
1155 int rc = -EINVAL;
1156
1157 down_write(&c->xattr_sem);
1158 BUG_ON(!xd->node);
1159
1160 old_ofs = ref_offset(xd->node);
1161 totlen = ref_totlen(c, c->gcblock, xd->node);
1162 if (totlen < sizeof(struct jffs2_raw_xattr))
1163 goto out;
1164
1165 if (!xd->xname) {
1166 rc = load_xattr_datum(c, xd);
1167 if (unlikely(rc > 0)) {
1168 delete_xattr_datum_node(c, xd);
1169 rc = 0;
1170 goto out;
1171 } else if (unlikely(rc < 0))
1172 goto out;
1173 }
1174 rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE);
1175 if (rc || length < totlen) {
1176 JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, totlen);
1177 rc = rc ? rc : -EBADFD;
1178 goto out;
1179 }
1180 rc = save_xattr_datum(c, xd);
1181 if (!rc)
1182 dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n",
1183 xd->xid, xd->version, old_ofs, ref_offset(xd->node));
1184 out:
1185 up_write(&c->xattr_sem);
1186 return rc;
1187}
1188
1189
1190int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
1191{
1192 uint32_t totlen, length, old_ofs;
1193 int rc = -EINVAL;
1194
1195 down_write(&c->xattr_sem);
1196 BUG_ON(!ref->node);
1197
1198 old_ofs = ref_offset(ref->node);
1199 totlen = ref_totlen(c, c->gcblock, ref->node);
1200 if (totlen != sizeof(struct jffs2_raw_xref))
1201 goto out;
1202
1203 rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
1204 if (rc || length < totlen) {
1205 JFFS2_WARNING("%s: jffs2_reserve_space() = %d, request = %u\n",
1206 __FUNCTION__, rc, totlen);
1207 rc = rc ? rc : -EBADFD;
1208 goto out;
1209 }
1210 rc = save_xattr_ref(c, ref);
1211 if (!rc)
1212 dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n",
1213 ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
1214 out:
1215 up_write(&c->xattr_sem);
1216 return rc;
1217}
1218
1219int jffs2_verify_xattr(struct jffs2_sb_info *c)
1220{
1221 struct jffs2_xattr_datum *xd, *_xd;
1222 int rc;
1223
1224 down_write(&c->xattr_sem);
1225 list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
1226 rc = do_verify_xattr_datum(c, xd);
1227 if (rc == 0) {
1228 list_del_init(&xd->xindex);
1229 break;
1230 } else if (rc > 0) {
1231 list_del_init(&xd->xindex);
1232 delete_xattr_datum_node(c, xd);
1233 }
1234 }
1235 up_write(&c->xattr_sem);
1236
1237 return list_empty(&c->xattr_unchecked) ? 1 : 0;
1238}
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
new file mode 100644
index 000000000000..2c199856c582
--- /dev/null
+++ b/fs/jffs2/xattr.h
@@ -0,0 +1,116 @@
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2006 NEC Corporation
5 *
6 * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11#ifndef _JFFS2_FS_XATTR_H_
12#define _JFFS2_FS_XATTR_H_
13
14#include <linux/xattr.h>
15#include <linux/list.h>
16
17#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */
18#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */
19
20struct jffs2_xattr_datum
21{
22 void *always_null;
23 struct jffs2_raw_node_ref *node;
24 uint8_t class;
25 uint8_t flags;
26 uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */
27
28 struct list_head xindex; /* chained from c->xattrindex[n] */
29 uint32_t refcnt; /* # of xattr_ref refers this */
30 uint32_t xid;
31 uint32_t version;
32
33 uint32_t data_crc;
34 uint32_t hashkey;
35 char *xname; /* XATTR name without prefix */
36 uint32_t name_len; /* length of xname */
37 char *xvalue; /* XATTR value */
38 uint32_t value_len; /* length of xvalue */
39};
40
41struct jffs2_inode_cache;
42struct jffs2_xattr_ref
43{
44 void *always_null;
45 struct jffs2_raw_node_ref *node;
46 uint8_t class;
47 uint8_t flags; /* Currently unused */
48 u16 unused;
49
50 union {
51 struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */
52 uint32_t ino; /* only used in scanning/building */
53 };
54 union {
55 struct jffs2_xattr_datum *xd; /* reference to jffs2_xattr_datum */
56 uint32_t xid; /* only used in scanning/building */
57 };
58 struct jffs2_xattr_ref *next; /* chained from ic->xref_list */
59};
60
61#ifdef CONFIG_JFFS2_FS_XATTR
62
63extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
64extern void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c);
65extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
66
67extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
68 uint32_t xid, uint32_t version);
69
70extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
71extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
72
73extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
74extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
75extern int jffs2_verify_xattr(struct jffs2_sb_info *c);
76
77extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
78 char *buffer, size_t size);
79extern int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
80 const char *buffer, size_t size, int flags);
81
82extern struct xattr_handler *jffs2_xattr_handlers[];
83extern struct xattr_handler jffs2_user_xattr_handler;
84extern struct xattr_handler jffs2_trusted_xattr_handler;
85
86extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
87#define jffs2_getxattr generic_getxattr
88#define jffs2_setxattr generic_setxattr
89#define jffs2_removexattr generic_removexattr
90
91#else
92
93#define jffs2_init_xattr_subsystem(c)
94#define jffs2_build_xattr_subsystem(c)
95#define jffs2_clear_xattr_subsystem(c)
96
97#define jffs2_xattr_delete_inode(c, ic)
98#define jffs2_xattr_free_inode(c, ic)
99#define jffs2_verify_xattr(c) (1)
100
101#define jffs2_xattr_handlers NULL
102#define jffs2_listxattr NULL
103#define jffs2_getxattr NULL
104#define jffs2_setxattr NULL
105#define jffs2_removexattr NULL
106
107#endif /* CONFIG_JFFS2_FS_XATTR */
108
109#ifdef CONFIG_JFFS2_FS_SECURITY
110extern int jffs2_init_security(struct inode *inode, struct inode *dir);
111extern struct xattr_handler jffs2_security_xattr_handler;
112#else
113#define jffs2_init_security(inode,dir) (0)
114#endif /* CONFIG_JFFS2_FS_SECURITY */
115
116#endif /* _JFFS2_FS_XATTR_H_ */
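When CONFIG_JFFS2_FS_XATTR is disabled, the header above collapses the entry points into empty macros and the handler table into NULL, so the rest of jffs2 can call them unconditionally. The same compile-out pattern in a stand-alone form; FEATURE_XATTR is a hypothetical stand-in for the Kconfig symbol.

#include <stdio.h>

#ifdef FEATURE_XATTR
static void init_xattr(void)
{
        printf("xattr subsystem up\n");
}
#else
#define init_xattr() do {} while (0)    /* compiled out: no-op at the call site */
#endif

int main(void)
{
        init_xattr();                   /* builds either way; no #ifdef needed here */
        printf("mounted\n");
        return 0;
}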
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
new file mode 100644
index 000000000000..ed046e19dbfa
--- /dev/null
+++ b/fs/jffs2/xattr_trusted.c
@@ -0,0 +1,52 @@
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2006 NEC Corporation
5 *
6 * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/fs.h>
13#include <linux/jffs2.h>
14#include <linux/xattr.h>
15#include <linux/mtd/mtd.h>
16#include "nodelist.h"
17
18static int jffs2_trusted_getxattr(struct inode *inode, const char *name,
19 void *buffer, size_t size)
20{
21 if (!strcmp(name, ""))
22 return -EINVAL;
23 return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size);
24}
25
26static int jffs2_trusted_setxattr(struct inode *inode, const char *name, const void *buffer,
27 size_t size, int flags)
28{
29 if (!strcmp(name, ""))
30 return -EINVAL;
31 return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size, flags);
32}
33
34static size_t jffs2_trusted_listxattr(struct inode *inode, char *list, size_t list_size,
35 const char *name, size_t name_len)
36{
37 size_t retlen = XATTR_TRUSTED_PREFIX_LEN + name_len + 1;
38
39 if (list && retlen<=list_size) {
40 strcpy(list, XATTR_TRUSTED_PREFIX);
41 strcpy(list + XATTR_TRUSTED_PREFIX_LEN, name);
42 }
43
44 return retlen;
45}
46
47struct xattr_handler jffs2_trusted_xattr_handler = {
48 .prefix = XATTR_TRUSTED_PREFIX,
49 .list = jffs2_trusted_listxattr,
50 .set = jffs2_trusted_setxattr,
51 .get = jffs2_trusted_getxattr
52};
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
new file mode 100644
index 000000000000..2f8e9aa01ea0
--- /dev/null
+++ b/fs/jffs2/xattr_user.c
@@ -0,0 +1,52 @@
1/*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2006 NEC Corporation
5 *
6 * Created by KaiGai Kohei <kaigai@ak.jp.nec.com>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 */
11#include <linux/kernel.h>
12#include <linux/fs.h>
13#include <linux/jffs2.h>
14#include <linux/xattr.h>
15#include <linux/mtd/mtd.h>
16#include "nodelist.h"
17
18static int jffs2_user_getxattr(struct inode *inode, const char *name,
19 void *buffer, size_t size)
20{
21 if (!strcmp(name, ""))
22 return -EINVAL;
23 return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size);
24}
25
26static int jffs2_user_setxattr(struct inode *inode, const char *name, const void *buffer,
27 size_t size, int flags)
28{
29 if (!strcmp(name, ""))
30 return -EINVAL;
31 return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size, flags);
32}
33
34static size_t jffs2_user_listxattr(struct inode *inode, char *list, size_t list_size,
35 const char *name, size_t name_len)
36{
37 size_t retlen = XATTR_USER_PREFIX_LEN + name_len + 1;
38
39 if (list && retlen <= list_size) {
40 strcpy(list, XATTR_USER_PREFIX);
41 strcpy(list + XATTR_USER_PREFIX_LEN, name);
42 }
43
44 return retlen;
45}
46
47struct xattr_handler jffs2_user_xattr_handler = {
48 .prefix = XATTR_USER_PREFIX,
49 .list = jffs2_user_listxattr,
50 .set = jffs2_user_setxattr,
51 .get = jffs2_user_getxattr
52};
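Both listxattr helpers above follow the usual two-pass convention: they always return the space the entry needs and only copy it when a sufficiently large buffer was supplied, so a caller can first probe with a NULL list to size its buffer. A self-contained illustration of that contract; it models the convention, not the kernel interface.

#include <stdio.h>
#include <string.h>

#define PREFIX "user."
#define PREFIX_LEN (sizeof(PREFIX) - 1)

/* always report the space needed; copy only when the buffer is big enough */
static size_t list_one(char *list, size_t list_size, const char *name, size_t name_len)
{
        size_t retlen = PREFIX_LEN + name_len + 1;

        if (list && retlen <= list_size) {
                strcpy(list, PREFIX);
                strcpy(list + PREFIX_LEN, name);
        }
        return retlen;
}

int main(void)
{
        char buf[64];
        size_t need = list_one(NULL, 0, "comment", 7);          /* sizing pass */

        if (need <= sizeof(buf)) {
                list_one(buf, sizeof(buf), "comment", 7);       /* copy pass */
                printf("need=%zu entry=\"%s\"\n", need, buf);
        }
        return 0;
}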
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h
index cf792bb3c726..c6f70660b371 100644
--- a/include/linux/jffs2.h
+++ b/include/linux/jffs2.h
@@ -65,6 +65,18 @@
65 65
66#define JFFS2_NODETYPE_SUMMARY (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 6) 66#define JFFS2_NODETYPE_SUMMARY (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 6)
67 67
68#define JFFS2_NODETYPE_XATTR (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 8)
69#define JFFS2_NODETYPE_XREF (JFFS2_FEATURE_INCOMPAT | JFFS2_NODE_ACCURATE | 9)
70
71/* XATTR Related */
72#define JFFS2_XPREFIX_USER 1 /* for "user." */
73#define JFFS2_XPREFIX_SECURITY 2 /* for "security." */
74#define JFFS2_XPREFIX_ACL_ACCESS 3 /* for "system.posix_acl_access" */
75#define JFFS2_XPREFIX_ACL_DEFAULT 4 /* for "system.posix_acl_default" */
76#define JFFS2_XPREFIX_TRUSTED 5 /* for "trusted.*" */
77
78#define JFFS2_ACL_VERSION 0x0001
79
68// Maybe later... 80// Maybe later...
69//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3) 81//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3)
70//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4) 82//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4)
@@ -82,11 +94,11 @@
82 94
83typedef struct { 95typedef struct {
84 uint32_t v32; 96 uint32_t v32;
85} __attribute__((packed)) jint32_t; 97} __attribute__((packed)) jint32_t;
86 98
87typedef struct { 99typedef struct {
88 uint32_t m; 100 uint32_t m;
89} __attribute__((packed)) jmode_t; 101} __attribute__((packed)) jmode_t;
90 102
91typedef struct { 103typedef struct {
92 uint16_t v16; 104 uint16_t v16;
@@ -99,7 +111,7 @@ struct jffs2_unknown_node
99 jint16_t nodetype; 111 jint16_t nodetype;
100 jint32_t totlen; /* So we can skip over nodes we don't grok */ 112 jint32_t totlen; /* So we can skip over nodes we don't grok */
101 jint32_t hdr_crc; 113 jint32_t hdr_crc;
102} __attribute__((packed)); 114};
103 115
104struct jffs2_raw_dirent 116struct jffs2_raw_dirent
105{ 117{
@@ -117,7 +129,7 @@ struct jffs2_raw_dirent
117 jint32_t node_crc; 129 jint32_t node_crc;
118 jint32_t name_crc; 130 jint32_t name_crc;
119 uint8_t name[0]; 131 uint8_t name[0];
120} __attribute__((packed)); 132};
121 133
122/* The JFFS2 raw inode structure: Used for storage on physical media. */ 134/* The JFFS2 raw inode structure: Used for storage on physical media. */
123/* The uid, gid, atime, mtime and ctime members could be longer, but 135/* The uid, gid, atime, mtime and ctime members could be longer, but
@@ -149,6 +161,32 @@ struct jffs2_raw_inode
149 jint32_t data_crc; /* CRC for the (compressed) data. */ 161 jint32_t data_crc; /* CRC for the (compressed) data. */
150 jint32_t node_crc; /* CRC for the raw inode (excluding data) */ 162 jint32_t node_crc; /* CRC for the raw inode (excluding data) */
151 uint8_t data[0]; 163 uint8_t data[0];
164};
165
166struct jffs2_raw_xattr {
167 jint16_t magic;
168 jint16_t nodetype; /* = JFFS2_NODETYPE_XATTR */
169 jint32_t totlen;
170 jint32_t hdr_crc;
171 jint32_t xid; /* XATTR identifier number */
172 jint32_t version;
173 uint8_t xprefix;
174 uint8_t name_len;
175 jint16_t value_len;
176 jint32_t data_crc;
177 jint32_t node_crc;
178 uint8_t data[0];
179} __attribute__((packed));
180
181struct jffs2_raw_xref
182{
183 jint16_t magic;
184 jint16_t nodetype; /* = JFFS2_NODETYPE_XREF */
185 jint32_t totlen;
186 jint32_t hdr_crc;
187 jint32_t ino; /* inode number */
188 jint32_t xid; /* XATTR identifier number */
189 jint32_t node_crc;
152} __attribute__((packed)); 190} __attribute__((packed));
153 191
154struct jffs2_raw_summary 192struct jffs2_raw_summary
@@ -163,14 +201,22 @@ struct jffs2_raw_summary
163 jint32_t sum_crc; /* summary information crc */ 201 jint32_t sum_crc; /* summary information crc */
164 jint32_t node_crc; /* node crc */ 202 jint32_t node_crc; /* node crc */
165 jint32_t sum[0]; /* inode summary info */ 203 jint32_t sum[0]; /* inode summary info */
166} __attribute__((packed)); 204};
167 205
168union jffs2_node_union 206union jffs2_node_union
169{ 207{
170 struct jffs2_raw_inode i; 208 struct jffs2_raw_inode i;
171 struct jffs2_raw_dirent d; 209 struct jffs2_raw_dirent d;
210 struct jffs2_raw_xattr x;
211 struct jffs2_raw_xref r;
172 struct jffs2_raw_summary s; 212 struct jffs2_raw_summary s;
173 struct jffs2_unknown_node u; 213 struct jffs2_unknown_node u;
174}; 214};
175 215
216/* Data payload for device nodes. */
217union jffs2_device_node {
218 jint16_t old;
219 jint32_t new;
220};
221
176#endif /* __LINUX_JFFS2_H__ */ 222#endif /* __LINUX_JFFS2_H__ */
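
Editorial note: the new JFFS2_NODETYPE_XATTR and JFFS2_NODETYPE_XREF records are packed on-flash structures, so their layout does not depend on compiler padding. A hedged userspace check of the resulting sizes, assuming the header can be included from userspace:

/* Sketch only: verifies the packed layout of the node types added above.
 * The expected sizes in the comments are derived by summing the field
 * widths (jint16_t = 2 bytes, jint32_t = 4 bytes). */
#include <stdio.h>
#include <stdint.h>
#include <linux/jffs2.h>

int main(void)
{
	/* magic+nodetype+totlen+hdr_crc (12) + xid+version (8)
	 * + xprefix+name_len+value_len (4) + data_crc+node_crc (8) = 32 */
	printf("jffs2_raw_xattr: %zu bytes before data[]\n",
	       sizeof(struct jffs2_raw_xattr));
	/* common header (12) + ino+xid (8) + node_crc (4) = 24 */
	printf("jffs2_raw_xref:  %zu bytes\n",
	       sizeof(struct jffs2_raw_xref));
	return 0;
}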
diff --git a/include/linux/module.h b/include/linux/module.h
index eaec13ddd667..b0d44134f3c4 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -557,13 +557,4 @@ static inline void module_remove_driver(struct device_driver *driver)
557 557
558#define __MODULE_STRING(x) __stringify(x) 558#define __MODULE_STRING(x) __stringify(x)
559 559
560/* Use symbol_get and symbol_put instead. You'll thank me. */
561#define HAVE_INTER_MODULE
562extern void __deprecated inter_module_register(const char *,
563 struct module *, const void *);
564extern void __deprecated inter_module_unregister(const char *);
565extern const void * __deprecated inter_module_get_request(const char *,
566 const char *);
567extern void __deprecated inter_module_put(const char *);
568
569#endif /* _LINUX_MODULE_H */ 560#endif /* _LINUX_MODULE_H */
diff --git a/include/linux/mtd/inftl.h b/include/linux/mtd/inftl.h
index d7eaa40e5ab0..6977780e548f 100644
--- a/include/linux/mtd/inftl.h
+++ b/include/linux/mtd/inftl.h
@@ -46,7 +46,7 @@ struct INFTLrecord {
46 unsigned int nb_blocks; /* number of physical blocks */ 46 unsigned int nb_blocks; /* number of physical blocks */
47 unsigned int nb_boot_blocks; /* number of blocks used by the bios */ 47 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
48 struct erase_info instr; 48 struct erase_info instr;
49 struct nand_oobinfo oobinfo; 49 struct nand_ecclayout oobinfo;
50}; 50};
51 51
52int INFTL_mount(struct INFTLrecord *s); 52int INFTL_mount(struct INFTLrecord *s);
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index b6f2fdae65c6..e1d2a3d56546 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -56,18 +56,69 @@ struct mtd_erase_region_info {
56 u_int32_t numblocks; /* Number of blocks of erasesize in this region */ 56 u_int32_t numblocks; /* Number of blocks of erasesize in this region */
57}; 57};
58 58
59/*
60 * oob operation modes
61 *
62 * MTD_OOB_PLACE: oob data are placed at the given offset
63 * MTD_OOB_AUTO: oob data are automatically placed at the free areas
64 * which are defined by the ecclayout
65 * MTD_OOB_RAW: mode to read raw data+oob in one chunk. The oob data
66 * is inserted into the data. That's a raw image of the
67 * flash contents.
68 */
69typedef enum {
70 MTD_OOB_PLACE,
71 MTD_OOB_AUTO,
72 MTD_OOB_RAW,
73} mtd_oob_mode_t;
74
75/**
76 * struct mtd_oob_ops - oob operation operands
77 * @mode: operation mode
78 *
79 * @len: number of bytes to write/read. When a data buffer is given
80 * (datbuf != NULL) this is the number of data bytes. When
81 * no data buffer is available this is the number of oob bytes.
82 *
83 * @retlen: number of bytes written/read. When a data buffer is given
84 * (datbuf != NULL) this is the number of data bytes. When
85 * no data buffer is available this is the number of oob bytes.
86 *
87 * @ooblen: number of oob bytes per page
88 * @ooboffs: offset of oob data in the oob area (only relevant when
89 * mode = MTD_OOB_PLACE)
90 * @datbuf: data buffer - if NULL only oob data are read/written
91 * @oobbuf: oob data buffer
92 */
93struct mtd_oob_ops {
94 mtd_oob_mode_t mode;
95 size_t len;
96 size_t retlen;
97 size_t ooblen;
98 uint32_t ooboffs;
99 uint8_t *datbuf;
100 uint8_t *oobbuf;
101};
102
59struct mtd_info { 103struct mtd_info {
60 u_char type; 104 u_char type;
61 u_int32_t flags; 105 u_int32_t flags;
62 u_int32_t size; // Total size of the MTD 106 u_int32_t size; // Total size of the MTD
63 107
64 /* "Major" erase size for the device. Naïve users may take this 108 /* "Major" erase size for the device. Naïve users may take this
65 * to be the only erase size available, or may use the more detailed 109 * to be the only erase size available, or may use the more detailed
66 * information below if they desire 110 * information below if they desire
67 */ 111 */
68 u_int32_t erasesize; 112 u_int32_t erasesize;
113 /* Minimal writable flash unit size. In case of NOR flash it is 1 (even
114 * though individual bits can be cleared), in case of NAND flash it is
115 * one NAND page (or half, or one-fourth of it), in case of ECC-ed NOR
116 * it is the ECC block size, etc. It is illegal to have writesize = 0.
117 * Any driver registering a struct mtd_info must ensure a writesize of
118 * 1 or larger.
119 */
120 u_int32_t writesize;
69 121
70 u_int32_t oobblock; // Size of OOB blocks (e.g. 512)
71 u_int32_t oobsize; // Amount of OOB data per block (e.g. 16) 122 u_int32_t oobsize; // Amount of OOB data per block (e.g. 16)
72 u_int32_t ecctype; 123 u_int32_t ecctype;
73 u_int32_t eccsize; 124 u_int32_t eccsize;
@@ -79,7 +130,6 @@ struct mtd_info {
79 * MTD_PROGRAM_REGIONS flag is set. 130 * MTD_PROGRAM_REGIONS flag is set.
80 * (Maybe we should have an union for those?) 131 * (Maybe we should have an union for those?)
81 */ 132 */
82#define MTD_PROGREGION_SIZE(mtd) (mtd)->oobblock
83#define MTD_PROGREGION_CTRLMODE_VALID(mtd) (mtd)->oobsize 133#define MTD_PROGREGION_CTRLMODE_VALID(mtd) (mtd)->oobsize
84#define MTD_PROGREGION_CTRLMODE_INVALID(mtd) (mtd)->ecctype 134#define MTD_PROGREGION_CTRLMODE_INVALID(mtd) (mtd)->ecctype
85 135
@@ -87,9 +137,8 @@ struct mtd_info {
87 char *name; 137 char *name;
88 int index; 138 int index;
89 139
90 // oobinfo is a nand_oobinfo structure, which can be set by iotcl (MEMSETOOBINFO) 140 /* ecc layout structure pointer - read only ! */
91 struct nand_oobinfo oobinfo; 141 struct nand_ecclayout *ecclayout;
92 u_int32_t oobavail; // Number of bytes in OOB area available for fs
93 142
94 /* Data for variable erase regions. If numeraseregions is zero, 143 /* Data for variable erase regions. If numeraseregions is zero,
95 * it means that the whole device has erasesize as given above. 144 * it means that the whole device has erasesize as given above.
@@ -112,11 +161,10 @@ struct mtd_info {
112 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 161 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
113 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 162 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
114 163
115 int (*read_ecc) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel); 164 int (*read_oob) (struct mtd_info *mtd, loff_t from,
116 int (*write_ecc) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf, u_char *eccbuf, struct nand_oobinfo *oobsel); 165 struct mtd_oob_ops *ops);
117 166 int (*write_oob) (struct mtd_info *mtd, loff_t to,
118 int (*read_oob) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 167 struct mtd_oob_ops *ops);
119 int (*write_oob) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
120 168
121 /* 169 /*
122 * Methods to access the protection register area, present in some 170 * Methods to access the protection register area, present in some
@@ -130,17 +178,11 @@ struct mtd_info {
130 int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 178 int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
131 int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len); 179 int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, size_t len);
132 180
133 /* kvec-based read/write methods. We need these especially for NAND flash, 181 /* kvec-based read/write methods.
134 with its limited number of write cycles per erase.
135 NB: The 'count' parameter is the number of _vectors_, each of 182 NB: The 'count' parameter is the number of _vectors_, each of
136 which contains an (ofs, len) tuple. 183 which contains an (ofs, len) tuple.
137 */ 184 */
138 int (*readv) (struct mtd_info *mtd, struct kvec *vecs, unsigned long count, loff_t from, size_t *retlen);
139 int (*readv_ecc) (struct mtd_info *mtd, struct kvec *vecs, unsigned long count, loff_t from,
140 size_t *retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
141 int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen); 185 int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen);
142 int (*writev_ecc) (struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to,
143 size_t *retlen, u_char *eccbuf, struct nand_oobinfo *oobsel);
144 186
145 /* Sync */ 187 /* Sync */
146 void (*sync) (struct mtd_info *mtd); 188 void (*sync) (struct mtd_info *mtd);
@@ -159,6 +201,9 @@ struct mtd_info {
159 201
160 struct notifier_block reboot_notifier; /* default mode before reboot */ 202 struct notifier_block reboot_notifier; /* default mode before reboot */
161 203
204 /* ECC status information */
205 struct mtd_ecc_stats ecc_stats;
206
162 void *priv; 207 void *priv;
163 208
164 struct module *owner; 209 struct module *owner;
@@ -192,20 +237,6 @@ int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
192int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs, 237int default_mtd_readv(struct mtd_info *mtd, struct kvec *vecs,
193 unsigned long count, loff_t from, size_t *retlen); 238 unsigned long count, loff_t from, size_t *retlen);
194 239
195#define MTD_ERASE(mtd, args...) (*(mtd->erase))(mtd, args)
196#define MTD_POINT(mtd, a,b,c,d) (*(mtd->point))(mtd, a,b,c, (u_char **)(d))
197#define MTD_UNPOINT(mtd, arg) (*(mtd->unpoint))(mtd, (u_char *)arg)
198#define MTD_READ(mtd, args...) (*(mtd->read))(mtd, args)
199#define MTD_WRITE(mtd, args...) (*(mtd->write))(mtd, args)
200#define MTD_READV(mtd, args...) (*(mtd->readv))(mtd, args)
201#define MTD_WRITEV(mtd, args...) (*(mtd->writev))(mtd, args)
202#define MTD_READECC(mtd, args...) (*(mtd->read_ecc))(mtd, args)
203#define MTD_WRITEECC(mtd, args...) (*(mtd->write_ecc))(mtd, args)
204#define MTD_READOOB(mtd, args...) (*(mtd->read_oob))(mtd, args)
205#define MTD_WRITEOOB(mtd, args...) (*(mtd->write_oob))(mtd, args)
206#define MTD_SYNC(mtd) do { if (mtd->sync) (*(mtd->sync))(mtd); } while (0)
207
208
209#ifdef CONFIG_MTD_PARTITIONS 240#ifdef CONFIG_MTD_PARTITIONS
210void mtd_erase_callback(struct erase_info *instr); 241void mtd_erase_callback(struct erase_info *instr);
211#else 242#else
@@ -226,7 +257,7 @@ static inline void mtd_erase_callback(struct erase_info *instr)
226 257
227#ifdef CONFIG_MTD_DEBUG 258#ifdef CONFIG_MTD_DEBUG
228#define DEBUG(n, args...) \ 259#define DEBUG(n, args...) \
229 do { \ 260 do { \
230 if (n <= CONFIG_MTD_DEBUG_VERBOSE) \ 261 if (n <= CONFIG_MTD_DEBUG_VERBOSE) \
231 printk(KERN_INFO args); \ 262 printk(KERN_INFO args); \
232 } while(0) 263 } while(0)
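
Editorial note: with read_ecc/write_ecc and the old fixed-argument OOB methods removed, all OOB access now goes through the single mtd_oob_ops descriptor. A hedged in-kernel sketch of an OOB-only read through the new read_oob() hook, following the kerneldoc above (datbuf == NULL, so len counts OOB bytes); the helper name and error handling are illustrative:

#include <linux/errno.h>
#include <linux/mtd/mtd.h>

/* Sketch only: read 'nbytes' of OOB data belonging to the page at 'offs'. */
static int example_read_oob(struct mtd_info *mtd, loff_t offs,
			    uint8_t *buf, size_t nbytes)
{
	struct mtd_oob_ops ops;
	int ret;

	ops.mode = MTD_OOB_PLACE;	/* take OOB bytes from a fixed offset */
	ops.len = nbytes;		/* datbuf == NULL, so this counts OOB bytes */
	ops.retlen = 0;
	ops.ooblen = mtd->oobsize;	/* OOB bytes available per page */
	ops.ooboffs = 0;		/* start at the beginning of the OOB area */
	ops.datbuf = NULL;		/* no main-area data wanted */
	ops.oobbuf = buf;

	ret = mtd->read_oob(mtd, offs, &ops);
	if (ret < 0)
		return ret;

	return ops.retlen == nbytes ? 0 : -EIO;
}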
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index da5e67b3fc70..a30969eb9afe 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -11,47 +11,11 @@
11 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 * 13 *
14 * Info: 14 * Info:
15 * Contains standard defines and IDs for NAND flash devices 15 * Contains standard defines and IDs for NAND flash devices
16 * 16 *
17 * Changelog: 17 * Changelog:
18 * 01-31-2000 DMW Created 18 * See git changelog.
19 * 09-18-2000 SJH Moved structure out of the Disk-On-Chip drivers
20 * so it can be used by other NAND flash device
21 * drivers. I also changed the copyright since none
22 * of the original contents of this file are specific
23 * to DoC devices. David can whack me with a baseball
24 * bat later if I did something naughty.
25 * 10-11-2000 SJH Added private NAND flash structure for driver
26 * 10-24-2000 SJH Added prototype for 'nand_scan' function
27 * 10-29-2001 TG changed nand_chip structure to support
28 * hardwarespecific function for accessing control lines
29 * 02-21-2002 TG added support for different read/write adress and
30 * ready/busy line access function
31 * 02-26-2002 TG added chip_delay to nand_chip structure to optimize
32 * command delay times for different chips
33 * 04-28-2002 TG OOB config defines moved from nand.c to avoid duplicate
34 * defines in jffs2/wbuf.c
35 * 08-07-2002 TG forced bad block location to byte 5 of OOB, even if
36 * CONFIG_MTD_NAND_ECC_JFFS2 is not set
37 * 08-10-2002 TG extensions to nand_chip structure to support HW-ECC
38 *
39 * 08-29-2002 tglx nand_chip structure: data_poi for selecting
40 * internal / fs-driver buffer
41 * support for 6byte/512byte hardware ECC
42 * read_ecc, write_ecc extended for different oob-layout
43 * oob layout selections: NAND_NONE_OOB, NAND_JFFS2_OOB,
44 * NAND_YAFFS_OOB
45 * 11-25-2002 tglx Added Manufacturer code FUJITSU, NATIONAL
46 * Split manufacturer and device ID structures
47 *
48 * 02-08-2004 tglx added option field to nand structure for chip anomalities
49 * 05-25-2004 tglx added bad block table support, ST-MICRO manufacturer id
50 * update of nand_chip structure description
51 * 01-17-2005 dmarlin added extended commands for AG-AND device and added option
52 * for BBT_AUTO_REFRESH.
53 * 01-20-2005 dmarlin added optional pointer to hardware specific callback for
54 * extra error status checks.
55 */ 19 */
56#ifndef __LINUX_MTD_NAND_H 20#ifndef __LINUX_MTD_NAND_H
57#define __LINUX_MTD_NAND_H 21#define __LINUX_MTD_NAND_H
@@ -67,10 +31,6 @@ extern int nand_scan (struct mtd_info *mtd, int max_chips);
67/* Free resources held by the NAND device */ 31/* Free resources held by the NAND device */
68extern void nand_release (struct mtd_info *mtd); 32extern void nand_release (struct mtd_info *mtd);
69 33
70/* Read raw data from the device without ECC */
71extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_t len, size_t ooblen);
72
73
74/* The maximum number of NAND chips in an array */ 34/* The maximum number of NAND chips in an array */
75#define NAND_MAX_CHIPS 8 35#define NAND_MAX_CHIPS 8
76 36
@@ -79,44 +39,45 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
79 * adjust this accordingly. 39 * adjust this accordingly.
80 */ 40 */
81#define NAND_MAX_OOBSIZE 64 41#define NAND_MAX_OOBSIZE 64
42#define NAND_MAX_PAGESIZE 2048
82 43
83/* 44/*
84 * Constants for hardware specific CLE/ALE/NCE function 45 * Constants for hardware specific CLE/ALE/NCE function
85*/ 46 *
47 * These are bits which can be or'ed to set/clear multiple
48 * bits in one go.
49 */
86/* Select the chip by setting nCE to low */ 50/* Select the chip by setting nCE to low */
87#define NAND_CTL_SETNCE 1 51#define NAND_NCE 0x01
88/* Deselect the chip by setting nCE to high */
89#define NAND_CTL_CLRNCE 2
90/* Select the command latch by setting CLE to high */ 52/* Select the command latch by setting CLE to high */
91#define NAND_CTL_SETCLE 3 53#define NAND_CLE 0x02
92/* Deselect the command latch by setting CLE to low */
93#define NAND_CTL_CLRCLE 4
94/* Select the address latch by setting ALE to high */ 54/* Select the address latch by setting ALE to high */
95#define NAND_CTL_SETALE 5 55#define NAND_ALE 0x04
96/* Deselect the address latch by setting ALE to low */ 56
97#define NAND_CTL_CLRALE 6 57#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
98/* Set write protection by setting WP to high. Not used! */ 58#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
99#define NAND_CTL_SETWP 7 59#define NAND_CTRL_CHANGE 0x80
100/* Clear write protection by setting WP to low. Not used! */
101#define NAND_CTL_CLRWP 8
102 60
103/* 61/*
104 * Standard NAND flash commands 62 * Standard NAND flash commands
105 */ 63 */
106#define NAND_CMD_READ0 0 64#define NAND_CMD_READ0 0
107#define NAND_CMD_READ1 1 65#define NAND_CMD_READ1 1
66#define NAND_CMD_RNDOUT 5
108#define NAND_CMD_PAGEPROG 0x10 67#define NAND_CMD_PAGEPROG 0x10
109#define NAND_CMD_READOOB 0x50 68#define NAND_CMD_READOOB 0x50
110#define NAND_CMD_ERASE1 0x60 69#define NAND_CMD_ERASE1 0x60
111#define NAND_CMD_STATUS 0x70 70#define NAND_CMD_STATUS 0x70
112#define NAND_CMD_STATUS_MULTI 0x71 71#define NAND_CMD_STATUS_MULTI 0x71
113#define NAND_CMD_SEQIN 0x80 72#define NAND_CMD_SEQIN 0x80
73#define NAND_CMD_RNDIN 0x85
114#define NAND_CMD_READID 0x90 74#define NAND_CMD_READID 0x90
115#define NAND_CMD_ERASE2 0xd0 75#define NAND_CMD_ERASE2 0xd0
116#define NAND_CMD_RESET 0xff 76#define NAND_CMD_RESET 0xff
117 77
118/* Extended commands for large page devices */ 78/* Extended commands for large page devices */
119#define NAND_CMD_READSTART 0x30 79#define NAND_CMD_READSTART 0x30
80#define NAND_CMD_RNDOUTSTART 0xE0
120#define NAND_CMD_CACHEDPROG 0x15 81#define NAND_CMD_CACHEDPROG 0x15
121 82
122/* Extended commands for AG-AND device */ 83/* Extended commands for AG-AND device */
@@ -138,6 +99,8 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
138#define NAND_CMD_STATUS_RESET 0x7f 99#define NAND_CMD_STATUS_RESET 0x7f
139#define NAND_CMD_STATUS_CLEAR 0xff 100#define NAND_CMD_STATUS_CLEAR 0xff
140 101
102#define NAND_CMD_NONE -1
103
141/* Status bits */ 104/* Status bits */
142#define NAND_STATUS_FAIL 0x01 105#define NAND_STATUS_FAIL 0x01
143#define NAND_STATUS_FAIL_N1 0x02 106#define NAND_STATUS_FAIL_N1 0x02
@@ -148,21 +111,12 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
148/* 111/*
149 * Constants for ECC_MODES 112 * Constants for ECC_MODES
150 */ 113 */
151 114typedef enum {
152/* No ECC. Usage is not recommended ! */ 115 NAND_ECC_NONE,
153#define NAND_ECC_NONE 0 116 NAND_ECC_SOFT,
154/* Software ECC 3 byte ECC per 256 Byte data */ 117 NAND_ECC_HW,
155#define NAND_ECC_SOFT 1 118 NAND_ECC_HW_SYNDROME,
156/* Hardware ECC 3 byte ECC per 256 Byte data */ 119} nand_ecc_modes_t;
157#define NAND_ECC_HW3_256 2
158/* Hardware ECC 3 byte ECC per 512 Byte data */
159#define NAND_ECC_HW3_512 3
160/* Hardware ECC 3 byte ECC per 512 Byte data */
161#define NAND_ECC_HW6_512 4
162/* Hardware ECC 8 byte ECC per 512 Byte data */
163#define NAND_ECC_HW8_512 6
164/* Hardware ECC 12 byte ECC per 2048 Byte data */
165#define NAND_ECC_HW12_2048 7
166 120
167/* 121/*
168 * Constants for Hardware ECC 122 * Constants for Hardware ECC
@@ -201,6 +155,10 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
201 * bits from adjacent blocks from 'leaking' in altering data. 155 * bits from adjacent blocks from 'leaking' in altering data.
202 * This happens with the Renesas AG-AND chips, possibly others. */ 156 * This happens with the Renesas AG-AND chips, possibly others. */
203#define BBT_AUTO_REFRESH 0x00000080 157#define BBT_AUTO_REFRESH 0x00000080
158/* Chip does not require ready check on read. True
159 * for all large page devices, as they do not support
160 * autoincrement.*/
161#define NAND_NO_READRDY 0x00000100
204 162
205/* Options valid for Samsung large page devices */ 163/* Options valid for Samsung large page devices */
206#define NAND_SAMSUNG_LP_OPTIONS \ 164#define NAND_SAMSUNG_LP_OPTIONS \
@@ -219,18 +177,12 @@ extern int nand_read_raw (struct mtd_info *mtd, uint8_t *buf, loff_t from, size_
219/* Use a flash based bad block table. This option is passed to the 177/* Use a flash based bad block table. This option is passed to the
220 * default bad block table function. */ 178 * default bad block table function. */
221#define NAND_USE_FLASH_BBT 0x00010000 179#define NAND_USE_FLASH_BBT 0x00010000
222/* The hw ecc generator provides a syndrome instead a ecc value on read
223 * This can only work if we have the ecc bytes directly behind the
224 * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
225#define NAND_HWECC_SYNDROME 0x00020000
226/* This option skips the bbt scan during initialization. */ 180/* This option skips the bbt scan during initialization. */
227#define NAND_SKIP_BBTSCAN 0x00040000 181#define NAND_SKIP_BBTSCAN 0x00020000
228 182
229/* Options set by nand scan */ 183/* Options set by nand scan */
230/* Nand scan has allocated oob_buf */ 184/* Nand scan has allocated controller struct */
231#define NAND_OOBBUF_ALLOC 0x40000000 185#define NAND_CONTROLLER_ALLOC 0x80000000
232/* Nand scan has allocated data_buf */
233#define NAND_DATABUF_ALLOC 0x80000000
234 186
235 187
236/* 188/*
@@ -264,45 +216,102 @@ struct nand_hw_control {
264}; 216};
265 217
266/** 218/**
219 * struct nand_ecc_ctrl - Control structure for ecc
220 * @mode: ecc mode
221 * @steps: number of ecc steps per page
222 * @size: data bytes per ecc step
223 * @bytes: ecc bytes per step
224 * @total: total number of ecc bytes per page
225 * @prepad: padding information for syndrome based ecc generators
226 * @postpad: padding information for syndrome based ecc generators
227 * @hwctl: function to control hardware ecc generator. Must only
228 * be provided if a hardware ECC is available
229 * @calculate: function for ecc calculation or readback from ecc hardware
230 * @correct: function for ecc correction, matching to ecc generator (sw/hw)
231 * @read_page: function to read a page according to the ecc generator requirements
232 * @write_page: function to write a page according to the ecc generator requirements
233 */
234struct nand_ecc_ctrl {
235 nand_ecc_modes_t mode;
236 int steps;
237 int size;
238 int bytes;
239 int total;
240 int prepad;
241 int postpad;
242 struct nand_ecclayout *layout;
243 void (*hwctl)(struct mtd_info *mtd, int mode);
244 int (*calculate)(struct mtd_info *mtd,
245 const uint8_t *dat,
246 uint8_t *ecc_code);
247 int (*correct)(struct mtd_info *mtd, uint8_t *dat,
248 uint8_t *read_ecc,
249 uint8_t *calc_ecc);
250 int (*read_page)(struct mtd_info *mtd,
251 struct nand_chip *chip,
252 uint8_t *buf);
253 void (*write_page)(struct mtd_info *mtd,
254 struct nand_chip *chip,
255 const uint8_t *buf);
256 int (*read_oob)(struct mtd_info *mtd,
257 struct nand_chip *chip,
258 int page,
259 int sndcmd);
260 int (*write_oob)(struct mtd_info *mtd,
261 struct nand_chip *chip,
262 int page);
263};
264
265/**
266 * struct nand_buffers - buffer structure for read/write
267 * @ecccalc: buffer for calculated ecc
268 * @ecccode: buffer for ecc read from flash
269 * @oobwbuf: buffer for write oob data
270 * @databuf: buffer for data - dynamically sized
271 * @oobrbuf: buffer to read oob data
272 *
273 * Do not change the order of buffers. databuf and oobrbuf must be in
274 * consecutive order.
275 */
276struct nand_buffers {
277 uint8_t ecccalc[NAND_MAX_OOBSIZE];
278 uint8_t ecccode[NAND_MAX_OOBSIZE];
279 uint8_t oobwbuf[NAND_MAX_OOBSIZE];
280 uint8_t databuf[NAND_MAX_PAGESIZE];
281 uint8_t oobrbuf[NAND_MAX_OOBSIZE];
282};
283
284/**
267 * struct nand_chip - NAND Private Flash Chip Data 285 * struct nand_chip - NAND Private Flash Chip Data
268 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the flash device 286 * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the flash device
269 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the flash device 287 * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the flash device
270 * @read_byte: [REPLACEABLE] read one byte from the chip 288 * @read_byte: [REPLACEABLE] read one byte from the chip
271 * @write_byte: [REPLACEABLE] write one byte to the chip
272 * @read_word: [REPLACEABLE] read one word from the chip 289 * @read_word: [REPLACEABLE] read one word from the chip
273 * @write_word: [REPLACEABLE] write one word to the chip
274 * @write_buf: [REPLACEABLE] write data from the buffer to the chip 290 * @write_buf: [REPLACEABLE] write data from the buffer to the chip
275 * @read_buf: [REPLACEABLE] read data from the chip into the buffer 291 * @read_buf: [REPLACEABLE] read data from the chip into the buffer
276 * @verify_buf: [REPLACEABLE] verify buffer contents against the chip data 292 * @verify_buf: [REPLACEABLE] verify buffer contents against the chip data
277 * @select_chip: [REPLACEABLE] select chip nr 293 * @select_chip: [REPLACEABLE] select chip nr
278 * @block_bad: [REPLACEABLE] check, if the block is bad 294 * @block_bad: [REPLACEABLE] check, if the block is bad
279 * @block_markbad: [REPLACEABLE] mark the block bad 295 * @block_markbad: [REPLACEABLE] mark the block bad
280 * @hwcontrol: [BOARDSPECIFIC] hardwarespecific function for accessing control-lines 296 * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling
297 * ALE/CLE/nCE. Also used to write command and address
281 * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing device ready/busy line 298 * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing device ready/busy line
282 * If set to NULL no access to ready/busy is available and the ready/busy information 299 * If set to NULL no access to ready/busy is available and the ready/busy information
283 * is read from the chip status register 300 * is read from the chip status register
284 * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip 301 * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing commands to the chip
285 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready 302 * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on ready
286 * @calculate_ecc: [REPLACEABLE] function for ecc calculation or readback from ecc hardware 303 * @ecc: [BOARDSPECIFIC] ecc control structure
287 * @correct_data: [REPLACEABLE] function for ecc correction, matching to ecc generator (sw/hw)
288 * @enable_hwecc: [BOARDSPECIFIC] function to enable (reset) hardware ecc generator. Must only
289 * be provided if a hardware ECC is available
290 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support 304 * @erase_cmd: [INTERN] erase command write function, selectable due to AND support
291 * @scan_bbt: [REPLACEABLE] function to scan bad block table 305 * @scan_bbt: [REPLACEABLE] function to scan bad block table
292 * @eccmode: [BOARDSPECIFIC] mode of ecc, see defines
293 * @eccsize: [INTERN] databytes used per ecc-calculation
294 * @eccbytes: [INTERN] number of ecc bytes per ecc-calculation step
295 * @eccsteps: [INTERN] number of ecc calculation steps per page
296 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring data from array to read regs (tR) 306 * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring data from array to read regs (tR)
297 * @chip_lock: [INTERN] spinlock used to protect access to this structure and the chip
298 * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress 307 * @wq: [INTERN] wait queue to sleep on if a NAND operation is in progress
299 * @state: [INTERN] the current state of the NAND device 308 * @state: [INTERN] the current state of the NAND device
300 * @page_shift: [INTERN] number of address bits in a page (column address bits) 309 * @page_shift: [INTERN] number of address bits in a page (column address bits)
301 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock 310 * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
302 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry 311 * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
303 * @chip_shift: [INTERN] number of address bits in one chip 312 * @chip_shift: [INTERN] number of address bits in one chip
304 * @data_buf: [INTERN] internal buffer for one page + oob 313 * @datbuf: [INTERN] internal buffer for one page + oob
305 * @oob_buf: [INTERN] oob buffer for one eraseblock 314 * @oobbuf: [INTERN] oob buffer for one eraseblock
306 * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized 315 * @oobdirty: [INTERN] indicates that oob_buf must be reinitialized
307 * @data_poi: [INTERN] pointer to a data buffer 316 * @data_poi: [INTERN] pointer to a data buffer
308 * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about 317 * @options: [BOARDSPECIFIC] various chip options. They can partly be set to inform nand_scan about
@@ -312,12 +321,13 @@ struct nand_hw_control {
312 * @chipsize: [INTERN] the size of one chip for multichip arrays 321 * @chipsize: [INTERN] the size of one chip for multichip arrays
313 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 322 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
314 * @pagebuf: [INTERN] holds the pagenumber which is currently in data_buf 323 * @pagebuf: [INTERN] holds the pagenumber which is currently in data_buf
315 * @autooob: [REPLACEABLE] the default (auto)placement scheme 324 * @ecclayout: [REPLACEABLE] the default ecc placement scheme
316 * @bbt: [INTERN] bad block table pointer 325 * @bbt: [INTERN] bad block table pointer
317 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash lookup 326 * @bbt_td: [REPLACEABLE] bad block table descriptor for flash lookup
318 * @bbt_md: [REPLACEABLE] bad block table mirror descriptor 327 * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
319 * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial bad block scan 328 * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial bad block scan
320 * @controller: [OPTIONAL] a pointer to a hardware controller structure which is shared among multiple independent devices 329 * @controller: [REPLACEABLE] a pointer to a hardware controller structure
330 * which is shared among multiple independent devices
321 * @priv: [OPTIONAL] pointer to private chip data 331 * @priv: [OPTIONAL] pointer to private chip data
322 * @errstat: [OPTIONAL] hardware specific function to perform additional error status checks 332 * @errstat: [OPTIONAL] hardware specific function to perform additional error status checks
323 * (determine if errors are correctable) 333 * (determine if errors are correctable)
@@ -325,58 +335,57 @@ struct nand_hw_control {
325 335
326struct nand_chip { 336struct nand_chip {
327 void __iomem *IO_ADDR_R; 337 void __iomem *IO_ADDR_R;
328 void __iomem *IO_ADDR_W; 338 void __iomem *IO_ADDR_W;
329 339
330 u_char (*read_byte)(struct mtd_info *mtd); 340 uint8_t (*read_byte)(struct mtd_info *mtd);
331 void (*write_byte)(struct mtd_info *mtd, u_char byte);
332 u16 (*read_word)(struct mtd_info *mtd); 341 u16 (*read_word)(struct mtd_info *mtd);
333 void (*write_word)(struct mtd_info *mtd, u16 word); 342 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
334 343 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
335 void (*write_buf)(struct mtd_info *mtd, const u_char *buf, int len); 344 int (*verify_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
336 void (*read_buf)(struct mtd_info *mtd, u_char *buf, int len);
337 int (*verify_buf)(struct mtd_info *mtd, const u_char *buf, int len);
338 void (*select_chip)(struct mtd_info *mtd, int chip); 345 void (*select_chip)(struct mtd_info *mtd, int chip);
339 int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip); 346 int (*block_bad)(struct mtd_info *mtd, loff_t ofs, int getchip);
340 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); 347 int (*block_markbad)(struct mtd_info *mtd, loff_t ofs);
341 void (*hwcontrol)(struct mtd_info *mtd, int cmd); 348 void (*cmd_ctrl)(struct mtd_info *mtd, int dat,
342 int (*dev_ready)(struct mtd_info *mtd); 349 unsigned int ctrl);
343 void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, int page_addr); 350 int (*dev_ready)(struct mtd_info *mtd);
344 int (*waitfunc)(struct mtd_info *mtd, struct nand_chip *this, int state); 351 void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, int page_addr);
345 int (*calculate_ecc)(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); 352 int (*waitfunc)(struct mtd_info *mtd, struct nand_chip *this);
346 int (*correct_data)(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
347 void (*enable_hwecc)(struct mtd_info *mtd, int mode);
348 void (*erase_cmd)(struct mtd_info *mtd, int page); 353 void (*erase_cmd)(struct mtd_info *mtd, int page);
349 int (*scan_bbt)(struct mtd_info *mtd); 354 int (*scan_bbt)(struct mtd_info *mtd);
350 int eccmode; 355 int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page);
351 int eccsize; 356
352 int eccbytes; 357 int chip_delay;
353 int eccsteps; 358 unsigned int options;
354 int chip_delay; 359
355 spinlock_t chip_lock; 360 int page_shift;
356 wait_queue_head_t wq;
357 nand_state_t state;
358 int page_shift;
359 int phys_erase_shift; 361 int phys_erase_shift;
360 int bbt_erase_shift; 362 int bbt_erase_shift;
361 int chip_shift; 363 int chip_shift;
362 u_char *data_buf;
363 u_char *oob_buf;
364 int oobdirty;
365 u_char *data_poi;
366 unsigned int options;
367 int badblockpos;
368 int numchips; 364 int numchips;
369 unsigned long chipsize; 365 unsigned long chipsize;
370 int pagemask; 366 int pagemask;
371 int pagebuf; 367 int pagebuf;
372 struct nand_oobinfo *autooob; 368 int badblockpos;
369
370 nand_state_t state;
371
372 uint8_t *oob_poi;
373 struct nand_hw_control *controller;
374 struct nand_ecclayout *ecclayout;
375
376 struct nand_ecc_ctrl ecc;
377 struct nand_buffers buffers;
378 struct nand_hw_control hwcontrol;
379
380 struct mtd_oob_ops ops;
381
373 uint8_t *bbt; 382 uint8_t *bbt;
374 struct nand_bbt_descr *bbt_td; 383 struct nand_bbt_descr *bbt_td;
375 struct nand_bbt_descr *bbt_md; 384 struct nand_bbt_descr *bbt_md;
385
376 struct nand_bbt_descr *badblock_pattern; 386 struct nand_bbt_descr *badblock_pattern;
377 struct nand_hw_control *controller; 387
378 void *priv; 388 void *priv;
379 int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, int status, int page);
380}; 389};
381 390
382/* 391/*
@@ -388,19 +397,19 @@ struct nand_chip {
388#define NAND_MFR_NATIONAL 0x8f 397#define NAND_MFR_NATIONAL 0x8f
389#define NAND_MFR_RENESAS 0x07 398#define NAND_MFR_RENESAS 0x07
390#define NAND_MFR_STMICRO 0x20 399#define NAND_MFR_STMICRO 0x20
391#define NAND_MFR_HYNIX 0xad 400#define NAND_MFR_HYNIX 0xad
392 401
393/** 402/**
394 * struct nand_flash_dev - NAND Flash Device ID Structure 403 * struct nand_flash_dev - NAND Flash Device ID Structure
395 * 404 *
396 * @name: Identify the device type 405 * @name: Identify the device type
397 * @id: device ID code 406 * @id: device ID code
398 * @pagesize: Pagesize in bytes. Either 256 or 512 or 0 407 * @pagesize: Pagesize in bytes. Either 256 or 512 or 0
399 * If the pagesize is 0, then the real pagesize 408 * If the pagesize is 0, then the real pagesize
400 * and the erasesize are determined from the 409 * and the erasesize are determined from the
401 * extended id bytes in the chip 410 * extended id bytes in the chip
402 * @erasesize: Size of an erase block in the flash device. 411 * @erasesize: Size of an erase block in the flash device.
403 * @chipsize: Total chipsize in Mega Bytes 412 * @chipsize: Total chipsize in Mega Bytes
404 * @options: Bitfield to store chip relevant options 413 * @options: Bitfield to store chip relevant options
405 */ 414 */
406struct nand_flash_dev { 415struct nand_flash_dev {
@@ -415,7 +424,7 @@ struct nand_flash_dev {
415/** 424/**
416 * struct nand_manufacturers - NAND Flash Manufacturer ID Structure 425 * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
417 * @name: Manufacturer name 426 * @name: Manufacturer name
418 * @id: manufacturer ID code of device. 427 * @id: manufacturer ID code of device.
419*/ 428*/
420struct nand_manufacturers { 429struct nand_manufacturers {
421 int id; 430 int id;
@@ -455,7 +464,7 @@ struct nand_bbt_descr {
455 int veroffs; 464 int veroffs;
456 uint8_t version[NAND_MAX_CHIPS]; 465 uint8_t version[NAND_MAX_CHIPS];
457 int len; 466 int len;
458 int maxblocks; 467 int maxblocks;
459 int reserved_block_code; 468 int reserved_block_code;
460 uint8_t *pattern; 469 uint8_t *pattern;
461}; 470};
@@ -494,14 +503,14 @@ struct nand_bbt_descr {
494/* The maximum number of blocks to scan for a bbt */ 503/* The maximum number of blocks to scan for a bbt */
495#define NAND_BBT_SCAN_MAXBLOCKS 4 504#define NAND_BBT_SCAN_MAXBLOCKS 4
496 505
497extern int nand_scan_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd); 506extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
498extern int nand_update_bbt (struct mtd_info *mtd, loff_t offs); 507extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
499extern int nand_default_bbt (struct mtd_info *mtd); 508extern int nand_default_bbt(struct mtd_info *mtd);
500extern int nand_isbad_bbt (struct mtd_info *mtd, loff_t offs, int allowbbt); 509extern int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
501extern int nand_erase_nand (struct mtd_info *mtd, struct erase_info *instr, int allowbbt); 510extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
502extern int nand_do_read_ecc (struct mtd_info *mtd, loff_t from, size_t len, 511 int allowbbt);
503 size_t * retlen, u_char * buf, u_char * oob_buf, 512extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
504 struct nand_oobinfo *oobsel, int flags); 513 size_t * retlen, uint8_t * buf);
505 514
506/* 515/*
507* Constants for oob configuration 516* Constants for oob configuration
@@ -509,4 +518,53 @@ extern int nand_do_read_ecc (struct mtd_info *mtd, loff_t from, size_t len,
509#define NAND_SMALL_BADBLOCK_POS 5 518#define NAND_SMALL_BADBLOCK_POS 5
510#define NAND_LARGE_BADBLOCK_POS 0 519#define NAND_LARGE_BADBLOCK_POS 0
511 520
521/**
522 * struct platform_nand_chip - chip level device structure
523 *
524 * @nr_chips: max. number of chips to scan for
525 * @chip_offset: chip number offset
526 * @nr_partitions: number of partitions pointed to by partitions (or zero)
527 * @partitions: mtd partition list
528 * @chip_delay: R/B delay value in us
529 * @options: Option flags, e.g. 16bit buswidth
530 * @ecclayout: ecc layout info structure
531 * @priv: hardware controller specific settings
532 */
533struct platform_nand_chip {
534 int nr_chips;
535 int chip_offset;
536 int nr_partitions;
537 struct mtd_partition *partitions;
538 struct nand_ecclayout *ecclayout;
539 int chip_delay;
540 unsigned int options;
541 void *priv;
542};
543
544/**
545 * struct platform_nand_ctrl - controller level device structure
546 *
547 * @hwcontrol: platform specific hardware control structure
548 * @dev_ready: platform specific function to read ready/busy pin
549 * @select_chip: platform specific chip select function
550 * @priv: private data to transport driver specific settings
551 *
552 * All fields are optional and depend on the hardware driver requirements
553 */
554struct platform_nand_ctrl {
555 void (*hwcontrol)(struct mtd_info *mtd, int cmd);
556 int (*dev_ready)(struct mtd_info *mtd);
557 void (*select_chip)(struct mtd_info *mtd, int chip);
558 void *priv;
559};
560
561/* Some helpers to access the data structures */
562static inline
563struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
564{
565 struct nand_chip *chip = mtd->priv;
566
567 return chip->priv;
568}
569
512#endif /* __LINUX_MTD_NAND_H */ 570#endif /* __LINUX_MTD_NAND_H */
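
Editorial note: the old NAND_CTL_SET*/CLR* command pairs and the hwcontrol() hook are folded into a single cmd_ctrl() callback driven by the NAND_NCE/NAND_CLE/NAND_ALE bitmask, with NAND_CTRL_CHANGE flagging a change of the control-line state. A hedged sketch of a minimal memory-mapped board implementation follows; the CLE/ALE address offsets are invented for illustration, and boards that keep nCE permanently asserted can ignore pure control-line changes as done here.

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/io.h>

#define BOARD_NAND_CLE_OFS	0x10	/* assumption: CLE strobe address */
#define BOARD_NAND_ALE_OFS	0x20	/* assumption: ALE strobe address */

/* Sketch only: the core passes a command/address byte in 'dat' (or
 * NAND_CMD_NONE) plus control-line state in 'ctrl'; the driver routes
 * the byte to the matching latch. */
static void board_nand_cmd_ctrl(struct mtd_info *mtd, int dat,
				unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;
	void __iomem *addr = chip->IO_ADDR_W;

	if (dat == NAND_CMD_NONE)
		return;		/* only a control-line change, nothing to write */

	if (ctrl & NAND_CLE)
		addr += BOARD_NAND_CLE_OFS;	/* latch as a command byte */
	else if (ctrl & NAND_ALE)
		addr += BOARD_NAND_ALE_OFS;	/* latch as an address byte */

	writeb(dat, addr);
}

/* Board setup would then do: chip->cmd_ctrl = board_nand_cmd_ctrl; */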
diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h
new file mode 100644
index 000000000000..d0558a982628
--- /dev/null
+++ b/include/linux/mtd/ndfc.h
@@ -0,0 +1,67 @@
1/*
2 * linux/include/linux/mtd/ndfc.h
3 *
4 * Copyright (c) 2006 Thomas Gleixner <tglx@linutronix.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Info:
11 * Contains defines, datastructures for ndfc nand controller
12 *
13 */
14#ifndef __LINUX_MTD_NDFC_H
15#define __LINUX_MTD_NDFC_H
16
17/* NDFC Register definitions */
18#define NDFC_CMD 0x00
19#define NDFC_ALE 0x04
20#define NDFC_DATA 0x08
21#define NDFC_ECC 0x10
22#define NDFC_BCFG0 0x30
23#define NDFC_BCFG1 0x34
24#define NDFC_BCFG2 0x38
25#define NDFC_BCFG3 0x3c
26#define NDFC_CCR 0x40
27#define NDFC_STAT 0x44
28#define NDFC_HWCTL 0x48
29#define NDFC_REVID 0x50
30
31#define NDFC_STAT_IS_READY 0x01000000
32
33#define NDFC_CCR_RESET_CE 0x80000000 /* CE Reset */
34#define NDFC_CCR_RESET_ECC 0x40000000 /* ECC Reset */
35#define NDFC_CCR_RIE 0x20000000 /* Interrupt Enable on Device Rdy */
36#define NDFC_CCR_REN 0x10000000 /* Enable wait for Rdy in LinearR */
37#define NDFC_CCR_ROMEN 0x08000000 /* Enable ROM In LinearR */
38#define NDFC_CCR_ARE 0x04000000 /* Auto-Read Enable */
39#define NDFC_CCR_BS(x) (((x) & 0x3) << 24) /* Select Bank on CE[x] */
40#define NDFC_CCR_BS_MASK 0x03000000 /* Select Bank */
41#define NDFC_CCR_ARAC0 0x00000000 /* 3 Addr, 1 Col 2 Row 512b page */
42#define NDFC_CCR_ARAC1 0x00001000 /* 4 Addr, 1 Col 3 Row 512b page */
43#define NDFC_CCR_ARAC2 0x00002000 /* 4 Addr, 2 Col 2 Row 2K page */
44#define NDFC_CCR_ARAC3 0x00003000 /* 5 Addr, 2 Col 3 Row 2K page */
45#define NDFC_CCR_ARAC_MASK 0x00003000 /* Auto-Read mode Addr Cycles */
46#define NDFC_CCR_RPG 0x0000C000 /* Auto-Read Page */
47#define NDFC_CCR_EBCC 0x00000004 /* EBC Configuration Completed */
48#define NDFC_CCR_DHC 0x00000002 /* Direct Hardware Control Enable */
49
50#define NDFC_BxCFG_EN 0x80000000 /* Bank Enable */
51#define NDFC_BxCFG_CED 0x40000000 /* nCE Style */
52#define NDFC_BxCFG_SZ_MASK 0x08000000 /* Bank Size */
53#define NDFC_BxCFG_SZ_8BIT 0x00000000 /* 8bit */
54#define NDFC_BxCFG_SZ_16BIT 0x08000000 /* 16bit */
55
56#define NDFC_MAX_BANKS 4
57
58struct ndfc_controller_settings {
59 uint32_t ccr_settings;
60 uint64_t ndfc_erpn;
61};
62
63struct ndfc_chip_settings {
64 uint32_t bank_settings;
65};
66
67#endif
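
Editorial note: a hedged sketch of composing an NDFC CCR value from the bits defined above, for a hypothetical board with a 512-byte-page chip on chip-enable 1. Only macros shown in this header are used; the choice of bits is illustrative, not taken from a real board file.

#include <linux/mtd/ndfc.h>

static const struct ndfc_controller_settings example_ndfc_settings = {
	/* select bank 1; 4 address cycles (1 column + 3 row, 512-byte page) */
	.ccr_settings = NDFC_CCR_BS(1) | NDFC_CCR_ARAC1,
	.ndfc_erpn = 0,
};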
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h
index d35d2c21ff3e..bcf2fb3fa4a7 100644
--- a/include/linux/mtd/nftl.h
+++ b/include/linux/mtd/nftl.h
@@ -37,7 +37,7 @@ struct NFTLrecord {
37 unsigned int nb_blocks; /* number of physical blocks */ 37 unsigned int nb_blocks; /* number of physical blocks */
38 unsigned int nb_boot_blocks; /* number of blocks used by the bios */ 38 unsigned int nb_boot_blocks; /* number of blocks used by the bios */
39 struct erase_info instr; 39 struct erase_info instr;
40 struct nand_oobinfo oobinfo; 40 struct nand_ecclayout oobinfo;
41}; 41};
42 42
43int NFTL_mount(struct NFTLrecord *s); 43int NFTL_mount(struct NFTLrecord *s);
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 7419b5fab133..9ce9a48db444 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -35,6 +35,8 @@ typedef enum {
35 FL_SYNCING, 35 FL_SYNCING,
36 FL_UNLOCKING, 36 FL_UNLOCKING,
37 FL_LOCKING, 37 FL_LOCKING,
38 FL_RESETING,
39 FL_OTPING,
38 FL_PM_SUSPENDED, 40 FL_PM_SUSPENDED,
39} onenand_state_t; 41} onenand_state_t;
40 42
@@ -75,7 +77,7 @@ struct onenand_bufferram {
75 * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip 77 * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip
76 * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress 78 * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress
77 * @param state [INTERN] the current state of the OneNAND device 79 * @param state [INTERN] the current state of the OneNAND device
78 * @param autooob [REPLACEABLE] the default (auto)placement scheme 80 * @param ecclayout [REPLACEABLE] the default ecc placement scheme
79 * @param bbm [REPLACEABLE] pointer to Bad Block Management 81 * @param bbm [REPLACEABLE] pointer to Bad Block Management
80 * @param priv [OPTIONAL] pointer to private chip data 82 * @param priv [OPTIONAL] pointer to private chip data
81 */ 83 */
@@ -111,9 +113,9 @@ struct onenand_chip {
111 onenand_state_t state; 113 onenand_state_t state;
112 unsigned char *page_buf; 114 unsigned char *page_buf;
113 115
114 struct nand_oobinfo *autooob; 116 struct nand_ecclayout *ecclayout;
115 117
116 void *bbm; 118 void *bbm;
117 119
118 void *priv; 120 void *priv;
119}; 121};
@@ -130,6 +132,9 @@ struct onenand_chip {
130#define ONENAND_SET_SYS_CFG1(v, this) \ 132#define ONENAND_SET_SYS_CFG1(v, this) \
131 (this->write_word(v, this->base + ONENAND_REG_SYS_CFG1)) 133 (this->write_word(v, this->base + ONENAND_REG_SYS_CFG1))
132 134
135/* Check byte access in OneNAND */
136#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1)
137
133/* 138/*
134 * Options bits 139 * Options bits
135 */ 140 */
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index d7832ef8ed63..4a72818d2545 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -112,6 +112,7 @@
112#define ONENAND_CMD_LOCK_TIGHT (0x2C) 112#define ONENAND_CMD_LOCK_TIGHT (0x2C)
113#define ONENAND_CMD_ERASE (0x94) 113#define ONENAND_CMD_ERASE (0x94)
114#define ONENAND_CMD_RESET (0xF0) 114#define ONENAND_CMD_RESET (0xF0)
115#define ONENAND_CMD_OTP_ACCESS (0x65)
115#define ONENAND_CMD_READID (0x90) 116#define ONENAND_CMD_READID (0x90)
116 117
117/* NOTE: Those are not *REAL* commands */ 118/* NOTE: Those are not *REAL* commands */
@@ -152,6 +153,8 @@
152#define ONENAND_CTRL_ERASE (1 << 11) 153#define ONENAND_CTRL_ERASE (1 << 11)
153#define ONENAND_CTRL_ERROR (1 << 10) 154#define ONENAND_CTRL_ERROR (1 << 10)
154#define ONENAND_CTRL_RSTB (1 << 7) 155#define ONENAND_CTRL_RSTB (1 << 7)
156#define ONENAND_CTRL_OTP_L (1 << 6)
157#define ONENAND_CTRL_OTP_BL (1 << 5)
155 158
156/* 159/*
157 * Interrupt Status Register F241h (R) 160 * Interrupt Status Register F241h (R)
@@ -177,4 +180,9 @@
177#define ONENAND_ECC_2BIT (1 << 1) 180#define ONENAND_ECC_2BIT (1 << 1)
178#define ONENAND_ECC_2BIT_ALL (0xAAAA) 181#define ONENAND_ECC_2BIT_ALL (0xAAAA)
179 182
183/*
184 * One-Time Programmable (OTP)
185 */
186#define ONENAND_OTP_LOCK_OFFSET (14)
187
180#endif /* __ONENAND_REG_H */ 188#endif /* __ONENAND_REG_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index b03f512d51b9..da6b3d6f12a7 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -41,7 +41,7 @@ struct mtd_partition {
41 u_int32_t size; /* partition size */ 41 u_int32_t size; /* partition size */
42 u_int32_t offset; /* offset within the master MTD space */ 42 u_int32_t offset; /* offset within the master MTD space */
43 u_int32_t mask_flags; /* master MTD flags to mask out for this partition */ 43 u_int32_t mask_flags; /* master MTD flags to mask out for this partition */
44 struct nand_oobinfo *oobsel; /* out of band layout for this partition (NAND only)*/ 44 struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
45 struct mtd_info **mtdp; /* pointer to store the MTD object */ 45 struct mtd_info **mtdp; /* pointer to store the MTD object */
46}; 46};
47 47
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index c7b8bcdef013..86831e3594f6 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -15,33 +15,26 @@
15 */ 15 */
16 16
17#ifndef __LINUX_MTD_PHYSMAP__ 17#ifndef __LINUX_MTD_PHYSMAP__
18 18#define __LINUX_MTD_PHYSMAP__
19#include <linux/config.h>
20
21#if defined(CONFIG_MTD_PHYSMAP)
22 19
23#include <linux/mtd/mtd.h> 20#include <linux/mtd/mtd.h>
24#include <linux/mtd/map.h> 21#include <linux/mtd/map.h>
25#include <linux/mtd/partitions.h> 22#include <linux/mtd/partitions.h>
26 23
27/* 24struct physmap_flash_data {
28 * The map_info for physmap. Board can override size, buswidth, phys, 25 unsigned int width;
29 * (*set_vpp)(), etc in their initial setup routine. 26 void (*set_vpp)(struct map_info *, int);
30 */ 27 unsigned int nr_parts;
31extern struct map_info physmap_map; 28 struct mtd_partition *parts;
29};
32 30
33/* 31/*
34 * Board needs to specify the exact mapping during their setup time. 32 * Board needs to specify the exact mapping during their setup time.
35 */ 33 */
36static inline void physmap_configure(unsigned long addr, unsigned long size, int bankwidth, void (*set_vpp)(struct map_info *, int) ) 34void physmap_configure(unsigned long addr, unsigned long size,
37{ 35 int bankwidth, void (*set_vpp)(struct map_info *, int) );
38 physmap_map.phys = addr;
39 physmap_map.size = size;
40 physmap_map.bankwidth = bankwidth;
41 physmap_map.set_vpp = set_vpp;
42}
43 36
44#if defined(CONFIG_MTD_PARTITIONS) 37#ifdef CONFIG_MTD_PARTITIONS
45 38
46/* 39/*
47 * Machines that wish to do flash partition may want to call this function in 40 * Machines that wish to do flash partition may want to call this function in
@@ -55,7 +48,5 @@ static inline void physmap_configure(unsigned long addr, unsigned long size, int
55void physmap_set_partitions(struct mtd_partition *parts, int num_parts); 48void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
56 49
57#endif /* defined(CONFIG_MTD_PARTITIONS) */ 50#endif /* defined(CONFIG_MTD_PARTITIONS) */
58#endif /* defined(CONFIG_MTD) */
59 51
60#endif /* __LINUX_MTD_PHYSMAP__ */ 52#endif /* __LINUX_MTD_PHYSMAP__ */
61
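
Editorial note: physmap is no longer configured by poking a global map_info; board code now hands a physmap_flash_data structure to the platform-driver variant. A hedged sketch of such board code follows; the "physmap-flash" device name, the flash base/size and the partition layout are assumptions for illustration (see the physmap.c changes in this patch for the authoritative driver name).

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition board_parts[] = {
	{ .name = "bootloader", .offset = 0,       .size = 0x40000 },
	{ .name = "filesystem", .offset = 0x40000, .size = MTDPART_SIZ_FULL },
};

static struct physmap_flash_data board_flash_data = {
	.width    = 2,			/* 16-bit bus */
	.nr_parts = ARRAY_SIZE(board_parts),
	.parts    = board_parts,
};

static struct resource board_flash_resource = {
	.start = 0x08000000,		/* assumption: flash base address */
	.end   = 0x08000000 + 0x800000 - 1,
	.flags = IORESOURCE_MEM,
};

static struct platform_device board_flash_device = {
	.name = "physmap-flash",	/* assumed platform-driver name */
	.id   = 0,
	.dev  = {
		.platform_data = &board_flash_data,
	},
	.num_resources = 1,
	.resource      = &board_flash_resource,
};

/* Board init would then call platform_device_register(&board_flash_device); */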
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index b5994ea56a5a..ee2afbaefe1b 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -28,28 +28,17 @@ struct mtd_oob_buf {
28#define MTD_ROM 2 28#define MTD_ROM 2
29#define MTD_NORFLASH 3 29#define MTD_NORFLASH 3
30#define MTD_NANDFLASH 4 30#define MTD_NANDFLASH 4
31#define MTD_PEROM 5
32#define MTD_DATAFLASH 6 31#define MTD_DATAFLASH 6
33#define MTD_OTHER 14 32
34#define MTD_UNKNOWN 15 33#define MTD_WRITEABLE 0x400 /* Device is writeable */
35 34#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
36#define MTD_CLEAR_BITS 1 // Bits can be cleared (flash) 35#define MTD_NO_ERASE 0x1000 /* No erase necessary */
37#define MTD_SET_BITS 2 // Bits can be set
38#define MTD_ERASEABLE 4 // Has an erase function
39#define MTD_WRITEB_WRITEABLE 8 // Direct IO is possible
40#define MTD_VOLATILE 16 // Set for RAMs
41#define MTD_XIP 32 // eXecute-In-Place possible
42#define MTD_OOB 64 // Out-of-band data (NAND flash)
43#define MTD_ECC 128 // Device capable of automatic ECC
44#define MTD_NO_VIRTBLOCKS 256 // Virtual blocks not allowed
45#define MTD_PROGRAM_REGIONS 512 // Configurable Programming Regions
46 36
47// Some common devices / combinations of capabilities 37// Some common devices / combinations of capabilities
48#define MTD_CAP_ROM 0 38#define MTD_CAP_ROM 0
49#define MTD_CAP_RAM (MTD_CLEAR_BITS|MTD_SET_BITS|MTD_WRITEB_WRITEABLE) 39#define MTD_CAP_RAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE)
50#define MTD_CAP_NORFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE) 40#define MTD_CAP_NORFLASH (MTD_WRITEABLE | MTD_BIT_WRITEABLE)
51#define MTD_CAP_NANDFLASH (MTD_CLEAR_BITS|MTD_ERASEABLE|MTD_OOB) 41#define MTD_CAP_NANDFLASH (MTD_WRITEABLE)
52#define MTD_WRITEABLE (MTD_CLEAR_BITS|MTD_SET_BITS)
53 42
54 43
55// Types of automatic ECC/Checksum available 44// Types of automatic ECC/Checksum available
@@ -74,7 +63,7 @@ struct mtd_info_user {
74 uint32_t flags; 63 uint32_t flags;
75 uint32_t size; // Total size of the MTD 64 uint32_t size; // Total size of the MTD
76 uint32_t erasesize; 65 uint32_t erasesize;
77 uint32_t oobblock; // Size of OOB blocks (e.g. 512) 66 uint32_t writesize;
78 uint32_t oobsize; // Amount of OOB data per block (e.g. 16) 67 uint32_t oobsize; // Amount of OOB data per block (e.g. 16)
79 uint32_t ecctype; 68 uint32_t ecctype;
80 uint32_t eccsize; 69 uint32_t eccsize;
@@ -94,12 +83,12 @@ struct otp_info {
94 uint32_t locked; 83 uint32_t locked;
95}; 84};
96 85
97#define MEMGETINFO _IOR('M', 1, struct mtd_info_user) 86#define MEMGETINFO _IOR('M', 1, struct mtd_info_user)
98#define MEMERASE _IOW('M', 2, struct erase_info_user) 87#define MEMERASE _IOW('M', 2, struct erase_info_user)
99#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf) 88#define MEMWRITEOOB _IOWR('M', 3, struct mtd_oob_buf)
100#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf) 89#define MEMREADOOB _IOWR('M', 4, struct mtd_oob_buf)
101#define MEMLOCK _IOW('M', 5, struct erase_info_user) 90#define MEMLOCK _IOW('M', 5, struct erase_info_user)
102#define MEMUNLOCK _IOW('M', 6, struct erase_info_user) 91#define MEMUNLOCK _IOW('M', 6, struct erase_info_user)
103#define MEMGETREGIONCOUNT _IOR('M', 7, int) 92#define MEMGETREGIONCOUNT _IOR('M', 7, int)
104#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user) 93#define MEMGETREGIONINFO _IOWR('M', 8, struct region_info_user)
105#define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo) 94#define MEMSETOOBSEL _IOW('M', 9, struct nand_oobinfo)
@@ -109,8 +98,15 @@ struct otp_info {
109#define OTPSELECT _IOR('M', 13, int) 98#define OTPSELECT _IOR('M', 13, int)
110#define OTPGETREGIONCOUNT _IOW('M', 14, int) 99#define OTPGETREGIONCOUNT _IOW('M', 14, int)
111#define OTPGETREGIONINFO _IOW('M', 15, struct otp_info) 100#define OTPGETREGIONINFO _IOW('M', 15, struct otp_info)
112#define OTPLOCK _IOR('M', 16, struct otp_info) 101#define OTPLOCK _IOR('M', 16, struct otp_info)
102#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout)
103#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
104#define MTDFILEMODE _IO('M', 19)
113 105
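[Editorial note] The ioctl list gains ECCGETLAYOUT, ECCGETSTATS and MTDFILEMODE here. As a reminder of how this character-device interface is driven from userspace, a minimal sketch that erases the first eraseblock using the long-standing MEMGETINFO and MEMERASE calls; the device path is illustrative and the operation is destructive on real flash:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-user.h>

	int main(void)
	{
		mtd_info_t info;
		erase_info_t ei;
		int fd = open("/dev/mtd1", O_RDWR);	/* example path */

		if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0) {
			perror("mtd");
			return 1;
		}

		ei.start = 0;			/* offset of the block to erase */
		ei.length = info.erasesize;	/* one full eraseblock */

		if (ioctl(fd, MEMERASE, &ei) < 0)
			perror("MEMERASE");

		close(fd);
		return 0;
	}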
106/*
107 * Obsolete legacy interface. Keep it in order not to break userspace
108 * interfaces
109 */
114struct nand_oobinfo { 110struct nand_oobinfo {
115 uint32_t useecc; 111 uint32_t useecc;
116 uint32_t eccbytes; 112 uint32_t eccbytes;
@@ -118,4 +114,46 @@ struct nand_oobinfo {
118 uint32_t eccpos[32]; 114 uint32_t eccpos[32];
119}; 115};
120 116
117struct nand_oobfree {
118 uint32_t offset;
119 uint32_t length;
120};
121
122#define MTD_MAX_OOBFREE_ENTRIES 8
123/*
124 * ECC layout control structure. Exported to userspace for
125 * diagnosis and to allow creation of raw images
126 */
127struct nand_ecclayout {
128 uint32_t eccbytes;
129 uint32_t eccpos[64];
130 uint32_t oobavail;
131 struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES];
132};
133
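[Editorial note] struct nand_ecclayout is the newly exported description of where the ECC bytes sit in the OOB area and which OOB bytes remain free for the user. A small sketch of querying it through the new ECCGETLAYOUT ioctl, assuming unused oobfree entries are zero-length and with an illustrative device path:

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-user.h>

	int main(void)
	{
		struct nand_ecclayout layout;
		uint32_t free_bytes = 0;
		int i, fd = open("/dev/mtd0", O_RDONLY);

		if (fd < 0 || ioctl(fd, ECCGETLAYOUT, &layout) < 0) {
			perror("ECCGETLAYOUT");
			return 1;
		}

		/* sum the free OOB ranges (unused entries assumed zero-length) */
		for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && layout.oobfree[i].length; i++)
			free_bytes += layout.oobfree[i].length;

		printf("%u ECC bytes, %u free OOB bytes per page\n",
		       layout.eccbytes, free_bytes);
		close(fd);
		return 0;
	}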
134/**
135 * struct mtd_ecc_stats - error correction status
136 *
137 * @corrected: number of corrected bits
138 * @failed: number of uncorrectable errors
139 * @badblocks: number of bad blocks in this partition
140 * @bbtblocks: number of blocks reserved for bad block tables
141 */
142struct mtd_ecc_stats {
143 uint32_t corrected;
144 uint32_t failed;
145 uint32_t badblocks;
146 uint32_t bbtblocks;
147};
148
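[Editorial note] The mtd_ecc_stats counters above are what the new ECCGETSTATS ioctl returns for the opened partition. A minimal sketch of reading them (device path illustrative):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-user.h>

	int main(void)
	{
		struct mtd_ecc_stats st;
		int fd = open("/dev/mtd0", O_RDONLY);

		if (fd < 0 || ioctl(fd, ECCGETSTATS, &st) < 0) {
			perror("ECCGETSTATS");
			return 1;
		}

		printf("corrected=%u failed=%u badblocks=%u bbtblocks=%u\n",
		       st.corrected, st.failed, st.badblocks, st.bbtblocks);
		close(fd);
		return 0;
	}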
149/*
150 * Read/write file modes for access to MTD
151 */
152enum mtd_file_modes {
153 MTD_MODE_NORMAL = MTD_OTP_OFF,
154 MTD_MODE_OTP_FACTORY = MTD_OTP_FACTORY,
155 MTD_MODE_OTP_USER = MTD_OTP_USER,
156 MTD_MODE_RAW,
157};
158
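[Editorial note] mtd_file_modes describes the per-open-file state of the character device: normal, one of the OTP modes, or raw access with ECC bypassed. A sketch of selecting raw mode, assuming the mode value is passed directly as the ioctl argument (which is how the accompanying mtdchar changes are usually described); the device path is illustrative:

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-user.h>

	int main(void)
	{
		int fd = open("/dev/mtd0", O_RDWR);	/* example path */

		if (fd < 0)
			return 1;

		if (ioctl(fd, MTDFILEMODE, MTD_MODE_RAW) < 0)
			perror("MTDFILEMODE");

		/* subsequent read()/write() on fd would now move raw page data */
		close(fd);
		return 0;
	}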
121#endif /* __MTD_ABI_H__ */ 159#endif /* __MTD_ABI_H__ */
diff --git a/include/mtd/mtd-user.h b/include/mtd/mtd-user.h
index 1c13fc7161fe..713f34d3e62e 100644
--- a/include/mtd/mtd-user.h
+++ b/include/mtd/mtd-user.h
@@ -16,5 +16,6 @@ typedef struct mtd_info_user mtd_info_t;
16typedef struct erase_info_user erase_info_t; 16typedef struct erase_info_user erase_info_t;
17typedef struct region_info_user region_info_t; 17typedef struct region_info_user region_info_t;
18typedef struct nand_oobinfo nand_oobinfo_t; 18typedef struct nand_oobinfo nand_oobinfo_t;
19typedef struct nand_ecclayout nand_ecclayout_t;
19 20
20#endif /* __MTD_USER_H__ */ 21#endif /* __MTD_USER_H__ */
diff --git a/init/Kconfig b/init/Kconfig
index 3b36a1d53656..a7697787946a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -389,9 +389,6 @@ config SLOB
389 default !SLAB 389 default !SLAB
390 bool 390 bool
391 391
392config OBSOLETE_INTERMODULE
393 tristate
394
395menu "Loadable module support" 392menu "Loadable module support"
396 393
397config MODULES 394config MODULES
diff --git a/init/do_mounts.c b/init/do_mounts.c
index f4b7b9d278cd..21b3b8f33a72 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -409,6 +409,10 @@ void __init prepare_namespace(void)
409 409
410 if (saved_root_name[0]) { 410 if (saved_root_name[0]) {
411 root_device_name = saved_root_name; 411 root_device_name = saved_root_name;
412 if (!strncmp(root_device_name, "mtd", 3)) {
413 mount_block_root(root_device_name, root_mountflags);
414 goto out;
415 }
412 ROOT_DEV = name_to_dev_t(root_device_name); 416 ROOT_DEV = name_to_dev_t(root_device_name);
413 if (strncmp(root_device_name, "/dev/", 5) == 0) 417 if (strncmp(root_device_name, "/dev/", 5) == 0)
414 root_device_name += 5; 418 root_device_name += 5;
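[Editorial note] The do_mounts.c hunk above lets a root device name beginning with "mtd" skip the name_to_dev_t() translation and go straight to mount_block_root(), so a filesystem that can resolve MTD device names itself (JFFS2 being the obvious user) can be mounted as the root filesystem. Assuming a JFFS2 image on the fourth MTD partition, the boot parameters would look roughly like:

	root=mtd3 rootfstype=jffs2

The partition number and filesystem type are only an example of the intended usage.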
diff --git a/kernel/Makefile b/kernel/Makefile
index 58908f9d156a..f6ef00f4f90f 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_SMP) += cpu.o spinlock.o
20obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o 20obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
21obj-$(CONFIG_UID16) += uid16.o 21obj-$(CONFIG_UID16) += uid16.o
22obj-$(CONFIG_MODULES) += module.o 22obj-$(CONFIG_MODULES) += module.o
23obj-$(CONFIG_OBSOLETE_INTERMODULE) += intermodule.o
24obj-$(CONFIG_KALLSYMS) += kallsyms.o 23obj-$(CONFIG_KALLSYMS) += kallsyms.o
25obj-$(CONFIG_PM) += power/ 24obj-$(CONFIG_PM) += power/
26obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o 25obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
diff --git a/kernel/intermodule.c b/kernel/intermodule.c
deleted file mode 100644
index 55b1e5b85db9..000000000000
--- a/kernel/intermodule.c
+++ /dev/null
@@ -1,184 +0,0 @@
1/* Deprecated, do not use. Moved from module.c to here. --RR */
2
3/* Written by Keith Owens <kaos@ocs.com.au> Oct 2000 */
4#include <linux/module.h>
5#include <linux/kmod.h>
6#include <linux/spinlock.h>
7#include <linux/list.h>
8#include <linux/slab.h>
9
10/* inter_module functions are always available, even when the kernel is
11 * compiled without modules. Consumers of inter_module_xxx routines
12 * will always work, even when both are built into the kernel, this
13 * approach removes lots of #ifdefs in mainline code.
14 */
15
16static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
17static DEFINE_SPINLOCK(ime_lock);
18static int kmalloc_failed;
19
20struct inter_module_entry {
21 struct list_head list;
22 const char *im_name;
23 struct module *owner;
24 const void *userdata;
25};
26
27/**
28 * inter_module_register - register a new set of inter module data.
29 * @im_name: an arbitrary string to identify the data, must be unique
30 * @owner: module that is registering the data, always use THIS_MODULE
31 * @userdata: pointer to arbitrary userdata to be registered
32 *
33 * Description: Check that the im_name has not already been registered,
34 * complain if it has. For new data, add it to the inter_module_entry
35 * list.
36 */
37void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
38{
39 struct list_head *tmp;
40 struct inter_module_entry *ime, *ime_new;
41
42 if (!(ime_new = kzalloc(sizeof(*ime), GFP_KERNEL))) {
43 /* Overloaded kernel, not fatal */
44 printk(KERN_ERR
45 "Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
46 im_name);
47 kmalloc_failed = 1;
48 return;
49 }
50 ime_new->im_name = im_name;
51 ime_new->owner = owner;
52 ime_new->userdata = userdata;
53
54 spin_lock(&ime_lock);
55 list_for_each(tmp, &ime_list) {
56 ime = list_entry(tmp, struct inter_module_entry, list);
57 if (strcmp(ime->im_name, im_name) == 0) {
58 spin_unlock(&ime_lock);
59 kfree(ime_new);
60 /* Program logic error, fatal */
61 printk(KERN_ERR "inter_module_register: duplicate im_name '%s'", im_name);
62 BUG();
63 }
64 }
65 list_add(&(ime_new->list), &ime_list);
66 spin_unlock(&ime_lock);
67}
68
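[Editorial note] For reference, the deprecated interface deleted in this file was used roughly as follows on the provider side: publish a data pointer under a well-known name at init time and withdraw it at exit. All names below are illustrative, not taken from any real driver:

	#include <linux/module.h>
	#include <linux/init.h>

	struct foo_ops {
		int (*read)(void *buf, int len);
	};

	static struct foo_ops foo_ops;	/* callbacks filled in elsewhere */

	static int __init foo_init(void)
	{
		/* make &foo_ops visible to other modules under "foo_ops" */
		inter_module_register("foo_ops", THIS_MODULE, &foo_ops);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		inter_module_unregister("foo_ops");
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");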
69/**
70 * inter_module_unregister - unregister a set of inter module data.
71 * @im_name: an arbitrary string to identify the data, must be unique
72 *
73 * Description: Check that the im_name has been registered, complain if
74 * it has not. For existing data, remove it from the
75 * inter_module_entry list.
76 */
77void inter_module_unregister(const char *im_name)
78{
79 struct list_head *tmp;
80 struct inter_module_entry *ime;
81
82 spin_lock(&ime_lock);
83 list_for_each(tmp, &ime_list) {
84 ime = list_entry(tmp, struct inter_module_entry, list);
85 if (strcmp(ime->im_name, im_name) == 0) {
86 list_del(&(ime->list));
87 spin_unlock(&ime_lock);
88 kfree(ime);
89 return;
90 }
91 }
92 spin_unlock(&ime_lock);
93 if (kmalloc_failed) {
94 printk(KERN_ERR
95 "inter_module_unregister: no entry for '%s', "
96 "probably caused by previous kmalloc failure\n",
97 im_name);
98 return;
99 }
100 else {
101 /* Program logic error, fatal */
102 printk(KERN_ERR "inter_module_unregister: no entry for '%s'", im_name);
103 BUG();
104 }
105}
106
107/**
108 * inter_module_get - return arbitrary userdata from another module.
109 * @im_name: an arbitrary string to identify the data, must be unique
110 *
111 * Description: If the im_name has not been registered, return NULL.
112 * Try to increment the use count on the owning module, if that fails
113 * then return NULL. Otherwise return the userdata.
114 */
115static const void *inter_module_get(const char *im_name)
116{
117 struct list_head *tmp;
118 struct inter_module_entry *ime;
119 const void *result = NULL;
120
121 spin_lock(&ime_lock);
122 list_for_each(tmp, &ime_list) {
123 ime = list_entry(tmp, struct inter_module_entry, list);
124 if (strcmp(ime->im_name, im_name) == 0) {
125 if (try_module_get(ime->owner))
126 result = ime->userdata;
127 break;
128 }
129 }
130 spin_unlock(&ime_lock);
131 return(result);
132}
133
134/**
135 * inter_module_get_request - im get with automatic request_module.
136 * @im_name: an arbitrary string to identify the data, must be unique
137 * @modname: module that is expected to register im_name
138 *
139 * Description: If inter_module_get fails, do request_module then retry.
140 */
141const void *inter_module_get_request(const char *im_name, const char *modname)
142{
143 const void *result = inter_module_get(im_name);
144 if (!result) {
145 request_module("%s", modname);
146 result = inter_module_get(im_name);
147 }
148 return(result);
149}
150
151/**
152 * inter_module_put - release use of data from another module.
153 * @im_name: an arbitrary string to identify the data, must be unique
154 *
155 * Description: If the im_name has not been registered, complain,
156 * otherwise decrement the use count on the owning module.
157 */
158void inter_module_put(const char *im_name)
159{
160 struct list_head *tmp;
161 struct inter_module_entry *ime;
162
163 spin_lock(&ime_lock);
164 list_for_each(tmp, &ime_list) {
165 ime = list_entry(tmp, struct inter_module_entry, list);
166 if (strcmp(ime->im_name, im_name) == 0) {
167 if (ime->owner)
168 module_put(ime->owner);
169 spin_unlock(&ime_lock);
170 return;
171 }
172 }
173 spin_unlock(&ime_lock);
174 printk(KERN_ERR "inter_module_put: no entry for '%s'", im_name);
175 BUG();
176}
177
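[Editorial note] And the matching consumer side, pairing inter_module_get_request() (which loads the provider module on demand) with inter_module_put(); again, all names are illustrative:

	#include <linux/module.h>
	#include <linux/errno.h>

	struct foo_ops {
		int (*read)(void *buf, int len);
	};

	static int foo_consume(void *buf, int len)
	{
		const struct foo_ops *ops;
		int ret;

		ops = inter_module_get_request("foo_ops", "foo_provider");
		if (!ops)
			return -ENODEV;		/* provider never registered */

		ret = ops->read(buf, len);

		inter_module_put("foo_ops");	/* drop the provider's refcount */
		return ret;
	}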
178EXPORT_SYMBOL(inter_module_register);
179EXPORT_SYMBOL(inter_module_unregister);
180EXPORT_SYMBOL(inter_module_get_request);
181EXPORT_SYMBOL(inter_module_put);
182
183MODULE_LICENSE("GPL");
184