152 files changed, 6063 insertions, 2493 deletions
diff --git a/Documentation/devicetree/bindings/mtd/fsmc-nand.txt b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
new file mode 100644
index 000000000000..e2c663b354d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/fsmc-nand.txt
@@ -0,0 +1,33 @@ | |||
1 | * FSMC NAND | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : "st,spear600-fsmc-nand" | ||
5 | - reg : Address range of the mtd chip | ||
6 | - reg-names: Should contain the reg names "fsmc_regs" and "nand_data" | ||
7 | - st,ale-off : Chip specific offset to ALE | ||
8 | - st,cle-off : Chip specific offset to CLE | ||
9 | |||
10 | Optional properties: | ||
11 | - bank-width : Width (in bytes) of the device. If not present, the width | ||
12 | defaults to 1 byte | ||
13 | - nand-skip-bbtscan: Indicates that the BBT scanning should be skipped | ||
14 | |||
15 | Example: | ||
16 | |||
17 | fsmc: flash@d1800000 { | ||
18 | compatible = "st,spear600-fsmc-nand"; | ||
19 | #address-cells = <1>; | ||
20 | #size-cells = <1>; | ||
21 | reg = <0xd1800000 0x1000 /* FSMC Register */ | ||
22 | 0xd2000000 0x4000>; /* NAND Base */ | ||
23 | reg-names = "fsmc_regs", "nand_data"; | ||
24 | st,ale-off = <0x20000>; | ||
25 | st,cle-off = <0x10000>; | ||
26 | |||
27 | bank-width = <1>; | ||
28 | nand-skip-bbtscan; | ||
29 | |||
30 | partition@0 { | ||
31 | ... | ||
32 | }; | ||
33 | }; | ||
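The example above shows the binding from the device tree side. For orientation, here is a minimal sketch of how a probe routine could translate these properties into the fsmc_nand_platform_data fields used later in this series (ale_off, cle_off, width, options). The function name and error handling are illustrative assumptions, not the actual fsmc_nand.c code; only the of_property_read_u32()/of_get_property() helpers and the FSMC_NAND_BW*/NAND_SKIP_BBTSCAN constants are taken as given.

    #include <linux/of.h>
    #include <linux/platform_device.h>
    #include <linux/mtd/nand.h>
    #include <linux/mtd/fsmc.h>

    /* Hypothetical sketch: parsing the fsmc-nand binding in a probe path */
    static int fsmc_nand_parse_dt(struct platform_device *pdev,
                                  struct fsmc_nand_platform_data *pdata)
    {
            struct device_node *np = pdev->dev.of_node;
            u32 val;

            /* Required chip-specific address/command latch offsets */
            if (of_property_read_u32(np, "st,ale-off", &val))
                    return -EINVAL;
            pdata->ale_off = val;
            if (of_property_read_u32(np, "st,cle-off", &val))
                    return -EINVAL;
            pdata->cle_off = val;

            /* Optional bus width in bytes, defaulting to 1 (8-bit) */
            pdata->width = FSMC_NAND_BW8;
            if (!of_property_read_u32(np, "bank-width", &val) && val == 2)
                    pdata->width = FSMC_NAND_BW16;

            /* Optional flag to skip the bad-block-table scan */
            if (of_get_property(np, "nand-skip-bbtscan", NULL))
                    pdata->options = NAND_SKIP_BBTSCAN;

            return 0;
    }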
diff --git a/Documentation/devicetree/bindings/mtd/spear_smi.txt b/Documentation/devicetree/bindings/mtd/spear_smi.txt
new file mode 100644
index 000000000000..7248aadd89e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/spear_smi.txt
@@ -0,0 +1,31 @@ | |||
1 | * SPEAr SMI | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : "st,spear600-smi" | ||
5 | - reg : Address range of the mtd chip | ||
6 | - #address-cells, #size-cells : Must be present if the device has sub-nodes | ||
7 | representing partitions. | ||
8 | - interrupt-parent: Should be the phandle for the interrupt controller | ||
9 | that services interrupts for this device | ||
10 | - interrupts: Should contain the SMI interrupt | ||
11 | - clock-rate : Functional clock rate of SMI in Hz | ||
12 | |||
13 | Optional properties: | ||
14 | - st,smi-fast-mode : Flash supports read in fast mode | ||
15 | |||
16 | Example: | ||
17 | |||
18 | smi: flash@fc000000 { | ||
19 | compatible = "st,spear600-smi"; | ||
20 | #address-cells = <1>; | ||
21 | #size-cells = <1>; | ||
22 | reg = <0xfc000000 0x1000>; | ||
23 | interrupt-parent = <&vic1>; | ||
24 | interrupts = <12>; | ||
25 | clock-rate = <50000000>; /* 50MHz */ | ||
26 | |||
27 | flash@f8000000 { | ||
28 | st,smi-fast-mode; | ||
29 | ... | ||
30 | }; | ||
31 | }; | ||
diff --git a/arch/arm/mach-omap1/flash.c b/arch/arm/mach-omap1/flash.c
index f9bf78d4fdfb..401eb3c080c2 100644
--- a/arch/arm/mach-omap1/flash.c
+++ b/arch/arm/mach-omap1/flash.c
@@ -17,20 +17,12 @@ | |||
17 | 17 | ||
18 | void omap1_set_vpp(struct platform_device *pdev, int enable) | 18 | void omap1_set_vpp(struct platform_device *pdev, int enable) |
19 | { | 19 | { |
20 | static int count; | ||
21 | u32 l; | 20 | u32 l; |
22 | 21 | ||
23 | if (enable) { | 22 | l = omap_readl(EMIFS_CONFIG); |
24 | if (count++ == 0) { | 23 | if (enable) |
25 | l = omap_readl(EMIFS_CONFIG); | 24 | l |= OMAP_EMIFS_CONFIG_WP; |
26 | l |= OMAP_EMIFS_CONFIG_WP; | 25 | else |
27 | omap_writel(l, EMIFS_CONFIG); | 26 | l &= ~OMAP_EMIFS_CONFIG_WP; |
28 | } | 27 | omap_writel(l, EMIFS_CONFIG); |
29 | } else { | ||
30 | if (count && (--count == 0)) { | ||
31 | l = omap_readl(EMIFS_CONFIG); | ||
32 | l &= ~OMAP_EMIFS_CONFIG_WP; | ||
33 | omap_writel(l, EMIFS_CONFIG); | ||
34 | } | ||
35 | } | ||
36 | } | 28 | } |
diff --git a/arch/arm/mach-s3c24xx/simtec-nor.c b/arch/arm/mach-s3c24xx/simtec-nor.c
index 2119ca6a73bc..b9d6d4f92c03 100644
--- a/arch/arm/mach-s3c24xx/simtec-nor.c
+++ b/arch/arm/mach-s3c24xx/simtec-nor.c
@@ -35,9 +35,7 @@ | |||
35 | static void simtec_nor_vpp(struct platform_device *pdev, int vpp) | 35 | static void simtec_nor_vpp(struct platform_device *pdev, int vpp) |
36 | { | 36 | { |
37 | unsigned int val; | 37 | unsigned int val; |
38 | unsigned long flags; | ||
39 | 38 | ||
40 | local_irq_save(flags); | ||
41 | val = __raw_readb(BAST_VA_CTRL3); | 39 | val = __raw_readb(BAST_VA_CTRL3); |
42 | 40 | ||
43 | printk(KERN_DEBUG "%s(%d)\n", __func__, vpp); | 41 | printk(KERN_DEBUG "%s(%d)\n", __func__, vpp); |
@@ -48,7 +46,6 @@ static void simtec_nor_vpp(struct platform_device *pdev, int vpp) | |||
48 | val &= ~BAST_CPLD_CTRL3_ROMWEN; | 46 | val &= ~BAST_CPLD_CTRL3_ROMWEN; |
49 | 47 | ||
50 | __raw_writeb(val, BAST_VA_CTRL3); | 48 | __raw_writeb(val, BAST_VA_CTRL3); |
51 | local_irq_restore(flags); | ||
52 | } | 49 | } |
53 | 50 | ||
54 | static struct physmap_flash_data simtec_nor_pdata = { | 51 | static struct physmap_flash_data simtec_nor_pdata = { |
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index a125d4e114ec..f49e28abe0ab 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/mtd/mtd.h> | 39 | #include <linux/mtd/mtd.h> |
40 | #include <linux/mtd/partitions.h> | 40 | #include <linux/mtd/partitions.h> |
41 | #include <linux/mtd/physmap.h> | 41 | #include <linux/mtd/physmap.h> |
42 | #include <linux/mtd/sh_flctl.h> | ||
42 | #include <linux/pm_clock.h> | 43 | #include <linux/pm_clock.h> |
43 | #include <linux/smsc911x.h> | 44 | #include <linux/smsc911x.h> |
44 | #include <linux/sh_intc.h> | 45 | #include <linux/sh_intc.h> |
@@ -956,6 +957,50 @@ static struct platform_device fsi_ak4643_device = { | |||
956 | }, | 957 | }, |
957 | }; | 958 | }; |
958 | 959 | ||
960 | /* FLCTL */ | ||
961 | static struct mtd_partition nand_partition_info[] = { | ||
962 | { | ||
963 | .name = "system", | ||
964 | .offset = 0, | ||
965 | .size = 128 * 1024 * 1024, | ||
966 | }, | ||
967 | { | ||
968 | .name = "userdata", | ||
969 | .offset = MTDPART_OFS_APPEND, | ||
970 | .size = 256 * 1024 * 1024, | ||
971 | }, | ||
972 | { | ||
973 | .name = "cache", | ||
974 | .offset = MTDPART_OFS_APPEND, | ||
975 | .size = 128 * 1024 * 1024, | ||
976 | }, | ||
977 | }; | ||
978 | |||
979 | static struct resource nand_flash_resources[] = { | ||
980 | [0] = { | ||
981 | .start = 0xe6a30000, | ||
982 | .end = 0xe6a3009b, | ||
983 | .flags = IORESOURCE_MEM, | ||
984 | } | ||
985 | }; | ||
986 | |||
987 | static struct sh_flctl_platform_data nand_flash_data = { | ||
988 | .parts = nand_partition_info, | ||
989 | .nr_parts = ARRAY_SIZE(nand_partition_info), | ||
990 | .flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | ||
991 | | SHBUSSEL | SEL_16BIT | SNAND_E, | ||
992 | .use_holden = 1, | ||
993 | }; | ||
994 | |||
995 | static struct platform_device nand_flash_device = { | ||
996 | .name = "sh_flctl", | ||
997 | .resource = nand_flash_resources, | ||
998 | .num_resources = ARRAY_SIZE(nand_flash_resources), | ||
999 | .dev = { | ||
1000 | .platform_data = &nand_flash_data, | ||
1001 | }, | ||
1002 | }; | ||
1003 | |||
959 | /* | 1004 | /* |
960 | * The card detect pin of the top SD/MMC slot (CN7) is active low and is | 1005 | * The card detect pin of the top SD/MMC slot (CN7) is active low and is |
961 | * connected to GPIO A22 of SH7372 (GPIO_PORT41). | 1006 | * connected to GPIO A22 of SH7372 (GPIO_PORT41). |
@@ -1259,6 +1304,7 @@ static struct platform_device *mackerel_devices[] __initdata = { | |||
1259 | &fsi_device, | 1304 | &fsi_device, |
1260 | &fsi_ak4643_device, | 1305 | &fsi_ak4643_device, |
1261 | &fsi_hdmi_device, | 1306 | &fsi_hdmi_device, |
1307 | &nand_flash_device, | ||
1262 | &sdhi0_device, | 1308 | &sdhi0_device, |
1263 | #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) | 1309 | #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) |
1264 | &sdhi1_device, | 1310 | &sdhi1_device, |
@@ -1488,6 +1534,30 @@ static void __init mackerel_init(void) | |||
1488 | gpio_request(GPIO_FN_MMCCMD0, NULL); | 1534 | gpio_request(GPIO_FN_MMCCMD0, NULL); |
1489 | gpio_request(GPIO_FN_MMCCLK0, NULL); | 1535 | gpio_request(GPIO_FN_MMCCLK0, NULL); |
1490 | 1536 | ||
1537 | /* FLCTL */ | ||
1538 | gpio_request(GPIO_FN_D0_NAF0, NULL); | ||
1539 | gpio_request(GPIO_FN_D1_NAF1, NULL); | ||
1540 | gpio_request(GPIO_FN_D2_NAF2, NULL); | ||
1541 | gpio_request(GPIO_FN_D3_NAF3, NULL); | ||
1542 | gpio_request(GPIO_FN_D4_NAF4, NULL); | ||
1543 | gpio_request(GPIO_FN_D5_NAF5, NULL); | ||
1544 | gpio_request(GPIO_FN_D6_NAF6, NULL); | ||
1545 | gpio_request(GPIO_FN_D7_NAF7, NULL); | ||
1546 | gpio_request(GPIO_FN_D8_NAF8, NULL); | ||
1547 | gpio_request(GPIO_FN_D9_NAF9, NULL); | ||
1548 | gpio_request(GPIO_FN_D10_NAF10, NULL); | ||
1549 | gpio_request(GPIO_FN_D11_NAF11, NULL); | ||
1550 | gpio_request(GPIO_FN_D12_NAF12, NULL); | ||
1551 | gpio_request(GPIO_FN_D13_NAF13, NULL); | ||
1552 | gpio_request(GPIO_FN_D14_NAF14, NULL); | ||
1553 | gpio_request(GPIO_FN_D15_NAF15, NULL); | ||
1554 | gpio_request(GPIO_FN_FCE0, NULL); | ||
1555 | gpio_request(GPIO_FN_WE0_FWE, NULL); | ||
1556 | gpio_request(GPIO_FN_FRB, NULL); | ||
1557 | gpio_request(GPIO_FN_A4_FOE, NULL); | ||
1558 | gpio_request(GPIO_FN_A5_FCDE, NULL); | ||
1559 | gpio_request(GPIO_FN_RD_FSC, NULL); | ||
1560 | |||
1491 | /* enable GPS module (GT-720F) */ | 1561 | /* enable GPS module (GT-720F) */ |
1492 | gpio_request(GPIO_FN_SCIFA2_TXD1, NULL); | 1562 | gpio_request(GPIO_FN_SCIFA2_TXD1, NULL); |
1493 | gpio_request(GPIO_FN_SCIFA2_RXD1, NULL); | 1563 | gpio_request(GPIO_FN_SCIFA2_RXD1, NULL); |
@@ -1532,6 +1602,7 @@ static void __init mackerel_init(void) | |||
1532 | sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); | 1602 | sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device); |
1533 | sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device); | 1603 | sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs0_device); |
1534 | sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device); | 1604 | sh7372_add_device_to_domain(&sh7372_a3sp, &usbhs1_device); |
1605 | sh7372_add_device_to_domain(&sh7372_a3sp, &nand_flash_device); | ||
1535 | sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device); | 1606 | sh7372_add_device_to_domain(&sh7372_a3sp, &sh_mmcif_device); |
1536 | sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device); | 1607 | sh7372_add_device_to_domain(&sh7372_a3sp, &sdhi0_device); |
1537 | #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) | 1608 | #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) |
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index de243e3c8392..94d1f88246d3 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -511,7 +511,7 @@ enum { MSTP001, MSTP000, | |||
511 | MSTP223, | 511 | MSTP223, |
512 | MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207, | 512 | MSTP218, MSTP217, MSTP216, MSTP214, MSTP208, MSTP207, |
513 | MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, | 513 | MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, |
514 | MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312, | 514 | MSTP328, MSTP323, MSTP322, MSTP315, MSTP314, MSTP313, MSTP312, |
515 | MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406, | 515 | MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP407, MSTP406, |
516 | MSTP405, MSTP404, MSTP403, MSTP400, | 516 | MSTP405, MSTP404, MSTP403, MSTP400, |
517 | MSTP_NR }; | 517 | MSTP_NR }; |
@@ -553,6 +553,7 @@ static struct clk mstp_clks[MSTP_NR] = { | |||
553 | [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */ | 553 | [MSTP328] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR3, 28, 0), /* FSI2 */ |
554 | [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ | 554 | [MSTP323] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 23, 0), /* IIC1 */ |
555 | [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ | 555 | [MSTP322] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 22, 0), /* USB0 */ |
556 | [MSTP315] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 15, 0), /* FLCTL*/ | ||
556 | [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ | 557 | [MSTP314] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 14, 0), /* SDHI0 */ |
557 | [MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */ | 558 | [MSTP313] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 13, 0), /* SDHI1 */ |
558 | [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */ | 559 | [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMC */ |
@@ -653,6 +654,7 @@ static struct clk_lookup lookups[] = { | |||
653 | CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ | 654 | CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */ |
654 | CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */ | 655 | CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */ |
655 | CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */ | 656 | CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[MSTP322]), /* USB0 */ |
657 | CLKDEV_DEV_ID("sh_flctl.0", &mstp_clks[MSTP315]), /* FLCTL */ | ||
656 | CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ | 658 | CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ |
657 | CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ | 659 | CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ |
658 | CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */ | 660 | CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */ |
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 8b90c44d237f..1621ad07d284 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -1544,6 +1544,8 @@ static struct fsmc_nand_platform_data nand_platform_data = { | |||
1544 | .nr_partitions = ARRAY_SIZE(u300_partitions), | 1544 | .nr_partitions = ARRAY_SIZE(u300_partitions), |
1545 | .options = NAND_SKIP_BBTSCAN, | 1545 | .options = NAND_SKIP_BBTSCAN, |
1546 | .width = FSMC_NAND_BW8, | 1546 | .width = FSMC_NAND_BW8, |
1547 | .ale_off = PLAT_NAND_ALE, | ||
1548 | .cle_off = PLAT_NAND_CLE, | ||
1547 | }; | 1549 | }; |
1548 | 1550 | ||
1549 | static struct platform_device nand_device = { | 1551 | static struct platform_device nand_device = { |
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 7b7cba960b69..65f87c523892 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -24,6 +24,11 @@ | |||
24 | /* NFIF */ | 24 | /* NFIF */ |
25 | #define U300_NAND_IF_PHYS_BASE 0x9f800000 | 25 | #define U300_NAND_IF_PHYS_BASE 0x9f800000 |
26 | 26 | ||
27 | /* ALE, CLE offset for FSMC NAND */ | ||
28 | #define PLAT_NAND_CLE (1 << 16) | ||
29 | #define PLAT_NAND_ALE (1 << 17) | ||
30 | |||
31 | |||
27 | /* AHB Peripherals */ | 32 | /* AHB Peripherals */ |
28 | #define U300_AHB_PER_PHYS_BASE 0xa0000000 | 33 | #define U300_AHB_PER_PHYS_BASE 0xa0000000 |
29 | #define U300_AHB_PER_VIRT_BASE 0xff010000 | 34 | #define U300_AHB_PER_VIRT_BASE 0xff010000 |
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index 0a430e06f5e5..e44a55bc7f0d 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -60,7 +60,7 @@ static int __init flash_init(void) | |||
60 | if (mymtd) { | 60 | if (mymtd) { |
61 | mymtd->owner = THIS_MODULE; | 61 | mymtd->owner = THIS_MODULE; |
62 | mtd_device_parse_register(mymtd, part_probe_types, | 62 | mtd_device_parse_register(mymtd, part_probe_types, |
63 | 0, NULL, 0); | 63 | NULL, NULL, 0); |
64 | } else { | 64 | } else { |
65 | pr_err("Failed to register MTD device for flash\n"); | 65 | pr_err("Failed to register MTD device for flash\n"); |
66 | } | 66 | } |
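The one-line change above tracks the current mtd_device_parse_register() prototype, whose third argument is a struct mtd_part_parser_data pointer rather than an integer origin, so the old 0 becomes NULL. As a rough illustration (hypothetical caller, not Octeon code), a platform that wants the "ofpart" parser to pick up partition sub-nodes would pass parser data instead of NULL:

    #include <linux/of.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    static const char *example_part_probes[] = { "cmdlinepart", "ofpart", NULL };

    /* Hypothetical registration helper showing the new third argument */
    static int example_register(struct mtd_info *mtd, struct device_node *np)
    {
            struct mtd_part_parser_data ppdata = {
                    .of_node = np,  /* where "ofpart" looks for partition sub-nodes */
            };

            /* parser types, parser data, fallback partitions, number of fallbacks */
            return mtd_device_parse_register(mtd, example_part_probes, &ppdata,
                                             NULL, 0);
    }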
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 65334c49b71e..c81ef7e10e08 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,10 +22,10 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/fsl/mxs-dma.h> | ||
25 | 26 | ||
26 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
27 | #include <mach/mxs.h> | 28 | #include <mach/mxs.h> |
28 | #include <mach/dma.h> | ||
29 | #include <mach/common.h> | 29 | #include <mach/common.h> |
30 | 30 | ||
31 | #include "dmaengine.h" | 31 | #include "dmaengine.h" |
@@ -337,10 +337,32 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) | |||
337 | clk_disable_unprepare(mxs_dma->clk); | 337 | clk_disable_unprepare(mxs_dma->clk); |
338 | } | 338 | } |
339 | 339 | ||
340 | /* | ||
341 | * How to use the flags for ->device_prep_slave_sg() : | ||
342 | * [1] If there is only one DMA command in the DMA chain, the code should be: | ||
343 | * ...... | ||
344 | * ->device_prep_slave_sg(DMA_CTRL_ACK); | ||
345 | * ...... | ||
346 | * [2] If there are two DMA commands in the DMA chain, the code should be | ||
347 | * ...... | ||
348 | * ->device_prep_slave_sg(0); | ||
349 | * ...... | ||
350 | * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
351 | * ...... | ||
352 | * [3] If there are more than two DMA commands in the DMA chain, the code | ||
353 | * should be: | ||
354 | * ...... | ||
355 | * ->device_prep_slave_sg(0); // First | ||
356 | * ...... | ||
357 | * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]); | ||
358 | * ...... | ||
359 | * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last | ||
360 | * ...... | ||
361 | */ | ||
340 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | 362 | static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( |
341 | struct dma_chan *chan, struct scatterlist *sgl, | 363 | struct dma_chan *chan, struct scatterlist *sgl, |
342 | unsigned int sg_len, enum dma_transfer_direction direction, | 364 | unsigned int sg_len, enum dma_transfer_direction direction, |
343 | unsigned long append, void *context) | 365 | unsigned long flags, void *context) |
344 | { | 366 | { |
345 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); | 367 | struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
346 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; | 368 | struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
@@ -348,6 +370,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
348 | struct scatterlist *sg; | 370 | struct scatterlist *sg; |
349 | int i, j; | 371 | int i, j; |
350 | u32 *pio; | 372 | u32 *pio; |
373 | bool append = flags & DMA_PREP_INTERRUPT; | ||
351 | int idx = append ? mxs_chan->desc_count : 0; | 374 | int idx = append ? mxs_chan->desc_count : 0; |
352 | 375 | ||
353 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) | 376 | if (mxs_chan->status == DMA_IN_PROGRESS && !append) |
@@ -374,7 +397,6 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
374 | ccw->bits |= CCW_CHAIN; | 397 | ccw->bits |= CCW_CHAIN; |
375 | ccw->bits &= ~CCW_IRQ; | 398 | ccw->bits &= ~CCW_IRQ; |
376 | ccw->bits &= ~CCW_DEC_SEM; | 399 | ccw->bits &= ~CCW_DEC_SEM; |
377 | ccw->bits &= ~CCW_WAIT4END; | ||
378 | } else { | 400 | } else { |
379 | idx = 0; | 401 | idx = 0; |
380 | } | 402 | } |
@@ -389,7 +411,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
389 | ccw->bits = 0; | 411 | ccw->bits = 0; |
390 | ccw->bits |= CCW_IRQ; | 412 | ccw->bits |= CCW_IRQ; |
391 | ccw->bits |= CCW_DEC_SEM; | 413 | ccw->bits |= CCW_DEC_SEM; |
392 | ccw->bits |= CCW_WAIT4END; | 414 | if (flags & DMA_CTRL_ACK) |
415 | ccw->bits |= CCW_WAIT4END; | ||
393 | ccw->bits |= CCW_HALT_ON_TERM; | 416 | ccw->bits |= CCW_HALT_ON_TERM; |
394 | ccw->bits |= CCW_TERM_FLUSH; | 417 | ccw->bits |= CCW_TERM_FLUSH; |
395 | ccw->bits |= BF_CCW(sg_len, PIO_NUM); | 418 | ccw->bits |= BF_CCW(sg_len, PIO_NUM); |
@@ -420,7 +443,8 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( | |||
420 | ccw->bits &= ~CCW_CHAIN; | 443 | ccw->bits &= ~CCW_CHAIN; |
421 | ccw->bits |= CCW_IRQ; | 444 | ccw->bits |= CCW_IRQ; |
422 | ccw->bits |= CCW_DEC_SEM; | 445 | ccw->bits |= CCW_DEC_SEM; |
423 | ccw->bits |= CCW_WAIT4END; | 446 | if (flags & DMA_CTRL_ACK) |
447 | ccw->bits |= CCW_WAIT4END; | ||
424 | } | 448 | } |
425 | } | 449 | } |
426 | } | 450 | } |
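The block comment added to mxs_dma_prep_slave_sg() above prescribes which flags a client passes when it chains several commands on one channel. A condensed, hypothetical client-side sketch of the three-command case follows (invented function and buffer names; the flag sequence is the point, matching rule [3] of the comment):

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    /* Hypothetical three-command chain on an mxs-dma channel */
    static int example_chain_three(struct dma_chan *chan,
                                   struct scatterlist *sg1, unsigned int len1,
                                   struct scatterlist *sg2, unsigned int len2,
                                   struct scatterlist *sg3, unsigned int len3)
    {
            struct dma_async_tx_descriptor *desc;

            /* First command in the chain: no flags */
            desc = dmaengine_prep_slave_sg(chan, sg1, len1, DMA_MEM_TO_DEV, 0);
            if (!desc)
                    return -EINVAL;

            /* Intermediate command: append to the existing chain */
            desc = dmaengine_prep_slave_sg(chan, sg2, len2, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT);
            if (!desc)
                    return -EINVAL;

            /* Last command: append and request WAIT4END via DMA_CTRL_ACK */
            desc = dmaengine_prep_slave_sg(chan, sg3, len3, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EINVAL;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }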
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 65f36cf2ff33..b0f2ef988188 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -38,10 +38,10 @@ | |||
38 | #include <linux/gpio.h> | 38 | #include <linux/gpio.h> |
39 | #include <linux/regulator/consumer.h> | 39 | #include <linux/regulator/consumer.h> |
40 | #include <linux/module.h> | 40 | #include <linux/module.h> |
41 | #include <linux/fsl/mxs-dma.h> | ||
41 | 42 | ||
42 | #include <mach/mxs.h> | 43 | #include <mach/mxs.h> |
43 | #include <mach/common.h> | 44 | #include <mach/common.h> |
44 | #include <mach/dma.h> | ||
45 | #include <mach/mmc.h> | 45 | #include <mach/mmc.h> |
46 | 46 | ||
47 | #define DRIVER_NAME "mxs-mmc" | 47 | #define DRIVER_NAME "mxs-mmc" |
@@ -305,7 +305,7 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) | |||
305 | } | 305 | } |
306 | 306 | ||
307 | static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( | 307 | static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( |
308 | struct mxs_mmc_host *host, unsigned int append) | 308 | struct mxs_mmc_host *host, unsigned long flags) |
309 | { | 309 | { |
310 | struct dma_async_tx_descriptor *desc; | 310 | struct dma_async_tx_descriptor *desc; |
311 | struct mmc_data *data = host->data; | 311 | struct mmc_data *data = host->data; |
@@ -325,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( | |||
325 | } | 325 | } |
326 | 326 | ||
327 | desc = dmaengine_prep_slave_sg(host->dmach, | 327 | desc = dmaengine_prep_slave_sg(host->dmach, |
328 | sgl, sg_len, host->slave_dirn, append); | 328 | sgl, sg_len, host->slave_dirn, flags); |
329 | if (desc) { | 329 | if (desc) { |
330 | desc->callback = mxs_mmc_dma_irq_callback; | 330 | desc->callback = mxs_mmc_dma_irq_callback; |
331 | desc->callback_param = host; | 331 | desc->callback_param = host; |
@@ -358,7 +358,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host) | |||
358 | host->ssp_pio_words[2] = cmd1; | 358 | host->ssp_pio_words[2] = cmd1; |
359 | host->dma_dir = DMA_NONE; | 359 | host->dma_dir = DMA_NONE; |
360 | host->slave_dirn = DMA_TRANS_NONE; | 360 | host->slave_dirn = DMA_TRANS_NONE; |
361 | desc = mxs_mmc_prep_dma(host, 0); | 361 | desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); |
362 | if (!desc) | 362 | if (!desc) |
363 | goto out; | 363 | goto out; |
364 | 364 | ||
@@ -398,7 +398,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) | |||
398 | host->ssp_pio_words[2] = cmd1; | 398 | host->ssp_pio_words[2] = cmd1; |
399 | host->dma_dir = DMA_NONE; | 399 | host->dma_dir = DMA_NONE; |
400 | host->slave_dirn = DMA_TRANS_NONE; | 400 | host->slave_dirn = DMA_TRANS_NONE; |
401 | desc = mxs_mmc_prep_dma(host, 0); | 401 | desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); |
402 | if (!desc) | 402 | if (!desc) |
403 | goto out; | 403 | goto out; |
404 | 404 | ||
@@ -526,7 +526,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) | |||
526 | host->data = data; | 526 | host->data = data; |
527 | host->dma_dir = dma_data_dir; | 527 | host->dma_dir = dma_data_dir; |
528 | host->slave_dirn = slave_dirn; | 528 | host->slave_dirn = slave_dirn; |
529 | desc = mxs_mmc_prep_dma(host, 1); | 529 | desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
530 | if (!desc) | 530 | if (!desc) |
531 | goto out; | 531 | goto out; |
532 | 532 | ||
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 284cf3433720..5760c1a4b3f6 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -304,9 +304,6 @@ config MTD_OOPS | |||
304 | buffer in a flash partition where it can be read back at some | 304 | buffer in a flash partition where it can be read back at some |
305 | later point. | 305 | later point. |
306 | 306 | ||
307 | To use, add console=ttyMTDx to the kernel command line, | ||
308 | where x is the MTD device number to use. | ||
309 | |||
310 | config MTD_SWAP | 307 | config MTD_SWAP |
311 | tristate "Swap on MTD device support" | 308 | tristate "Swap on MTD device support" |
312 | depends on MTD && SWAP | 309 | depends on MTD && SWAP |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 9bcd1f415f43..dbbd2edfb812 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -87,7 +87,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private ** | |||
87 | 87 | ||
88 | static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, | 88 | static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, |
89 | size_t *retlen, void **virt, resource_size_t *phys); | 89 | size_t *retlen, void **virt, resource_size_t *phys); |
90 | static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len); | 90 | static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len); |
91 | 91 | ||
92 | static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); | 92 | static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); |
93 | static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); | 93 | static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); |
@@ -262,9 +262,9 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd) | |||
262 | static void fixup_use_point(struct mtd_info *mtd) | 262 | static void fixup_use_point(struct mtd_info *mtd) |
263 | { | 263 | { |
264 | struct map_info *map = mtd->priv; | 264 | struct map_info *map = mtd->priv; |
265 | if (!mtd->point && map_is_linear(map)) { | 265 | if (!mtd->_point && map_is_linear(map)) { |
266 | mtd->point = cfi_intelext_point; | 266 | mtd->_point = cfi_intelext_point; |
267 | mtd->unpoint = cfi_intelext_unpoint; | 267 | mtd->_unpoint = cfi_intelext_unpoint; |
268 | } | 268 | } |
269 | } | 269 | } |
270 | 270 | ||
@@ -274,8 +274,8 @@ static void fixup_use_write_buffers(struct mtd_info *mtd) | |||
274 | struct cfi_private *cfi = map->fldrv_priv; | 274 | struct cfi_private *cfi = map->fldrv_priv; |
275 | if (cfi->cfiq->BufWriteTimeoutTyp) { | 275 | if (cfi->cfiq->BufWriteTimeoutTyp) { |
276 | printk(KERN_INFO "Using buffer write method\n" ); | 276 | printk(KERN_INFO "Using buffer write method\n" ); |
277 | mtd->write = cfi_intelext_write_buffers; | 277 | mtd->_write = cfi_intelext_write_buffers; |
278 | mtd->writev = cfi_intelext_writev; | 278 | mtd->_writev = cfi_intelext_writev; |
279 | } | 279 | } |
280 | } | 280 | } |
281 | 281 | ||
@@ -443,15 +443,15 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | |||
443 | mtd->type = MTD_NORFLASH; | 443 | mtd->type = MTD_NORFLASH; |
444 | 444 | ||
445 | /* Fill in the default mtd operations */ | 445 | /* Fill in the default mtd operations */ |
446 | mtd->erase = cfi_intelext_erase_varsize; | 446 | mtd->_erase = cfi_intelext_erase_varsize; |
447 | mtd->read = cfi_intelext_read; | 447 | mtd->_read = cfi_intelext_read; |
448 | mtd->write = cfi_intelext_write_words; | 448 | mtd->_write = cfi_intelext_write_words; |
449 | mtd->sync = cfi_intelext_sync; | 449 | mtd->_sync = cfi_intelext_sync; |
450 | mtd->lock = cfi_intelext_lock; | 450 | mtd->_lock = cfi_intelext_lock; |
451 | mtd->unlock = cfi_intelext_unlock; | 451 | mtd->_unlock = cfi_intelext_unlock; |
452 | mtd->is_locked = cfi_intelext_is_locked; | 452 | mtd->_is_locked = cfi_intelext_is_locked; |
453 | mtd->suspend = cfi_intelext_suspend; | 453 | mtd->_suspend = cfi_intelext_suspend; |
454 | mtd->resume = cfi_intelext_resume; | 454 | mtd->_resume = cfi_intelext_resume; |
455 | mtd->flags = MTD_CAP_NORFLASH; | 455 | mtd->flags = MTD_CAP_NORFLASH; |
456 | mtd->name = map->name; | 456 | mtd->name = map->name; |
457 | mtd->writesize = 1; | 457 | mtd->writesize = 1; |
@@ -600,12 +600,12 @@ static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) | |||
600 | } | 600 | } |
601 | 601 | ||
602 | #ifdef CONFIG_MTD_OTP | 602 | #ifdef CONFIG_MTD_OTP |
603 | mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg; | 603 | mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg; |
604 | mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg; | 604 | mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg; |
605 | mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg; | 605 | mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg; |
606 | mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg; | 606 | mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg; |
607 | mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info; | 607 | mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info; |
608 | mtd->get_user_prot_info = cfi_intelext_get_user_prot_info; | 608 | mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info; |
609 | #endif | 609 | #endif |
610 | 610 | ||
611 | /* This function has the potential to distort the reality | 611 | /* This function has the potential to distort the reality |
@@ -1017,8 +1017,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad | |||
1017 | case FL_READY: | 1017 | case FL_READY: |
1018 | case FL_STATUS: | 1018 | case FL_STATUS: |
1019 | case FL_JEDEC_QUERY: | 1019 | case FL_JEDEC_QUERY: |
1020 | /* We should really make set_vpp() count, rather than doing this */ | ||
1021 | DISABLE_VPP(map); | ||
1022 | break; | 1020 | break; |
1023 | default: | 1021 | default: |
1024 | printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate); | 1022 | printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate); |
@@ -1324,7 +1322,7 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
1324 | int chipnum; | 1322 | int chipnum; |
1325 | int ret = 0; | 1323 | int ret = 0; |
1326 | 1324 | ||
1327 | if (!map->virt || (from + len > mtd->size)) | 1325 | if (!map->virt) |
1328 | return -EINVAL; | 1326 | return -EINVAL; |
1329 | 1327 | ||
1330 | /* Now lock the chip(s) to POINT state */ | 1328 | /* Now lock the chip(s) to POINT state */ |
@@ -1334,7 +1332,6 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
1334 | ofs = from - (chipnum << cfi->chipshift); | 1332 | ofs = from - (chipnum << cfi->chipshift); |
1335 | 1333 | ||
1336 | *virt = map->virt + cfi->chips[chipnum].start + ofs; | 1334 | *virt = map->virt + cfi->chips[chipnum].start + ofs; |
1337 | *retlen = 0; | ||
1338 | if (phys) | 1335 | if (phys) |
1339 | *phys = map->phys + cfi->chips[chipnum].start + ofs; | 1336 | *phys = map->phys + cfi->chips[chipnum].start + ofs; |
1340 | 1337 | ||
@@ -1369,12 +1366,12 @@ static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
1369 | return 0; | 1366 | return 0; |
1370 | } | 1367 | } |
1371 | 1368 | ||
1372 | static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | 1369 | static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) |
1373 | { | 1370 | { |
1374 | struct map_info *map = mtd->priv; | 1371 | struct map_info *map = mtd->priv; |
1375 | struct cfi_private *cfi = map->fldrv_priv; | 1372 | struct cfi_private *cfi = map->fldrv_priv; |
1376 | unsigned long ofs; | 1373 | unsigned long ofs; |
1377 | int chipnum; | 1374 | int chipnum, err = 0; |
1378 | 1375 | ||
1379 | /* Now unlock the chip(s) POINT state */ | 1376 | /* Now unlock the chip(s) POINT state */ |
1380 | 1377 | ||
@@ -1382,7 +1379,7 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |||
1382 | chipnum = (from >> cfi->chipshift); | 1379 | chipnum = (from >> cfi->chipshift); |
1383 | ofs = from - (chipnum << cfi->chipshift); | 1380 | ofs = from - (chipnum << cfi->chipshift); |
1384 | 1381 | ||
1385 | while (len) { | 1382 | while (len && !err) { |
1386 | unsigned long thislen; | 1383 | unsigned long thislen; |
1387 | struct flchip *chip; | 1384 | struct flchip *chip; |
1388 | 1385 | ||
@@ -1400,8 +1397,10 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |||
1400 | chip->ref_point_counter--; | 1397 | chip->ref_point_counter--; |
1401 | if(chip->ref_point_counter == 0) | 1398 | if(chip->ref_point_counter == 0) |
1402 | chip->state = FL_READY; | 1399 | chip->state = FL_READY; |
1403 | } else | 1400 | } else { |
1404 | printk(KERN_ERR "%s: Warning: unpoint called on non pointed region\n", map->name); /* Should this give an error? */ | 1401 | printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name); |
1402 | err = -EINVAL; | ||
1403 | } | ||
1405 | 1404 | ||
1406 | put_chip(map, chip, chip->start); | 1405 | put_chip(map, chip, chip->start); |
1407 | mutex_unlock(&chip->mutex); | 1406 | mutex_unlock(&chip->mutex); |
@@ -1410,6 +1409,8 @@ static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | |||
1410 | ofs = 0; | 1409 | ofs = 0; |
1411 | chipnum++; | 1410 | chipnum++; |
1412 | } | 1411 | } |
1412 | |||
1413 | return err; | ||
1413 | } | 1414 | } |
1414 | 1415 | ||
1415 | static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) | 1416 | static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) |
@@ -1456,8 +1457,6 @@ static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, siz | |||
1456 | chipnum = (from >> cfi->chipshift); | 1457 | chipnum = (from >> cfi->chipshift); |
1457 | ofs = from - (chipnum << cfi->chipshift); | 1458 | ofs = from - (chipnum << cfi->chipshift); |
1458 | 1459 | ||
1459 | *retlen = 0; | ||
1460 | |||
1461 | while (len) { | 1460 | while (len) { |
1462 | unsigned long thislen; | 1461 | unsigned long thislen; |
1463 | 1462 | ||
@@ -1551,7 +1550,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1551 | } | 1550 | } |
1552 | 1551 | ||
1553 | xip_enable(map, chip, adr); | 1552 | xip_enable(map, chip, adr); |
1554 | out: put_chip(map, chip, adr); | 1553 | out: DISABLE_VPP(map); |
1554 | put_chip(map, chip, adr); | ||
1555 | mutex_unlock(&chip->mutex); | 1555 | mutex_unlock(&chip->mutex); |
1556 | return ret; | 1556 | return ret; |
1557 | } | 1557 | } |
@@ -1565,10 +1565,6 @@ static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t le | |||
1565 | int chipnum; | 1565 | int chipnum; |
1566 | unsigned long ofs; | 1566 | unsigned long ofs; |
1567 | 1567 | ||
1568 | *retlen = 0; | ||
1569 | if (!len) | ||
1570 | return 0; | ||
1571 | |||
1572 | chipnum = to >> cfi->chipshift; | 1568 | chipnum = to >> cfi->chipshift; |
1573 | ofs = to - (chipnum << cfi->chipshift); | 1569 | ofs = to - (chipnum << cfi->chipshift); |
1574 | 1570 | ||
@@ -1794,7 +1790,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1794 | } | 1790 | } |
1795 | 1791 | ||
1796 | xip_enable(map, chip, cmd_adr); | 1792 | xip_enable(map, chip, cmd_adr); |
1797 | out: put_chip(map, chip, cmd_adr); | 1793 | out: DISABLE_VPP(map); |
1794 | put_chip(map, chip, cmd_adr); | ||
1798 | mutex_unlock(&chip->mutex); | 1795 | mutex_unlock(&chip->mutex); |
1799 | return ret; | 1796 | return ret; |
1800 | } | 1797 | } |
@@ -1813,7 +1810,6 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs, | |||
1813 | for (i = 0; i < count; i++) | 1810 | for (i = 0; i < count; i++) |
1814 | len += vecs[i].iov_len; | 1811 | len += vecs[i].iov_len; |
1815 | 1812 | ||
1816 | *retlen = 0; | ||
1817 | if (!len) | 1813 | if (!len) |
1818 | return 0; | 1814 | return 0; |
1819 | 1815 | ||
@@ -1932,6 +1928,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1932 | ret = -EIO; | 1928 | ret = -EIO; |
1933 | } else if (chipstatus & 0x20 && retries--) { | 1929 | } else if (chipstatus & 0x20 && retries--) { |
1934 | printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); | 1930 | printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); |
1931 | DISABLE_VPP(map); | ||
1935 | put_chip(map, chip, adr); | 1932 | put_chip(map, chip, adr); |
1936 | mutex_unlock(&chip->mutex); | 1933 | mutex_unlock(&chip->mutex); |
1937 | goto retry; | 1934 | goto retry; |
@@ -1944,7 +1941,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1944 | } | 1941 | } |
1945 | 1942 | ||
1946 | xip_enable(map, chip, adr); | 1943 | xip_enable(map, chip, adr); |
1947 | out: put_chip(map, chip, adr); | 1944 | out: DISABLE_VPP(map); |
1945 | put_chip(map, chip, adr); | ||
1948 | mutex_unlock(&chip->mutex); | 1946 | mutex_unlock(&chip->mutex); |
1949 | return ret; | 1947 | return ret; |
1950 | } | 1948 | } |
@@ -2086,7 +2084,8 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip | |||
2086 | } | 2084 | } |
2087 | 2085 | ||
2088 | xip_enable(map, chip, adr); | 2086 | xip_enable(map, chip, adr); |
2089 | out: put_chip(map, chip, adr); | 2087 | out: DISABLE_VPP(map); |
2088 | put_chip(map, chip, adr); | ||
2090 | mutex_unlock(&chip->mutex); | 2089 | mutex_unlock(&chip->mutex); |
2091 | return ret; | 2090 | return ret; |
2092 | } | 2091 | } |
@@ -2483,7 +2482,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd) | |||
2483 | allowed to. Or should we return -EAGAIN, because the upper layers | 2482 | allowed to. Or should we return -EAGAIN, because the upper layers |
2484 | ought to have already shut down anything which was using the device | 2483 | ought to have already shut down anything which was using the device |
2485 | anyway? The latter for now. */ | 2484 | anyway? The latter for now. */ |
2486 | printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate); | 2485 | printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state); |
2487 | ret = -EAGAIN; | 2486 | ret = -EAGAIN; |
2488 | case FL_PM_SUSPENDED: | 2487 | case FL_PM_SUSPENDED: |
2489 | break; | 2488 | break; |
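The two recurring patterns in the hunks above, the rename of the mtd_info methods to their underscored forms (_read, _write, _point, ...) and the removal of per-driver *retlen = 0, zero-length and bounds checks, go together: in this series that bookkeeping is centralised in the mtd_read()/mtd_write() wrappers in mtdcore, which are the only callers of the underscored hooks. A condensed sketch of the wrapper pattern (simplified, not the verbatim mtdcore.c code):

    #include <linux/mtd/mtd.h>

    /* Simplified sketch of the mtdcore wrapper that makes per-driver checks redundant */
    int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
                 u_char *buf)
    {
            /* Common argument handling, done once for every driver */
            *retlen = 0;
            if (from < 0 || from > mtd->size || len > mtd->size - from)
                    return -EINVAL;
            if (!len)
                    return 0;

            /* Drivers only ever see sane arguments through their _read hook */
            return mtd->_read(mtd, from, len, retlen, buf);
    }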
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 8d70895a58d6..d02592e6a0f0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -59,6 +59,9 @@ static void cfi_amdstd_resume (struct mtd_info *); | |||
59 | static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); | 59 | static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *); |
60 | static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); | 60 | static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); |
61 | 61 | ||
62 | static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
63 | size_t *retlen, const u_char *buf); | ||
64 | |||
62 | static void cfi_amdstd_destroy(struct mtd_info *); | 65 | static void cfi_amdstd_destroy(struct mtd_info *); |
63 | 66 | ||
64 | struct mtd_info *cfi_cmdset_0002(struct map_info *, int); | 67 | struct mtd_info *cfi_cmdset_0002(struct map_info *, int); |
@@ -189,7 +192,7 @@ static void fixup_use_write_buffers(struct mtd_info *mtd) | |||
189 | struct cfi_private *cfi = map->fldrv_priv; | 192 | struct cfi_private *cfi = map->fldrv_priv; |
190 | if (cfi->cfiq->BufWriteTimeoutTyp) { | 193 | if (cfi->cfiq->BufWriteTimeoutTyp) { |
191 | pr_debug("Using buffer write method\n" ); | 194 | pr_debug("Using buffer write method\n" ); |
192 | mtd->write = cfi_amdstd_write_buffers; | 195 | mtd->_write = cfi_amdstd_write_buffers; |
193 | } | 196 | } |
194 | } | 197 | } |
195 | 198 | ||
@@ -228,8 +231,8 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd) | |||
228 | static void fixup_use_secsi(struct mtd_info *mtd) | 231 | static void fixup_use_secsi(struct mtd_info *mtd) |
229 | { | 232 | { |
230 | /* Setup for chips with a secsi area */ | 233 | /* Setup for chips with a secsi area */ |
231 | mtd->read_user_prot_reg = cfi_amdstd_secsi_read; | 234 | mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; |
232 | mtd->read_fact_prot_reg = cfi_amdstd_secsi_read; | 235 | mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; |
233 | } | 236 | } |
234 | 237 | ||
235 | static void fixup_use_erase_chip(struct mtd_info *mtd) | 238 | static void fixup_use_erase_chip(struct mtd_info *mtd) |
@@ -238,7 +241,7 @@ static void fixup_use_erase_chip(struct mtd_info *mtd) | |||
238 | struct cfi_private *cfi = map->fldrv_priv; | 241 | struct cfi_private *cfi = map->fldrv_priv; |
239 | if ((cfi->cfiq->NumEraseRegions == 1) && | 242 | if ((cfi->cfiq->NumEraseRegions == 1) && |
240 | ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { | 243 | ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { |
241 | mtd->erase = cfi_amdstd_erase_chip; | 244 | mtd->_erase = cfi_amdstd_erase_chip; |
242 | } | 245 | } |
243 | 246 | ||
244 | } | 247 | } |
@@ -249,8 +252,8 @@ static void fixup_use_erase_chip(struct mtd_info *mtd) | |||
249 | */ | 252 | */ |
250 | static void fixup_use_atmel_lock(struct mtd_info *mtd) | 253 | static void fixup_use_atmel_lock(struct mtd_info *mtd) |
251 | { | 254 | { |
252 | mtd->lock = cfi_atmel_lock; | 255 | mtd->_lock = cfi_atmel_lock; |
253 | mtd->unlock = cfi_atmel_unlock; | 256 | mtd->_unlock = cfi_atmel_unlock; |
254 | mtd->flags |= MTD_POWERUP_LOCK; | 257 | mtd->flags |= MTD_POWERUP_LOCK; |
255 | } | 258 | } |
256 | 259 | ||
@@ -429,12 +432,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
429 | mtd->type = MTD_NORFLASH; | 432 | mtd->type = MTD_NORFLASH; |
430 | 433 | ||
431 | /* Fill in the default mtd operations */ | 434 | /* Fill in the default mtd operations */ |
432 | mtd->erase = cfi_amdstd_erase_varsize; | 435 | mtd->_erase = cfi_amdstd_erase_varsize; |
433 | mtd->write = cfi_amdstd_write_words; | 436 | mtd->_write = cfi_amdstd_write_words; |
434 | mtd->read = cfi_amdstd_read; | 437 | mtd->_read = cfi_amdstd_read; |
435 | mtd->sync = cfi_amdstd_sync; | 438 | mtd->_sync = cfi_amdstd_sync; |
436 | mtd->suspend = cfi_amdstd_suspend; | 439 | mtd->_suspend = cfi_amdstd_suspend; |
437 | mtd->resume = cfi_amdstd_resume; | 440 | mtd->_resume = cfi_amdstd_resume; |
438 | mtd->flags = MTD_CAP_NORFLASH; | 441 | mtd->flags = MTD_CAP_NORFLASH; |
439 | mtd->name = map->name; | 442 | mtd->name = map->name; |
440 | mtd->writesize = 1; | 443 | mtd->writesize = 1; |
@@ -443,6 +446,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
443 | pr_debug("MTD %s(): write buffer size %d\n", __func__, | 446 | pr_debug("MTD %s(): write buffer size %d\n", __func__, |
444 | mtd->writebufsize); | 447 | mtd->writebufsize); |
445 | 448 | ||
449 | mtd->_panic_write = cfi_amdstd_panic_write; | ||
446 | mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; | 450 | mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; |
447 | 451 | ||
448 | if (cfi->cfi_mode==CFI_MODE_CFI){ | 452 | if (cfi->cfi_mode==CFI_MODE_CFI){ |
@@ -770,8 +774,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad | |||
770 | 774 | ||
771 | case FL_READY: | 775 | case FL_READY: |
772 | case FL_STATUS: | 776 | case FL_STATUS: |
773 | /* We should really make set_vpp() count, rather than doing this */ | ||
774 | DISABLE_VPP(map); | ||
775 | break; | 777 | break; |
776 | default: | 778 | default: |
777 | printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); | 779 | printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); |
@@ -1013,13 +1015,9 @@ static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_ | |||
1013 | int ret = 0; | 1015 | int ret = 0; |
1014 | 1016 | ||
1015 | /* ofs: offset within the first chip that the first read should start */ | 1017 | /* ofs: offset within the first chip that the first read should start */ |
1016 | |||
1017 | chipnum = (from >> cfi->chipshift); | 1018 | chipnum = (from >> cfi->chipshift); |
1018 | ofs = from - (chipnum << cfi->chipshift); | 1019 | ofs = from - (chipnum << cfi->chipshift); |
1019 | 1020 | ||
1020 | |||
1021 | *retlen = 0; | ||
1022 | |||
1023 | while (len) { | 1021 | while (len) { |
1024 | unsigned long thislen; | 1022 | unsigned long thislen; |
1025 | 1023 | ||
@@ -1097,16 +1095,11 @@ static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, | |||
1097 | int chipnum; | 1095 | int chipnum; |
1098 | int ret = 0; | 1096 | int ret = 0; |
1099 | 1097 | ||
1100 | |||
1101 | /* ofs: offset within the first chip that the first read should start */ | 1098 | /* ofs: offset within the first chip that the first read should start */ |
1102 | |||
1103 | /* 8 secsi bytes per chip */ | 1099 | /* 8 secsi bytes per chip */ |
1104 | chipnum=from>>3; | 1100 | chipnum=from>>3; |
1105 | ofs=from & 7; | 1101 | ofs=from & 7; |
1106 | 1102 | ||
1107 | |||
1108 | *retlen = 0; | ||
1109 | |||
1110 | while (len) { | 1103 | while (len) { |
1111 | unsigned long thislen; | 1104 | unsigned long thislen; |
1112 | 1105 | ||
@@ -1234,6 +1227,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1234 | xip_enable(map, chip, adr); | 1227 | xip_enable(map, chip, adr); |
1235 | op_done: | 1228 | op_done: |
1236 | chip->state = FL_READY; | 1229 | chip->state = FL_READY; |
1230 | DISABLE_VPP(map); | ||
1237 | put_chip(map, chip, adr); | 1231 | put_chip(map, chip, adr); |
1238 | mutex_unlock(&chip->mutex); | 1232 | mutex_unlock(&chip->mutex); |
1239 | 1233 | ||
@@ -1251,10 +1245,6 @@ static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len, | |||
1251 | unsigned long ofs, chipstart; | 1245 | unsigned long ofs, chipstart; |
1252 | DECLARE_WAITQUEUE(wait, current); | 1246 | DECLARE_WAITQUEUE(wait, current); |
1253 | 1247 | ||
1254 | *retlen = 0; | ||
1255 | if (!len) | ||
1256 | return 0; | ||
1257 | |||
1258 | chipnum = to >> cfi->chipshift; | 1248 | chipnum = to >> cfi->chipshift; |
1259 | ofs = to - (chipnum << cfi->chipshift); | 1249 | ofs = to - (chipnum << cfi->chipshift); |
1260 | chipstart = cfi->chips[chipnum].start; | 1250 | chipstart = cfi->chips[chipnum].start; |
@@ -1476,6 +1466,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1476 | ret = -EIO; | 1466 | ret = -EIO; |
1477 | op_done: | 1467 | op_done: |
1478 | chip->state = FL_READY; | 1468 | chip->state = FL_READY; |
1469 | DISABLE_VPP(map); | ||
1479 | put_chip(map, chip, adr); | 1470 | put_chip(map, chip, adr); |
1480 | mutex_unlock(&chip->mutex); | 1471 | mutex_unlock(&chip->mutex); |
1481 | 1472 | ||
@@ -1493,10 +1484,6 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, | |||
1493 | int chipnum; | 1484 | int chipnum; |
1494 | unsigned long ofs; | 1485 | unsigned long ofs; |
1495 | 1486 | ||
1496 | *retlen = 0; | ||
1497 | if (!len) | ||
1498 | return 0; | ||
1499 | |||
1500 | chipnum = to >> cfi->chipshift; | 1487 | chipnum = to >> cfi->chipshift; |
1501 | ofs = to - (chipnum << cfi->chipshift); | 1488 | ofs = to - (chipnum << cfi->chipshift); |
1502 | 1489 | ||
@@ -1562,6 +1549,238 @@ static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, | |||
1562 | return 0; | 1549 | return 0; |
1563 | } | 1550 | } |
1564 | 1551 | ||
1552 | /* | ||
1553 | * Wait for the flash chip to become ready to write data | ||
1554 | * | ||
1555 | * This is only called during the panic_write() path. When panic_write() | ||
1556 | * is called, the kernel is in the process of a panic, and will soon be | ||
1557 | * dead. Therefore we don't take any locks, and attempt to get access | ||
1558 | * to the chip as soon as possible. | ||
1559 | */ | ||
1560 | static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, | ||
1561 | unsigned long adr) | ||
1562 | { | ||
1563 | struct cfi_private *cfi = map->fldrv_priv; | ||
1564 | int retries = 10; | ||
1565 | int i; | ||
1566 | |||
1567 | /* | ||
1568 | * If the driver thinks the chip is idle, and no toggle bits | ||
1569 | * are changing, then the chip is actually idle for sure. | ||
1570 | */ | ||
1571 | if (chip->state == FL_READY && chip_ready(map, adr)) | ||
1572 | return 0; | ||
1573 | |||
1574 | /* | ||
1575 | * Try several times to reset the chip and then wait for it | ||
1576 | * to become idle. The upper limit of a few milliseconds of | ||
1577 | * delay isn't a big problem: the kernel is dying anyway. It | ||
1578 | * is more important to save the messages. | ||
1579 | */ | ||
1580 | while (retries > 0) { | ||
1581 | const unsigned long timeo = (HZ / 1000) + 1; | ||
1582 | |||
1583 | /* send the reset command */ | ||
1584 | map_write(map, CMD(0xF0), chip->start); | ||
1585 | |||
1586 | /* wait for the chip to become ready */ | ||
1587 | for (i = 0; i < jiffies_to_usecs(timeo); i++) { | ||
1588 | if (chip_ready(map, adr)) | ||
1589 | return 0; | ||
1590 | |||
1591 | udelay(1); | ||
1592 | } | ||
1593 | } | ||
1594 | |||
1595 | /* the chip never became ready */ | ||
1596 | return -EBUSY; | ||
1597 | } | ||
1598 | |||
1599 | /* | ||
1600 | * Write out one word of data to a single flash chip during a kernel panic | ||
1601 | * | ||
1602 | * This is only called during the panic_write() path. When panic_write() | ||
1603 | * is called, the kernel is in the process of a panic, and will soon be | ||
1604 | * dead. Therefore we don't take any locks, and attempt to get access | ||
1605 | * to the chip as soon as possible. | ||
1606 | * | ||
1607 | * The implementation of this routine is intentionally similar to | ||
1608 | * do_write_oneword(), in order to ease code maintenance. | ||
1609 | */ | ||
1610 | static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, | ||
1611 | unsigned long adr, map_word datum) | ||
1612 | { | ||
1613 | const unsigned long uWriteTimeout = (HZ / 1000) + 1; | ||
1614 | struct cfi_private *cfi = map->fldrv_priv; | ||
1615 | int retry_cnt = 0; | ||
1616 | map_word oldd; | ||
1617 | int ret = 0; | ||
1618 | int i; | ||
1619 | |||
1620 | adr += chip->start; | ||
1621 | |||
1622 | ret = cfi_amdstd_panic_wait(map, chip, adr); | ||
1623 | if (ret) | ||
1624 | return ret; | ||
1625 | |||
1626 | pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", | ||
1627 | __func__, adr, datum.x[0]); | ||
1628 | |||
1629 | /* | ||
1630 | * Check for a NOP for the case when the datum to write is already | ||
1631 | * present - it saves time and works around buggy chips that corrupt | ||
1632 | * data at other locations when 0xff is written to a location that | ||
1633 | * already contains 0xff. | ||
1634 | */ | ||
1635 | oldd = map_read(map, adr); | ||
1636 | if (map_word_equal(map, oldd, datum)) { | ||
1637 | pr_debug("MTD %s(): NOP\n", __func__); | ||
1638 | goto op_done; | ||
1639 | } | ||
1640 | |||
1641 | ENABLE_VPP(map); | ||
1642 | |||
1643 | retry: | ||
1644 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1645 | cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); | ||
1646 | cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); | ||
1647 | map_write(map, datum, adr); | ||
1648 | |||
1649 | for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) { | ||
1650 | if (chip_ready(map, adr)) | ||
1651 | break; | ||
1652 | |||
1653 | udelay(1); | ||
1654 | } | ||
1655 | |||
1656 | if (!chip_good(map, adr, datum)) { | ||
1657 | /* reset on all failures. */ | ||
1658 | map_write(map, CMD(0xF0), chip->start); | ||
1659 | /* FIXME - should have reset delay before continuing */ | ||
1660 | |||
1661 | if (++retry_cnt <= MAX_WORD_RETRIES) | ||
1662 | goto retry; | ||
1663 | |||
1664 | ret = -EIO; | ||
1665 | } | ||
1666 | |||
1667 | op_done: | ||
1668 | DISABLE_VPP(map); | ||
1669 | return ret; | ||
1670 | } | ||
1671 | |||
1672 | /* | ||
1673 | * Write out some data during a kernel panic | ||
1674 | * | ||
1675 | * This is used by the mtdoops driver to save the dying messages from a | ||
1676 | * kernel which has panic'd. | ||
1677 | * | ||
1678 | * This routine ignores all of the locking used throughout the rest of the | ||
1679 | * driver, in order to ensure that the data gets written out no matter what | ||
1680 | * state this driver (and the flash chip itself) was in when the kernel crashed. | ||
1681 | * | ||
1682 | * The implementation of this routine is intentionally similar to | ||
1683 | * cfi_amdstd_write_words(), in order to ease code maintenance. | ||
1684 | */ | ||
1685 | static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
1686 | size_t *retlen, const u_char *buf) | ||
1687 | { | ||
1688 | struct map_info *map = mtd->priv; | ||
1689 | struct cfi_private *cfi = map->fldrv_priv; | ||
1690 | unsigned long ofs, chipstart; | ||
1691 | int ret = 0; | ||
1692 | int chipnum; | ||
1693 | |||
1694 | chipnum = to >> cfi->chipshift; | ||
1695 | ofs = to - (chipnum << cfi->chipshift); | ||
1696 | chipstart = cfi->chips[chipnum].start; | ||
1697 | |||
1698 | /* If it's not bus aligned, do the first byte write */ | ||
1699 | if (ofs & (map_bankwidth(map) - 1)) { | ||
1700 | unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); | ||
1701 | int i = ofs - bus_ofs; | ||
1702 | int n = 0; | ||
1703 | map_word tmp_buf; | ||
1704 | |||
1705 | ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); | ||
1706 | if (ret) | ||
1707 | return ret; | ||
1708 | |||
1709 | /* Load 'tmp_buf' with old contents of flash */ | ||
1710 | tmp_buf = map_read(map, bus_ofs + chipstart); | ||
1711 | |||
1712 | /* Number of bytes to copy from buffer */ | ||
1713 | n = min_t(int, len, map_bankwidth(map) - i); | ||
1714 | |||
1715 | tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); | ||
1716 | |||
1717 | ret = do_panic_write_oneword(map, &cfi->chips[chipnum], | ||
1718 | bus_ofs, tmp_buf); | ||
1719 | if (ret) | ||
1720 | return ret; | ||
1721 | |||
1722 | ofs += n; | ||
1723 | buf += n; | ||
1724 | (*retlen) += n; | ||
1725 | len -= n; | ||
1726 | |||
1727 | if (ofs >> cfi->chipshift) { | ||
1728 | chipnum++; | ||
1729 | ofs = 0; | ||
1730 | if (chipnum == cfi->numchips) | ||
1731 | return 0; | ||
1732 | } | ||
1733 | } | ||
1734 | |||
1735 | /* We are now aligned, write as much as possible */ | ||
1736 | while (len >= map_bankwidth(map)) { | ||
1737 | map_word datum; | ||
1738 | |||
1739 | datum = map_word_load(map, buf); | ||
1740 | |||
1741 | ret = do_panic_write_oneword(map, &cfi->chips[chipnum], | ||
1742 | ofs, datum); | ||
1743 | if (ret) | ||
1744 | return ret; | ||
1745 | |||
1746 | ofs += map_bankwidth(map); | ||
1747 | buf += map_bankwidth(map); | ||
1748 | (*retlen) += map_bankwidth(map); | ||
1749 | len -= map_bankwidth(map); | ||
1750 | |||
1751 | if (ofs >> cfi->chipshift) { | ||
1752 | chipnum++; | ||
1753 | ofs = 0; | ||
1754 | if (chipnum == cfi->numchips) | ||
1755 | return 0; | ||
1756 | |||
1757 | chipstart = cfi->chips[chipnum].start; | ||
1758 | } | ||
1759 | } | ||
1760 | |||
1761 | /* Write the trailing bytes if any */ | ||
1762 | if (len & (map_bankwidth(map) - 1)) { | ||
1763 | map_word tmp_buf; | ||
1764 | |||
1765 | ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); | ||
1766 | if (ret) | ||
1767 | return ret; | ||
1768 | |||
1769 | tmp_buf = map_read(map, ofs + chipstart); | ||
1770 | |||
1771 | tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); | ||
1772 | |||
1773 | ret = do_panic_write_oneword(map, &cfi->chips[chipnum], | ||
1774 | ofs, tmp_buf); | ||
1775 | if (ret) | ||
1776 | return ret; | ||
1777 | |||
1778 | (*retlen) += len; | ||
1779 | } | ||
1780 | |||
1781 | return 0; | ||
1782 | } | ||
1783 | |||
1565 | 1784 | ||
1566 | /* | 1785 | /* |
1567 | * Handle devices with one erase region, that only implement | 1786 | * Handle devices with one erase region, that only implement |
@@ -1649,6 +1868,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) | |||
1649 | 1868 | ||
1650 | chip->state = FL_READY; | 1869 | chip->state = FL_READY; |
1651 | xip_enable(map, chip, adr); | 1870 | xip_enable(map, chip, adr); |
1871 | DISABLE_VPP(map); | ||
1652 | put_chip(map, chip, adr); | 1872 | put_chip(map, chip, adr); |
1653 | mutex_unlock(&chip->mutex); | 1873 | mutex_unlock(&chip->mutex); |
1654 | 1874 | ||
@@ -1739,6 +1959,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1739 | } | 1959 | } |
1740 | 1960 | ||
1741 | chip->state = FL_READY; | 1961 | chip->state = FL_READY; |
1962 | DISABLE_VPP(map); | ||
1742 | put_chip(map, chip, adr); | 1963 | put_chip(map, chip, adr); |
1743 | mutex_unlock(&chip->mutex); | 1964 | mutex_unlock(&chip->mutex); |
1744 | return ret; | 1965 | return ret; |
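Both erase paths above now call DISABLE_VPP(map) once the erase has finished, presumably pairing with the ENABLE_VPP() issued when the operation started. When several chips or partitions share one programming-voltage line, the underlying set_vpp() hook is typically reference counted so that one user dropping VPP cannot cut power under another; a hedged sketch of such a hook (all names here are illustrative, not the driver's actual code):

	#include <linux/spinlock.h>
	#include <linux/bug.h>
	#include <linux/mtd/map.h>

	static DEFINE_SPINLOCK(vpp_lock);
	static int vpp_count;

	/* Stand-in for whatever GPIO or regulator drives the VPP line. */
	static void board_vpp_switch(int on)
	{
		/* drive the real programming-voltage line here */
	}

	static void example_set_vpp(struct map_info *map, int enable)
	{
		unsigned long flags;

		spin_lock_irqsave(&vpp_lock, flags);
		if (enable) {
			if (vpp_count++ == 0)
				board_vpp_switch(1);
		} else if (!WARN_ON(vpp_count == 0)) {
			if (--vpp_count == 0)
				board_vpp_switch(0);
		}
		spin_unlock_irqrestore(&vpp_lock, flags);
	}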
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index 85e80180b65b..096993f9711e 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c | |||
@@ -228,15 +228,15 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) | |||
228 | } | 228 | } |
229 | 229 | ||
230 | /* Also select the correct geometry setup too */ | 230 | /* Also select the correct geometry setup too */ |
231 | mtd->erase = cfi_staa_erase_varsize; | 231 | mtd->_erase = cfi_staa_erase_varsize; |
232 | mtd->read = cfi_staa_read; | 232 | mtd->_read = cfi_staa_read; |
233 | mtd->write = cfi_staa_write_buffers; | 233 | mtd->_write = cfi_staa_write_buffers; |
234 | mtd->writev = cfi_staa_writev; | 234 | mtd->_writev = cfi_staa_writev; |
235 | mtd->sync = cfi_staa_sync; | 235 | mtd->_sync = cfi_staa_sync; |
236 | mtd->lock = cfi_staa_lock; | 236 | mtd->_lock = cfi_staa_lock; |
237 | mtd->unlock = cfi_staa_unlock; | 237 | mtd->_unlock = cfi_staa_unlock; |
238 | mtd->suspend = cfi_staa_suspend; | 238 | mtd->_suspend = cfi_staa_suspend; |
239 | mtd->resume = cfi_staa_resume; | 239 | mtd->_resume = cfi_staa_resume; |
240 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; | 240 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; |
241 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ | 241 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ |
242 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; | 242 | mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; |
@@ -394,8 +394,6 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t | |||
394 | chipnum = (from >> cfi->chipshift); | 394 | chipnum = (from >> cfi->chipshift); |
395 | ofs = from - (chipnum << cfi->chipshift); | 395 | ofs = from - (chipnum << cfi->chipshift); |
396 | 396 | ||
397 | *retlen = 0; | ||
398 | |||
399 | while (len) { | 397 | while (len) { |
400 | unsigned long thislen; | 398 | unsigned long thislen; |
401 | 399 | ||
@@ -617,10 +615,6 @@ static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to, | |||
617 | int chipnum; | 615 | int chipnum; |
618 | unsigned long ofs; | 616 | unsigned long ofs; |
619 | 617 | ||
620 | *retlen = 0; | ||
621 | if (!len) | ||
622 | return 0; | ||
623 | |||
624 | chipnum = to >> cfi->chipshift; | 618 | chipnum = to >> cfi->chipshift; |
625 | ofs = to - (chipnum << cfi->chipshift); | 619 | ofs = to - (chipnum << cfi->chipshift); |
626 | 620 | ||
@@ -904,12 +898,6 @@ static int cfi_staa_erase_varsize(struct mtd_info *mtd, | |||
904 | int i, first; | 898 | int i, first; |
905 | struct mtd_erase_region_info *regions = mtd->eraseregions; | 899 | struct mtd_erase_region_info *regions = mtd->eraseregions; |
906 | 900 | ||
907 | if (instr->addr > mtd->size) | ||
908 | return -EINVAL; | ||
909 | |||
910 | if ((instr->len + instr->addr) > mtd->size) | ||
911 | return -EINVAL; | ||
912 | |||
913 | /* Check that both start and end of the requested erase are | 901 | /* Check that both start and end of the requested erase are |
914 | * aligned with the erasesize at the appropriate addresses. | 902 | * aligned with the erasesize at the appropriate addresses. |
915 | */ | 903 | */ |
@@ -1155,9 +1143,6 @@ static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
1155 | if (len & (mtd->erasesize -1)) | 1143 | if (len & (mtd->erasesize -1)) |
1156 | return -EINVAL; | 1144 | return -EINVAL; |
1157 | 1145 | ||
1158 | if ((len + ofs) > mtd->size) | ||
1159 | return -EINVAL; | ||
1160 | |||
1161 | chipnum = ofs >> cfi->chipshift; | 1146 | chipnum = ofs >> cfi->chipshift; |
1162 | adr = ofs - (chipnum << cfi->chipshift); | 1147 | adr = ofs - (chipnum << cfi->chipshift); |
1163 | 1148 | ||
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index 8e464054a631..f992418f40a8 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c | |||
@@ -173,12 +173,6 @@ int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob, | |||
173 | int i, first; | 173 | int i, first; |
174 | struct mtd_erase_region_info *regions = mtd->eraseregions; | 174 | struct mtd_erase_region_info *regions = mtd->eraseregions; |
175 | 175 | ||
176 | if (ofs > mtd->size) | ||
177 | return -EINVAL; | ||
178 | |||
179 | if ((len + ofs) > mtd->size) | ||
180 | return -EINVAL; | ||
181 | |||
182 | /* Check that both start and end of the requested erase are | 176 | /* Check that both start and end of the requested erase are |
183 | * aligned with the erasesize at the appropriate addresses. | 177 | * aligned with the erasesize at the appropriate addresses. |
184 | */ | 178 | */ |
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h index 89c6595454a5..800b0e853e86 100644 --- a/drivers/mtd/chips/fwh_lock.h +++ b/drivers/mtd/chips/fwh_lock.h | |||
@@ -101,7 +101,7 @@ static void fixup_use_fwh_lock(struct mtd_info *mtd) | |||
101 | { | 101 | { |
102 | printk(KERN_NOTICE "using fwh lock/unlock method\n"); | 102 | printk(KERN_NOTICE "using fwh lock/unlock method\n"); |
103 | /* Setup for the chips with the fwh lock method */ | 103 | /* Setup for the chips with the fwh lock method */ |
104 | mtd->lock = fwh_lock_varsize; | 104 | mtd->_lock = fwh_lock_varsize; |
105 | mtd->unlock = fwh_unlock_varsize; | 105 | mtd->_unlock = fwh_unlock_varsize; |
106 | } | 106 | } |
107 | #endif /* FWH_LOCK_H */ | 107 | #endif /* FWH_LOCK_H */ |
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c index f2b872946871..f7a5bca92aef 100644 --- a/drivers/mtd/chips/map_absent.c +++ b/drivers/mtd/chips/map_absent.c | |||
@@ -55,10 +55,10 @@ static struct mtd_info *map_absent_probe(struct map_info *map) | |||
55 | mtd->name = map->name; | 55 | mtd->name = map->name; |
56 | mtd->type = MTD_ABSENT; | 56 | mtd->type = MTD_ABSENT; |
57 | mtd->size = map->size; | 57 | mtd->size = map->size; |
58 | mtd->erase = map_absent_erase; | 58 | mtd->_erase = map_absent_erase; |
59 | mtd->read = map_absent_read; | 59 | mtd->_read = map_absent_read; |
60 | mtd->write = map_absent_write; | 60 | mtd->_write = map_absent_write; |
61 | mtd->sync = map_absent_sync; | 61 | mtd->_sync = map_absent_sync; |
62 | mtd->flags = 0; | 62 | mtd->flags = 0; |
63 | mtd->erasesize = PAGE_SIZE; | 63 | mtd->erasesize = PAGE_SIZE; |
64 | mtd->writesize = 1; | 64 | mtd->writesize = 1; |
@@ -70,13 +70,11 @@ static struct mtd_info *map_absent_probe(struct map_info *map) | |||
70 | 70 | ||
71 | static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) | 71 | static int map_absent_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) |
72 | { | 72 | { |
73 | *retlen = 0; | ||
74 | return -ENODEV; | 73 | return -ENODEV; |
75 | } | 74 | } |
76 | 75 | ||
77 | static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) | 76 | static int map_absent_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) |
78 | { | 77 | { |
79 | *retlen = 0; | ||
80 | return -ENODEV; | 78 | return -ENODEV; |
81 | } | 79 | } |
82 | 80 | ||
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c index 67640ccb2d41..991c2a1c05d3 100644 --- a/drivers/mtd/chips/map_ram.c +++ b/drivers/mtd/chips/map_ram.c | |||
@@ -64,11 +64,11 @@ static struct mtd_info *map_ram_probe(struct map_info *map) | |||
64 | mtd->name = map->name; | 64 | mtd->name = map->name; |
65 | mtd->type = MTD_RAM; | 65 | mtd->type = MTD_RAM; |
66 | mtd->size = map->size; | 66 | mtd->size = map->size; |
67 | mtd->erase = mapram_erase; | 67 | mtd->_erase = mapram_erase; |
68 | mtd->get_unmapped_area = mapram_unmapped_area; | 68 | mtd->_get_unmapped_area = mapram_unmapped_area; |
69 | mtd->read = mapram_read; | 69 | mtd->_read = mapram_read; |
70 | mtd->write = mapram_write; | 70 | mtd->_write = mapram_write; |
71 | mtd->sync = mapram_nop; | 71 | mtd->_sync = mapram_nop; |
72 | mtd->flags = MTD_CAP_RAM; | 72 | mtd->flags = MTD_CAP_RAM; |
73 | mtd->writesize = 1; | 73 | mtd->writesize = 1; |
74 | 74 | ||
@@ -122,14 +122,10 @@ static int mapram_erase (struct mtd_info *mtd, struct erase_info *instr) | |||
122 | unsigned long i; | 122 | unsigned long i; |
123 | 123 | ||
124 | allff = map_word_ff(map); | 124 | allff = map_word_ff(map); |
125 | |||
126 | for (i=0; i<instr->len; i += map_bankwidth(map)) | 125 | for (i=0; i<instr->len; i += map_bankwidth(map)) |
127 | map_write(map, allff, instr->addr + i); | 126 | map_write(map, allff, instr->addr + i); |
128 | |||
129 | instr->state = MTD_ERASE_DONE; | 127 | instr->state = MTD_ERASE_DONE; |
130 | |||
131 | mtd_erase_callback(instr); | 128 | mtd_erase_callback(instr); |
132 | |||
133 | return 0; | 129 | return 0; |
134 | } | 130 | } |
135 | 131 | ||
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c index 593f73d480d2..47a43cf7e5c6 100644 --- a/drivers/mtd/chips/map_rom.c +++ b/drivers/mtd/chips/map_rom.c | |||
@@ -41,11 +41,11 @@ static struct mtd_info *map_rom_probe(struct map_info *map) | |||
41 | mtd->name = map->name; | 41 | mtd->name = map->name; |
42 | mtd->type = MTD_ROM; | 42 | mtd->type = MTD_ROM; |
43 | mtd->size = map->size; | 43 | mtd->size = map->size; |
44 | mtd->get_unmapped_area = maprom_unmapped_area; | 44 | mtd->_get_unmapped_area = maprom_unmapped_area; |
45 | mtd->read = maprom_read; | 45 | mtd->_read = maprom_read; |
46 | mtd->write = maprom_write; | 46 | mtd->_write = maprom_write; |
47 | mtd->sync = maprom_nop; | 47 | mtd->_sync = maprom_nop; |
48 | mtd->erase = maprom_erase; | 48 | mtd->_erase = maprom_erase; |
49 | mtd->flags = MTD_CAP_ROM; | 49 | mtd->flags = MTD_CAP_ROM; |
50 | mtd->erasesize = map->size; | 50 | mtd->erasesize = map->size; |
51 | mtd->writesize = 1; | 51 | mtd->writesize = 1; |
@@ -85,8 +85,7 @@ static void maprom_nop(struct mtd_info *mtd) | |||
85 | 85 | ||
86 | static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) | 86 | static int maprom_write (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) |
87 | { | 87 | { |
88 | printk(KERN_NOTICE "maprom_write called\n"); | 88 | return -EROFS; |
89 | return -EIO; | ||
90 | } | 89 | } |
91 | 90 | ||
92 | static int maprom_erase (struct mtd_info *mtd, struct erase_info *info) | 91 | static int maprom_erase (struct mtd_info *mtd, struct erase_info *info) |
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 8d3dac40d7e6..4cdb2af7bf44 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig | |||
@@ -103,6 +103,13 @@ config M25PXX_USE_FAST_READ | |||
103 | help | 103 | help |
104 | This option enables FAST_READ access supported by ST M25Pxx. | 104 | This option enables FAST_READ access supported by ST M25Pxx. |
105 | 105 | ||
106 | config MTD_SPEAR_SMI | ||
107 | tristate "SPEAR MTD NOR Support through SMI controller" | ||
108 | depends on PLAT_SPEAR | ||
109 | default y | ||
110 | help | ||
111 | This enables SNOR support on SPEAR platforms using the SMI controller | ||
112 | |||
106 | config MTD_SST25L | 113 | config MTD_SST25L |
107 | tristate "Support SST25L (non JEDEC) SPI Flash chips" | 114 | tristate "Support SST25L (non JEDEC) SPI Flash chips" |
108 | depends on SPI_MASTER | 115 | depends on SPI_MASTER |
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 56c7cd462f11..a4dd1d822b6c 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile | |||
@@ -17,6 +17,7 @@ obj-$(CONFIG_MTD_LART) += lart.o | |||
17 | obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o | 17 | obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o |
18 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o | 18 | obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o |
19 | obj-$(CONFIG_MTD_M25P80) += m25p80.o | 19 | obj-$(CONFIG_MTD_M25P80) += m25p80.o |
20 | obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o | ||
20 | obj-$(CONFIG_MTD_SST25L) += sst25l.o | 21 | obj-$(CONFIG_MTD_SST25L) += sst25l.o |
21 | 22 | ||
22 | CFLAGS_docg3.o += -I$(src) \ No newline at end of file | 23 | CFLAGS_docg3.o += -I$(src) \ No newline at end of file |
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index e7e46d1e7463..a4a80b742e65 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c | |||
@@ -104,14 +104,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
104 | int offset = from & (PAGE_SIZE-1); | 104 | int offset = from & (PAGE_SIZE-1); |
105 | int cpylen; | 105 | int cpylen; |
106 | 106 | ||
107 | if (from > mtd->size) | ||
108 | return -EINVAL; | ||
109 | if (from + len > mtd->size) | ||
110 | len = mtd->size - from; | ||
111 | |||
112 | if (retlen) | ||
113 | *retlen = 0; | ||
114 | |||
115 | while (len) { | 107 | while (len) { |
116 | if ((offset + len) > PAGE_SIZE) | 108 | if ((offset + len) > PAGE_SIZE) |
117 | cpylen = PAGE_SIZE - offset; // multiple pages | 109 | cpylen = PAGE_SIZE - offset; // multiple pages |
@@ -148,8 +140,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, | |||
148 | int offset = to & ~PAGE_MASK; // page offset | 140 | int offset = to & ~PAGE_MASK; // page offset |
149 | int cpylen; | 141 | int cpylen; |
150 | 142 | ||
151 | if (retlen) | ||
152 | *retlen = 0; | ||
153 | while (len) { | 143 | while (len) { |
154 | if ((offset+len) > PAGE_SIZE) | 144 | if ((offset+len) > PAGE_SIZE) |
155 | cpylen = PAGE_SIZE - offset; // multiple pages | 145 | cpylen = PAGE_SIZE - offset; // multiple pages |
@@ -188,13 +178,6 @@ static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
188 | struct block2mtd_dev *dev = mtd->priv; | 178 | struct block2mtd_dev *dev = mtd->priv; |
189 | int err; | 179 | int err; |
190 | 180 | ||
191 | if (!len) | ||
192 | return 0; | ||
193 | if (to >= mtd->size) | ||
194 | return -ENOSPC; | ||
195 | if (to + len > mtd->size) | ||
196 | len = mtd->size - to; | ||
197 | |||
198 | mutex_lock(&dev->write_mutex); | 181 | mutex_lock(&dev->write_mutex); |
199 | err = _block2mtd_write(dev, buf, to, len, retlen); | 182 | err = _block2mtd_write(dev, buf, to, len, retlen); |
200 | mutex_unlock(&dev->write_mutex); | 183 | mutex_unlock(&dev->write_mutex); |
@@ -283,13 +266,14 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size) | |||
283 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; | 266 | dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK; |
284 | dev->mtd.erasesize = erase_size; | 267 | dev->mtd.erasesize = erase_size; |
285 | dev->mtd.writesize = 1; | 268 | dev->mtd.writesize = 1; |
269 | dev->mtd.writebufsize = PAGE_SIZE; | ||
286 | dev->mtd.type = MTD_RAM; | 270 | dev->mtd.type = MTD_RAM; |
287 | dev->mtd.flags = MTD_CAP_RAM; | 271 | dev->mtd.flags = MTD_CAP_RAM; |
288 | dev->mtd.erase = block2mtd_erase; | 272 | dev->mtd._erase = block2mtd_erase; |
289 | dev->mtd.write = block2mtd_write; | 273 | dev->mtd._write = block2mtd_write; |
290 | dev->mtd.writev = mtd_writev; | 274 | dev->mtd._writev = mtd_writev; |
291 | dev->mtd.sync = block2mtd_sync; | 275 | dev->mtd._sync = block2mtd_sync; |
292 | dev->mtd.read = block2mtd_read; | 276 | dev->mtd._read = block2mtd_read; |
293 | dev->mtd.priv = dev; | 277 | dev->mtd.priv = dev; |
294 | dev->mtd.owner = THIS_MODULE; | 278 | dev->mtd.owner = THIS_MODULE; |
295 | 279 | ||
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c index b1cdf6479019..a4eb8b5b85ec 100644 --- a/drivers/mtd/devices/doc2000.c +++ b/drivers/mtd/devices/doc2000.c | |||
@@ -562,14 +562,15 @@ void DoC2k_init(struct mtd_info *mtd) | |||
562 | 562 | ||
563 | mtd->type = MTD_NANDFLASH; | 563 | mtd->type = MTD_NANDFLASH; |
564 | mtd->flags = MTD_CAP_NANDFLASH; | 564 | mtd->flags = MTD_CAP_NANDFLASH; |
565 | mtd->writesize = 512; | 565 | mtd->writebufsize = mtd->writesize = 512; |
566 | mtd->oobsize = 16; | 566 | mtd->oobsize = 16; |
567 | mtd->ecc_strength = 2; | ||
567 | mtd->owner = THIS_MODULE; | 568 | mtd->owner = THIS_MODULE; |
568 | mtd->erase = doc_erase; | 569 | mtd->_erase = doc_erase; |
569 | mtd->read = doc_read; | 570 | mtd->_read = doc_read; |
570 | mtd->write = doc_write; | 571 | mtd->_write = doc_write; |
571 | mtd->read_oob = doc_read_oob; | 572 | mtd->_read_oob = doc_read_oob; |
572 | mtd->write_oob = doc_write_oob; | 573 | mtd->_write_oob = doc_write_oob; |
573 | this->curfloor = -1; | 574 | this->curfloor = -1; |
574 | this->curchip = -1; | 575 | this->curchip = -1; |
575 | mutex_init(&this->lock); | 576 | mutex_init(&this->lock); |
@@ -602,13 +603,7 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
602 | int i, len256 = 0, ret=0; | 603 | int i, len256 = 0, ret=0; |
603 | size_t left = len; | 604 | size_t left = len; |
604 | 605 | ||
605 | /* Don't allow read past end of device */ | ||
606 | if (from >= this->totlen) | ||
607 | return -EINVAL; | ||
608 | |||
609 | mutex_lock(&this->lock); | 606 | mutex_lock(&this->lock); |
610 | |||
611 | *retlen = 0; | ||
612 | while (left) { | 607 | while (left) { |
613 | len = left; | 608 | len = left; |
614 | 609 | ||
@@ -748,13 +743,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
748 | size_t left = len; | 743 | size_t left = len; |
749 | int status; | 744 | int status; |
750 | 745 | ||
751 | /* Don't allow write past end of device */ | ||
752 | if (to >= this->totlen) | ||
753 | return -EINVAL; | ||
754 | |||
755 | mutex_lock(&this->lock); | 746 | mutex_lock(&this->lock); |
756 | |||
757 | *retlen = 0; | ||
758 | while (left) { | 747 | while (left) { |
759 | len = left; | 748 | len = left; |
760 | 749 | ||
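Besides the switch to the underscore-prefixed handlers, DoC2k_init() now publishes mtd->writebufsize (the largest chunk programmed in one go) and mtd->ecc_strength (how many bit errors the on-chip ECC can correct per write unit); the doc2001 and doc2001plus hunks below do the same. Upper layers can read these fields instead of hard-coding per-chip knowledge; a hedged example of a consumer (the threshold is made up for illustration):

	#include <linux/mtd/mtd.h>
	#include <linux/printk.h>

	/*
	 * Illustrative only: decide whether a device's ECC is strong enough
	 * for a hypothetical caller that wants at least 'needed' correctable
	 * bits per writesize unit.
	 */
	static bool ecc_good_enough(struct mtd_info *mtd, unsigned int needed)
	{
		pr_info("%s: writebufsize=%u ecc_strength=%u\n",
			mtd->name, mtd->writebufsize, mtd->ecc_strength);
		return mtd->ecc_strength >= needed;
	}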
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c index 7543b98f46c4..f6927955dab0 100644 --- a/drivers/mtd/devices/doc2001.c +++ b/drivers/mtd/devices/doc2001.c | |||
@@ -346,14 +346,15 @@ void DoCMil_init(struct mtd_info *mtd) | |||
346 | 346 | ||
347 | /* FIXME: erase size is not always 8KiB */ | 347 | /* FIXME: erase size is not always 8KiB */ |
348 | mtd->erasesize = 0x2000; | 348 | mtd->erasesize = 0x2000; |
349 | mtd->writesize = 512; | 349 | mtd->writebufsize = mtd->writesize = 512; |
350 | mtd->oobsize = 16; | 350 | mtd->oobsize = 16; |
351 | mtd->ecc_strength = 2; | ||
351 | mtd->owner = THIS_MODULE; | 352 | mtd->owner = THIS_MODULE; |
352 | mtd->erase = doc_erase; | 353 | mtd->_erase = doc_erase; |
353 | mtd->read = doc_read; | 354 | mtd->_read = doc_read; |
354 | mtd->write = doc_write; | 355 | mtd->_write = doc_write; |
355 | mtd->read_oob = doc_read_oob; | 356 | mtd->_read_oob = doc_read_oob; |
356 | mtd->write_oob = doc_write_oob; | 357 | mtd->_write_oob = doc_write_oob; |
357 | this->curfloor = -1; | 358 | this->curfloor = -1; |
358 | this->curchip = -1; | 359 | this->curchip = -1; |
359 | 360 | ||
@@ -383,10 +384,6 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, | |||
383 | void __iomem *docptr = this->virtadr; | 384 | void __iomem *docptr = this->virtadr; |
384 | struct Nand *mychip = &this->chips[from >> (this->chipshift)]; | 385 | struct Nand *mychip = &this->chips[from >> (this->chipshift)]; |
385 | 386 | ||
386 | /* Don't allow read past end of device */ | ||
387 | if (from >= this->totlen) | ||
388 | return -EINVAL; | ||
389 | |||
390 | /* Don't allow a single read to cross a 512-byte block boundary */ | 387 | /* Don't allow a single read to cross a 512-byte block boundary */ |
391 | if (from + len > ((from | 0x1ff) + 1)) | 388 | if (from + len > ((from | 0x1ff) + 1)) |
392 | len = ((from | 0x1ff) + 1) - from; | 389 | len = ((from | 0x1ff) + 1) - from; |
@@ -494,10 +491,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len, | |||
494 | void __iomem *docptr = this->virtadr; | 491 | void __iomem *docptr = this->virtadr; |
495 | struct Nand *mychip = &this->chips[to >> (this->chipshift)]; | 492 | struct Nand *mychip = &this->chips[to >> (this->chipshift)]; |
496 | 493 | ||
497 | /* Don't allow write past end of device */ | ||
498 | if (to >= this->totlen) | ||
499 | return -EINVAL; | ||
500 | |||
501 | #if 0 | 494 | #if 0 |
502 | /* Don't allow a single write to cross a 512-byte block boundary */ | 495 | /* Don't allow a single write to cross a 512-byte block boundary */ |
503 | if (to + len > ( (to | 0x1ff) + 1)) | 496 | if (to + len > ( (to | 0x1ff) + 1)) |
@@ -599,7 +592,6 @@ static int doc_write (struct mtd_info *mtd, loff_t to, size_t len, | |||
599 | printk("Error programming flash\n"); | 592 | printk("Error programming flash\n"); |
600 | /* Error in programming | 593 | /* Error in programming |
601 | FIXME: implement Bad Block Replacement (in nftl.c ??) */ | 594 | FIXME: implement Bad Block Replacement (in nftl.c ??) */ |
602 | *retlen = 0; | ||
603 | ret = -EIO; | 595 | ret = -EIO; |
604 | } | 596 | } |
605 | dummy = ReadDOC(docptr, LastDataRead); | 597 | dummy = ReadDOC(docptr, LastDataRead); |
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c index 177510d0e7ee..04eb2e4aa50f 100644 --- a/drivers/mtd/devices/doc2001plus.c +++ b/drivers/mtd/devices/doc2001plus.c | |||
@@ -467,14 +467,15 @@ void DoCMilPlus_init(struct mtd_info *mtd) | |||
467 | 467 | ||
468 | mtd->type = MTD_NANDFLASH; | 468 | mtd->type = MTD_NANDFLASH; |
469 | mtd->flags = MTD_CAP_NANDFLASH; | 469 | mtd->flags = MTD_CAP_NANDFLASH; |
470 | mtd->writesize = 512; | 470 | mtd->writebufsize = mtd->writesize = 512; |
471 | mtd->oobsize = 16; | 471 | mtd->oobsize = 16; |
472 | mtd->ecc_strength = 2; | ||
472 | mtd->owner = THIS_MODULE; | 473 | mtd->owner = THIS_MODULE; |
473 | mtd->erase = doc_erase; | 474 | mtd->_erase = doc_erase; |
474 | mtd->read = doc_read; | 475 | mtd->_read = doc_read; |
475 | mtd->write = doc_write; | 476 | mtd->_write = doc_write; |
476 | mtd->read_oob = doc_read_oob; | 477 | mtd->_read_oob = doc_read_oob; |
477 | mtd->write_oob = doc_write_oob; | 478 | mtd->_write_oob = doc_write_oob; |
478 | this->curfloor = -1; | 479 | this->curfloor = -1; |
479 | this->curchip = -1; | 480 | this->curchip = -1; |
480 | 481 | ||
@@ -581,10 +582,6 @@ static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
581 | void __iomem * docptr = this->virtadr; | 582 | void __iomem * docptr = this->virtadr; |
582 | struct Nand *mychip = &this->chips[from >> (this->chipshift)]; | 583 | struct Nand *mychip = &this->chips[from >> (this->chipshift)]; |
583 | 584 | ||
584 | /* Don't allow read past end of device */ | ||
585 | if (from >= this->totlen) | ||
586 | return -EINVAL; | ||
587 | |||
588 | /* Don't allow a single read to cross a 512-byte block boundary */ | 585 | /* Don't allow a single read to cross a 512-byte block boundary */ |
589 | if (from + len > ((from | 0x1ff) + 1)) | 586 | if (from + len > ((from | 0x1ff) + 1)) |
590 | len = ((from | 0x1ff) + 1) - from; | 587 | len = ((from | 0x1ff) + 1) - from; |
@@ -700,10 +697,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
700 | void __iomem * docptr = this->virtadr; | 697 | void __iomem * docptr = this->virtadr; |
701 | struct Nand *mychip = &this->chips[to >> (this->chipshift)]; | 698 | struct Nand *mychip = &this->chips[to >> (this->chipshift)]; |
702 | 699 | ||
703 | /* Don't allow write past end of device */ | ||
704 | if (to >= this->totlen) | ||
705 | return -EINVAL; | ||
706 | |||
707 | /* Don't allow writes which aren't exactly one block (512 bytes) */ | 700 | /* Don't allow writes which aren't exactly one block (512 bytes) */ |
708 | if ((to & 0x1ff) || (len != 0x200)) | 701 | if ((to & 0x1ff) || (len != 0x200)) |
709 | return -EINVAL; | 702 | return -EINVAL; |
@@ -800,7 +793,6 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
800 | printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to); | 793 | printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to); |
801 | /* Error in programming | 794 | /* Error in programming |
802 | FIXME: implement Bad Block Replacement (in nftl.c ??) */ | 795 | FIXME: implement Bad Block Replacement (in nftl.c ??) */ |
803 | *retlen = 0; | ||
804 | ret = -EIO; | 796 | ret = -EIO; |
805 | } | 797 | } |
806 | dummy = ReadDOC(docptr, Mplus_LastDataRead); | 798 | dummy = ReadDOC(docptr, Mplus_LastDataRead); |
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index ad11ef0a81f4..8272c02668d6 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c | |||
@@ -80,14 +80,9 @@ static struct nand_ecclayout docg3_oobinfo = { | |||
80 | .oobavail = 8, | 80 | .oobavail = 8, |
81 | }; | 81 | }; |
82 | 82 | ||
83 | /** | ||
84 | * struct docg3_bch - BCH engine | ||
85 | */ | ||
86 | static struct bch_control *docg3_bch; | ||
87 | |||
88 | static inline u8 doc_readb(struct docg3 *docg3, u16 reg) | 83 | static inline u8 doc_readb(struct docg3 *docg3, u16 reg) |
89 | { | 84 | { |
90 | u8 val = readb(docg3->base + reg); | 85 | u8 val = readb(docg3->cascade->base + reg); |
91 | 86 | ||
92 | trace_docg3_io(0, 8, reg, (int)val); | 87 | trace_docg3_io(0, 8, reg, (int)val); |
93 | return val; | 88 | return val; |
@@ -95,7 +90,7 @@ static inline u8 doc_readb(struct docg3 *docg3, u16 reg) | |||
95 | 90 | ||
96 | static inline u16 doc_readw(struct docg3 *docg3, u16 reg) | 91 | static inline u16 doc_readw(struct docg3 *docg3, u16 reg) |
97 | { | 92 | { |
98 | u16 val = readw(docg3->base + reg); | 93 | u16 val = readw(docg3->cascade->base + reg); |
99 | 94 | ||
100 | trace_docg3_io(0, 16, reg, (int)val); | 95 | trace_docg3_io(0, 16, reg, (int)val); |
101 | return val; | 96 | return val; |
@@ -103,13 +98,13 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg) | |||
103 | 98 | ||
104 | static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) | 99 | static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg) |
105 | { | 100 | { |
106 | writeb(val, docg3->base + reg); | 101 | writeb(val, docg3->cascade->base + reg); |
107 | trace_docg3_io(1, 8, reg, val); | 102 | trace_docg3_io(1, 8, reg, val); |
108 | } | 103 | } |
109 | 104 | ||
110 | static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg) | 105 | static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg) |
111 | { | 106 | { |
112 | writew(val, docg3->base + reg); | 107 | writew(val, docg3->cascade->base + reg); |
113 | trace_docg3_io(1, 16, reg, val); | 108 | trace_docg3_io(1, 16, reg, val); |
114 | } | 109 | } |
115 | 110 | ||
@@ -643,7 +638,8 @@ static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc) | |||
643 | 638 | ||
644 | for (i = 0; i < DOC_ECC_BCH_SIZE; i++) | 639 | for (i = 0; i < DOC_ECC_BCH_SIZE; i++) |
645 | ecc[i] = bitrev8(hwecc[i]); | 640 | ecc[i] = bitrev8(hwecc[i]); |
646 | numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES, | 641 | numerrs = decode_bch(docg3->cascade->bch, NULL, |
642 | DOC_ECC_BCH_COVERED_BYTES, | ||
647 | NULL, ecc, NULL, errorpos); | 643 | NULL, ecc, NULL, errorpos); |
648 | BUG_ON(numerrs == -EINVAL); | 644 | BUG_ON(numerrs == -EINVAL); |
649 | if (numerrs < 0) | 645 | if (numerrs < 0) |
@@ -734,7 +730,7 @@ err: | |||
734 | * doc_read_page_getbytes - Reads bytes from a prepared page | 730 | * doc_read_page_getbytes - Reads bytes from a prepared page |
735 | * @docg3: the device | 731 | * @docg3: the device |
736 | * @len: the number of bytes to be read (must be a multiple of 4) | 732 | * @len: the number of bytes to be read (must be a multiple of 4) |
737 | * @buf: the buffer to be filled in | 733 | * @buf: the buffer to be filled in (or NULL to discard the bytes) |
738 | * @first: 1 if first time read, DOC_READADDRESS should be set | 734 | * @first: 1 if first time read, DOC_READADDRESS should be set |
739 | * | 735 | * |
740 | */ | 736 | */ |
@@ -849,7 +845,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, | |||
849 | struct mtd_oob_ops *ops) | 845 | struct mtd_oob_ops *ops) |
850 | { | 846 | { |
851 | struct docg3 *docg3 = mtd->priv; | 847 | struct docg3 *docg3 = mtd->priv; |
852 | int block0, block1, page, ret, ofs = 0; | 848 | int block0, block1, page, ret, skip, ofs = 0; |
853 | u8 *oobbuf = ops->oobbuf; | 849 | u8 *oobbuf = ops->oobbuf; |
854 | u8 *buf = ops->datbuf; | 850 | u8 *buf = ops->datbuf; |
855 | size_t len, ooblen, nbdata, nboob; | 851 | size_t len, ooblen, nbdata, nboob; |
@@ -869,34 +865,36 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, | |||
869 | 865 | ||
870 | doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", | 866 | doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n", |
871 | from, ops->mode, buf, len, oobbuf, ooblen); | 867 | from, ops->mode, buf, len, oobbuf, ooblen); |
872 | if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) || | 868 | if (ooblen % DOC_LAYOUT_OOB_SIZE) |
873 | (from % DOC_LAYOUT_PAGE_SIZE)) | ||
874 | return -EINVAL; | 869 | return -EINVAL; |
875 | 870 | ||
876 | ret = -EINVAL; | 871 | if (from + len > mtd->size) |
877 | calc_block_sector(from + len, &block0, &block1, &page, &ofs, | 872 | return -EINVAL; |
878 | docg3->reliable); | ||
879 | if (block1 > docg3->max_block) | ||
880 | goto err; | ||
881 | 873 | ||
882 | ops->oobretlen = 0; | 874 | ops->oobretlen = 0; |
883 | ops->retlen = 0; | 875 | ops->retlen = 0; |
884 | ret = 0; | 876 | ret = 0; |
877 | skip = from % DOC_LAYOUT_PAGE_SIZE; | ||
878 | mutex_lock(&docg3->cascade->lock); | ||
885 | while (!ret && (len > 0 || ooblen > 0)) { | 879 | while (!ret && (len > 0 || ooblen > 0)) { |
886 | calc_block_sector(from, &block0, &block1, &page, &ofs, | 880 | calc_block_sector(from - skip, &block0, &block1, &page, &ofs, |
887 | docg3->reliable); | 881 | docg3->reliable); |
888 | nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE); | 882 | nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip); |
889 | nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE); | 883 | nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE); |
890 | ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); | 884 | ret = doc_read_page_prepare(docg3, block0, block1, page, ofs); |
891 | if (ret < 0) | 885 | if (ret < 0) |
892 | goto err; | 886 | goto out; |
893 | ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); | 887 | ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); |
894 | if (ret < 0) | 888 | if (ret < 0) |
895 | goto err_in_read; | 889 | goto err_in_read; |
896 | ret = doc_read_page_getbytes(docg3, nbdata, buf, 1); | 890 | ret = doc_read_page_getbytes(docg3, skip, NULL, 1); |
891 | if (ret < skip) | ||
892 | goto err_in_read; | ||
893 | ret = doc_read_page_getbytes(docg3, nbdata, buf, 0); | ||
897 | if (ret < nbdata) | 894 | if (ret < nbdata) |
898 | goto err_in_read; | 895 | goto err_in_read; |
899 | doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata, | 896 | doc_read_page_getbytes(docg3, |
897 | DOC_LAYOUT_PAGE_SIZE - nbdata - skip, | ||
900 | NULL, 0); | 898 | NULL, 0); |
901 | ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); | 899 | ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); |
902 | if (ret < nboob) | 900 | if (ret < nboob) |
@@ -950,13 +948,15 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from, | |||
950 | len -= nbdata; | 948 | len -= nbdata; |
951 | ooblen -= nboob; | 949 | ooblen -= nboob; |
952 | from += DOC_LAYOUT_PAGE_SIZE; | 950 | from += DOC_LAYOUT_PAGE_SIZE; |
951 | skip = 0; | ||
953 | } | 952 | } |
954 | 953 | ||
954 | out: | ||
955 | mutex_unlock(&docg3->cascade->lock); | ||
955 | return ret; | 956 | return ret; |
956 | err_in_read: | 957 | err_in_read: |
957 | doc_read_page_finish(docg3); | 958 | doc_read_page_finish(docg3); |
958 | err: | 959 | goto out; |
959 | return ret; | ||
960 | } | 960 | } |
961 | 961 | ||
962 | /** | 962 | /** |
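With the from % DOC_LAYOUT_PAGE_SIZE restriction gone, doc_read_oob() now accepts reads that start inside a page: skip holds the offset into the first page, an initial doc_read_page_getbytes() call throws those bytes away, nbdata is capped at DOC_LAYOUT_PAGE_SIZE - skip, and skip is cleared after the first pass. The byte accounting, shown as a simplified stand-alone sketch (the driver itself advances by whole pages and recomputes block/page through calc_block_sector()):

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SZ 512	/* stand-in for DOC_LAYOUT_PAGE_SIZE */
	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	/* Cut an unaligned (from, len) request into per-page chunks. */
	int main(void)
	{
		unsigned long from = 700;	/* example: 188 bytes into page 1 */
		size_t len = 1000;
		size_t skip = from % PAGE_SZ;

		while (len > 0) {
			size_t nbdata = MIN(len, PAGE_SZ - skip);

			printf("page %lu: skip %zu, copy %zu bytes\n",
			       from / PAGE_SZ, skip, nbdata);

			from += nbdata;
			len -= nbdata;
			skip = 0;	/* only the first chunk is unaligned */
		}
		return 0;
	}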
@@ -1114,10 +1114,10 @@ static int doc_get_op_status(struct docg3 *docg3) | |||
1114 | */ | 1114 | */ |
1115 | static int doc_write_erase_wait_status(struct docg3 *docg3) | 1115 | static int doc_write_erase_wait_status(struct docg3 *docg3) |
1116 | { | 1116 | { |
1117 | int status, ret = 0; | 1117 | int i, status, ret = 0; |
1118 | 1118 | ||
1119 | if (!doc_is_ready(docg3)) | 1119 | for (i = 0; !doc_is_ready(docg3) && i < 5; i++) |
1120 | usleep_range(3000, 3000); | 1120 | msleep(20); |
1121 | if (!doc_is_ready(docg3)) { | 1121 | if (!doc_is_ready(docg3)) { |
1122 | doc_dbg("Timeout reached and the chip is still not ready\n"); | 1122 | doc_dbg("Timeout reached and the chip is still not ready\n"); |
1123 | ret = -EAGAIN; | 1123 | ret = -EAGAIN; |
@@ -1196,18 +1196,19 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info) | |||
1196 | int block0, block1, page, ret, ofs = 0; | 1196 | int block0, block1, page, ret, ofs = 0; |
1197 | 1197 | ||
1198 | doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len); | 1198 | doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len); |
1199 | doc_set_device_id(docg3, docg3->device_id); | ||
1200 | 1199 | ||
1201 | info->state = MTD_ERASE_PENDING; | 1200 | info->state = MTD_ERASE_PENDING; |
1202 | calc_block_sector(info->addr + info->len, &block0, &block1, &page, | 1201 | calc_block_sector(info->addr + info->len, &block0, &block1, &page, |
1203 | &ofs, docg3->reliable); | 1202 | &ofs, docg3->reliable); |
1204 | ret = -EINVAL; | 1203 | ret = -EINVAL; |
1205 | if (block1 > docg3->max_block || page || ofs) | 1204 | if (info->addr + info->len > mtd->size || page || ofs) |
1206 | goto reset_err; | 1205 | goto reset_err; |
1207 | 1206 | ||
1208 | ret = 0; | 1207 | ret = 0; |
1209 | calc_block_sector(info->addr, &block0, &block1, &page, &ofs, | 1208 | calc_block_sector(info->addr, &block0, &block1, &page, &ofs, |
1210 | docg3->reliable); | 1209 | docg3->reliable); |
1210 | mutex_lock(&docg3->cascade->lock); | ||
1211 | doc_set_device_id(docg3, docg3->device_id); | ||
1211 | doc_set_reliable_mode(docg3); | 1212 | doc_set_reliable_mode(docg3); |
1212 | for (len = info->len; !ret && len > 0; len -= mtd->erasesize) { | 1213 | for (len = info->len; !ret && len > 0; len -= mtd->erasesize) { |
1213 | info->state = MTD_ERASING; | 1214 | info->state = MTD_ERASING; |
@@ -1215,6 +1216,7 @@ static int doc_erase(struct mtd_info *mtd, struct erase_info *info) | |||
1215 | block0 += 2; | 1216 | block0 += 2; |
1216 | block1 += 2; | 1217 | block1 += 2; |
1217 | } | 1218 | } |
1219 | mutex_unlock(&docg3->cascade->lock); | ||
1218 | 1220 | ||
1219 | if (ret) | 1221 | if (ret) |
1220 | goto reset_err; | 1222 | goto reset_err; |
@@ -1401,7 +1403,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
1401 | struct mtd_oob_ops *ops) | 1403 | struct mtd_oob_ops *ops) |
1402 | { | 1404 | { |
1403 | struct docg3 *docg3 = mtd->priv; | 1405 | struct docg3 *docg3 = mtd->priv; |
1404 | int block0, block1, page, ret, pofs = 0, autoecc, oobdelta; | 1406 | int ret, autoecc, oobdelta; |
1405 | u8 *oobbuf = ops->oobbuf; | 1407 | u8 *oobbuf = ops->oobbuf; |
1406 | u8 *buf = ops->datbuf; | 1408 | u8 *buf = ops->datbuf; |
1407 | size_t len, ooblen; | 1409 | size_t len, ooblen; |
@@ -1438,12 +1440,8 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
1438 | if (len && ooblen && | 1440 | if (len && ooblen && |
1439 | (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta)) | 1441 | (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta)) |
1440 | return -EINVAL; | 1442 | return -EINVAL; |
1441 | 1443 | if (ofs + len > mtd->size) | |
1442 | ret = -EINVAL; | 1444 | return -EINVAL; |
1443 | calc_block_sector(ofs + len, &block0, &block1, &page, &pofs, | ||
1444 | docg3->reliable); | ||
1445 | if (block1 > docg3->max_block) | ||
1446 | goto err; | ||
1447 | 1445 | ||
1448 | ops->oobretlen = 0; | 1446 | ops->oobretlen = 0; |
1449 | ops->retlen = 0; | 1447 | ops->retlen = 0; |
@@ -1457,6 +1455,7 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
1457 | if (autoecc < 0) | 1455 | if (autoecc < 0) |
1458 | return autoecc; | 1456 | return autoecc; |
1459 | 1457 | ||
1458 | mutex_lock(&docg3->cascade->lock); | ||
1460 | while (!ret && len > 0) { | 1459 | while (!ret && len > 0) { |
1461 | memset(oob, 0, sizeof(oob)); | 1460 | memset(oob, 0, sizeof(oob)); |
1462 | if (ofs == docg3->oob_write_ofs) | 1461 | if (ofs == docg3->oob_write_ofs) |
@@ -1477,8 +1476,9 @@ static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, | |||
1477 | } | 1476 | } |
1478 | ops->retlen += DOC_LAYOUT_PAGE_SIZE; | 1477 | ops->retlen += DOC_LAYOUT_PAGE_SIZE; |
1479 | } | 1478 | } |
1480 | err: | 1479 | |
1481 | doc_set_device_id(docg3, 0); | 1480 | doc_set_device_id(docg3, 0); |
1481 | mutex_unlock(&docg3->cascade->lock); | ||
1482 | return ret; | 1482 | return ret; |
1483 | } | 1483 | } |
1484 | 1484 | ||
@@ -1535,9 +1535,11 @@ static ssize_t dps0_is_key_locked(struct device *dev, | |||
1535 | struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); | 1535 | struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); |
1536 | int dps0; | 1536 | int dps0; |
1537 | 1537 | ||
1538 | mutex_lock(&docg3->cascade->lock); | ||
1538 | doc_set_device_id(docg3, docg3->device_id); | 1539 | doc_set_device_id(docg3, docg3->device_id); |
1539 | dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); | 1540 | dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); |
1540 | doc_set_device_id(docg3, 0); | 1541 | doc_set_device_id(docg3, 0); |
1542 | mutex_unlock(&docg3->cascade->lock); | ||
1541 | 1543 | ||
1542 | return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK)); | 1544 | return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK)); |
1543 | } | 1545 | } |
@@ -1548,9 +1550,11 @@ static ssize_t dps1_is_key_locked(struct device *dev, | |||
1548 | struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); | 1550 | struct docg3 *docg3 = sysfs_dev2docg3(dev, attr); |
1549 | int dps1; | 1551 | int dps1; |
1550 | 1552 | ||
1553 | mutex_lock(&docg3->cascade->lock); | ||
1551 | doc_set_device_id(docg3, docg3->device_id); | 1554 | doc_set_device_id(docg3, docg3->device_id); |
1552 | dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); | 1555 | dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); |
1553 | doc_set_device_id(docg3, 0); | 1556 | doc_set_device_id(docg3, 0); |
1557 | mutex_unlock(&docg3->cascade->lock); | ||
1554 | 1558 | ||
1555 | return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK)); | 1559 | return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK)); |
1556 | } | 1560 | } |
@@ -1565,10 +1569,12 @@ static ssize_t dps0_insert_key(struct device *dev, | |||
1565 | if (count != DOC_LAYOUT_DPS_KEY_LENGTH) | 1569 | if (count != DOC_LAYOUT_DPS_KEY_LENGTH) |
1566 | return -EINVAL; | 1570 | return -EINVAL; |
1567 | 1571 | ||
1572 | mutex_lock(&docg3->cascade->lock); | ||
1568 | doc_set_device_id(docg3, docg3->device_id); | 1573 | doc_set_device_id(docg3, docg3->device_id); |
1569 | for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) | 1574 | for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) |
1570 | doc_writeb(docg3, buf[i], DOC_DPS0_KEY); | 1575 | doc_writeb(docg3, buf[i], DOC_DPS0_KEY); |
1571 | doc_set_device_id(docg3, 0); | 1576 | doc_set_device_id(docg3, 0); |
1577 | mutex_unlock(&docg3->cascade->lock); | ||
1572 | return count; | 1578 | return count; |
1573 | } | 1579 | } |
1574 | 1580 | ||
@@ -1582,10 +1588,12 @@ static ssize_t dps1_insert_key(struct device *dev, | |||
1582 | if (count != DOC_LAYOUT_DPS_KEY_LENGTH) | 1588 | if (count != DOC_LAYOUT_DPS_KEY_LENGTH) |
1583 | return -EINVAL; | 1589 | return -EINVAL; |
1584 | 1590 | ||
1591 | mutex_lock(&docg3->cascade->lock); | ||
1585 | doc_set_device_id(docg3, docg3->device_id); | 1592 | doc_set_device_id(docg3, docg3->device_id); |
1586 | for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) | 1593 | for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++) |
1587 | doc_writeb(docg3, buf[i], DOC_DPS1_KEY); | 1594 | doc_writeb(docg3, buf[i], DOC_DPS1_KEY); |
1588 | doc_set_device_id(docg3, 0); | 1595 | doc_set_device_id(docg3, 0); |
1596 | mutex_unlock(&docg3->cascade->lock); | ||
1589 | return count; | 1597 | return count; |
1590 | } | 1598 | } |
1591 | 1599 | ||
@@ -1601,13 +1609,13 @@ static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = { | |||
1601 | }; | 1609 | }; |
1602 | 1610 | ||
1603 | static int doc_register_sysfs(struct platform_device *pdev, | 1611 | static int doc_register_sysfs(struct platform_device *pdev, |
1604 | struct mtd_info **floors) | 1612 | struct docg3_cascade *cascade) |
1605 | { | 1613 | { |
1606 | int ret = 0, floor, i = 0; | 1614 | int ret = 0, floor, i = 0; |
1607 | struct device *dev = &pdev->dev; | 1615 | struct device *dev = &pdev->dev; |
1608 | 1616 | ||
1609 | for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor]; | 1617 | for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && |
1610 | floor++) | 1618 | cascade->floors[floor]; floor++) |
1611 | for (i = 0; !ret && i < 4; i++) | 1619 | for (i = 0; !ret && i < 4; i++) |
1612 | ret = device_create_file(dev, &doc_sys_attrs[floor][i]); | 1620 | ret = device_create_file(dev, &doc_sys_attrs[floor][i]); |
1613 | if (!ret) | 1621 | if (!ret) |
@@ -1621,12 +1629,12 @@ static int doc_register_sysfs(struct platform_device *pdev, | |||
1621 | } | 1629 | } |
1622 | 1630 | ||
1623 | static void doc_unregister_sysfs(struct platform_device *pdev, | 1631 | static void doc_unregister_sysfs(struct platform_device *pdev, |
1624 | struct mtd_info **floors) | 1632 | struct docg3_cascade *cascade) |
1625 | { | 1633 | { |
1626 | struct device *dev = &pdev->dev; | 1634 | struct device *dev = &pdev->dev; |
1627 | int floor, i; | 1635 | int floor, i; |
1628 | 1636 | ||
1629 | for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor]; | 1637 | for (floor = 0; floor < DOC_MAX_NBFLOORS && cascade->floors[floor]; |
1630 | floor++) | 1638 | floor++) |
1631 | for (i = 0; i < 4; i++) | 1639 | for (i = 0; i < 4; i++) |
1632 | device_remove_file(dev, &doc_sys_attrs[floor][i]); | 1640 | device_remove_file(dev, &doc_sys_attrs[floor][i]); |
@@ -1640,7 +1648,11 @@ static int dbg_flashctrl_show(struct seq_file *s, void *p) | |||
1640 | struct docg3 *docg3 = (struct docg3 *)s->private; | 1648 | struct docg3 *docg3 = (struct docg3 *)s->private; |
1641 | 1649 | ||
1642 | int pos = 0; | 1650 | int pos = 0; |
1643 | u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); | 1651 | u8 fctrl; |
1652 | |||
1653 | mutex_lock(&docg3->cascade->lock); | ||
1654 | fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL); | ||
1655 | mutex_unlock(&docg3->cascade->lock); | ||
1644 | 1656 | ||
1645 | pos += seq_printf(s, | 1657 | pos += seq_printf(s, |
1646 | "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n", | 1658 | "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n", |
@@ -1658,9 +1670,12 @@ static int dbg_asicmode_show(struct seq_file *s, void *p) | |||
1658 | { | 1670 | { |
1659 | struct docg3 *docg3 = (struct docg3 *)s->private; | 1671 | struct docg3 *docg3 = (struct docg3 *)s->private; |
1660 | 1672 | ||
1661 | int pos = 0; | 1673 | int pos = 0, pctrl, mode; |
1662 | int pctrl = doc_register_readb(docg3, DOC_ASICMODE); | 1674 | |
1663 | int mode = pctrl & 0x03; | 1675 | mutex_lock(&docg3->cascade->lock); |
1676 | pctrl = doc_register_readb(docg3, DOC_ASICMODE); | ||
1677 | mode = pctrl & 0x03; | ||
1678 | mutex_unlock(&docg3->cascade->lock); | ||
1664 | 1679 | ||
1665 | pos += seq_printf(s, | 1680 | pos += seq_printf(s, |
1666 | "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (", | 1681 | "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (", |
@@ -1692,7 +1707,11 @@ static int dbg_device_id_show(struct seq_file *s, void *p) | |||
1692 | { | 1707 | { |
1693 | struct docg3 *docg3 = (struct docg3 *)s->private; | 1708 | struct docg3 *docg3 = (struct docg3 *)s->private; |
1694 | int pos = 0; | 1709 | int pos = 0; |
1695 | int id = doc_register_readb(docg3, DOC_DEVICESELECT); | 1710 | int id; |
1711 | |||
1712 | mutex_lock(&docg3->cascade->lock); | ||
1713 | id = doc_register_readb(docg3, DOC_DEVICESELECT); | ||
1714 | mutex_unlock(&docg3->cascade->lock); | ||
1696 | 1715 | ||
1697 | pos += seq_printf(s, "DeviceId = %d\n", id); | 1716 | pos += seq_printf(s, "DeviceId = %d\n", id); |
1698 | return pos; | 1717 | return pos; |
@@ -1705,6 +1724,7 @@ static int dbg_protection_show(struct seq_file *s, void *p) | |||
1705 | int pos = 0; | 1724 | int pos = 0; |
1706 | int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high; | 1725 | int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high; |
1707 | 1726 | ||
1727 | mutex_lock(&docg3->cascade->lock); | ||
1708 | protect = doc_register_readb(docg3, DOC_PROTECTION); | 1728 | protect = doc_register_readb(docg3, DOC_PROTECTION); |
1709 | dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); | 1729 | dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS); |
1710 | dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW); | 1730 | dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW); |
@@ -1712,6 +1732,7 @@ static int dbg_protection_show(struct seq_file *s, void *p) | |||
1712 | dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); | 1732 | dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS); |
1713 | dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW); | 1733 | dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW); |
1714 | dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH); | 1734 | dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH); |
1735 | mutex_unlock(&docg3->cascade->lock); | ||
1715 | 1736 | ||
1716 | pos += seq_printf(s, "Protection = 0x%02x (", | 1737 | pos += seq_printf(s, "Protection = 0x%02x (", |
1717 | protect); | 1738 | protect); |
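The sysfs and debugfs hunks above all take the same shape: acquire docg3->cascade->lock, perform the register accesses, drop the lock before formatting any output. Where only a single register is involved, that could be folded into a small helper; a hedged sketch (the helper name is illustrative and not part of the driver):

	#include <linux/mutex.h>

	/* Illustrative wrapper: one register read with the cascade lock held. */
	static u8 doc_register_readb_locked(struct docg3 *docg3, u16 reg)
	{
		u8 val;

		mutex_lock(&docg3->cascade->lock);
		val = doc_register_readb(docg3, reg);
		mutex_unlock(&docg3->cascade->lock);

		return val;
	}

Multi-register readers such as dbg_protection_show() still need to hold the lock across the whole sequence so the values form one consistent snapshot.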
@@ -1804,7 +1825,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) | |||
1804 | 1825 | ||
1805 | switch (chip_id) { | 1826 | switch (chip_id) { |
1806 | case DOC_CHIPID_G3: | 1827 | case DOC_CHIPID_G3: |
1807 | mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d", | 1828 | mtd->name = kasprintf(GFP_KERNEL, "docg3.%d", |
1808 | docg3->device_id); | 1829 | docg3->device_id); |
1809 | docg3->max_block = 2047; | 1830 | docg3->max_block = 2047; |
1810 | break; | 1831 | break; |
@@ -1817,16 +1838,17 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) | |||
1817 | mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES; | 1838 | mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES; |
1818 | if (docg3->reliable == 2) | 1839 | if (docg3->reliable == 2) |
1819 | mtd->erasesize /= 2; | 1840 | mtd->erasesize /= 2; |
1820 | mtd->writesize = DOC_LAYOUT_PAGE_SIZE; | 1841 | mtd->writebufsize = mtd->writesize = DOC_LAYOUT_PAGE_SIZE; |
1821 | mtd->oobsize = DOC_LAYOUT_OOB_SIZE; | 1842 | mtd->oobsize = DOC_LAYOUT_OOB_SIZE; |
1822 | mtd->owner = THIS_MODULE; | 1843 | mtd->owner = THIS_MODULE; |
1823 | mtd->erase = doc_erase; | 1844 | mtd->_erase = doc_erase; |
1824 | mtd->read = doc_read; | 1845 | mtd->_read = doc_read; |
1825 | mtd->write = doc_write; | 1846 | mtd->_write = doc_write; |
1826 | mtd->read_oob = doc_read_oob; | 1847 | mtd->_read_oob = doc_read_oob; |
1827 | mtd->write_oob = doc_write_oob; | 1848 | mtd->_write_oob = doc_write_oob; |
1828 | mtd->block_isbad = doc_block_isbad; | 1849 | mtd->_block_isbad = doc_block_isbad; |
1829 | mtd->ecclayout = &docg3_oobinfo; | 1850 | mtd->ecclayout = &docg3_oobinfo; |
1851 | mtd->ecc_strength = DOC_ECC_BCH_T; | ||
1830 | } | 1852 | } |
1831 | 1853 | ||
1832 | /** | 1854 | /** |
@@ -1834,6 +1856,7 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) | |||
1834 | * @base: the io space where the device is probed | 1856 | * @base: the io space where the device is probed |
1835 | * @floor: the floor of the probed device | 1857 | * @floor: the floor of the probed device |
1836 | * @dev: the device | 1858 | * @dev: the device |
1859 | * @cascade: the cascade of chips this devices will belong to | ||
1837 | * | 1860 | * |
1838 | * Checks whether a device at the specified IO range, and floor is available. | 1861 | * Checks whether a device at the specified IO range, and floor is available. |
1839 | * | 1862 | * |
@@ -1841,8 +1864,8 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd) | |||
1841 | * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is | 1864 | * if a memory allocation failed. If floor 0 is checked, a reset of the ASIC is |
1842 | * launched. | 1865 | * launched. |
1843 | */ | 1866 | */ |
1844 | static struct mtd_info *doc_probe_device(void __iomem *base, int floor, | 1867 | static struct mtd_info * __init |
1845 | struct device *dev) | 1868 | doc_probe_device(struct docg3_cascade *cascade, int floor, struct device *dev) |
1846 | { | 1869 | { |
1847 | int ret, bbt_nbpages; | 1870 | int ret, bbt_nbpages; |
1848 | u16 chip_id, chip_id_inv; | 1871 | u16 chip_id, chip_id_inv; |
@@ -1865,7 +1888,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor, | |||
1865 | 1888 | ||
1866 | docg3->dev = dev; | 1889 | docg3->dev = dev; |
1867 | docg3->device_id = floor; | 1890 | docg3->device_id = floor; |
1868 | docg3->base = base; | 1891 | docg3->cascade = cascade; |
1869 | doc_set_device_id(docg3, docg3->device_id); | 1892 | doc_set_device_id(docg3, docg3->device_id); |
1870 | if (!floor) | 1893 | if (!floor) |
1871 | doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); | 1894 | doc_set_asic_mode(docg3, DOC_ASICMODE_RESET); |
@@ -1882,7 +1905,7 @@ static struct mtd_info *doc_probe_device(void __iomem *base, int floor, | |||
1882 | switch (chip_id) { | 1905 | switch (chip_id) { |
1883 | case DOC_CHIPID_G3: | 1906 | case DOC_CHIPID_G3: |
1884 | doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n", | 1907 | doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n", |
1885 | base, floor); | 1908 | docg3->cascade->base, floor); |
1886 | break; | 1909 | break; |
1887 | default: | 1910 | default: |
1888 | doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); | 1911 | doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id); |
@@ -1927,10 +1950,12 @@ static void doc_release_device(struct mtd_info *mtd) | |||
1927 | static int docg3_resume(struct platform_device *pdev) | 1950 | static int docg3_resume(struct platform_device *pdev) |
1928 | { | 1951 | { |
1929 | int i; | 1952 | int i; |
1953 | struct docg3_cascade *cascade; | ||
1930 | struct mtd_info **docg3_floors, *mtd; | 1954 | struct mtd_info **docg3_floors, *mtd; |
1931 | struct docg3 *docg3; | 1955 | struct docg3 *docg3; |
1932 | 1956 | ||
1933 | docg3_floors = platform_get_drvdata(pdev); | 1957 | cascade = platform_get_drvdata(pdev); |
1958 | docg3_floors = cascade->floors; | ||
1934 | mtd = docg3_floors[0]; | 1959 | mtd = docg3_floors[0]; |
1935 | docg3 = mtd->priv; | 1960 | docg3 = mtd->priv; |
1936 | 1961 | ||
@@ -1952,11 +1977,13 @@ static int docg3_resume(struct platform_device *pdev) | |||
1952 | static int docg3_suspend(struct platform_device *pdev, pm_message_t state) | 1977 | static int docg3_suspend(struct platform_device *pdev, pm_message_t state) |
1953 | { | 1978 | { |
1954 | int floor, i; | 1979 | int floor, i; |
1980 | struct docg3_cascade *cascade; | ||
1955 | struct mtd_info **docg3_floors, *mtd; | 1981 | struct mtd_info **docg3_floors, *mtd; |
1956 | struct docg3 *docg3; | 1982 | struct docg3 *docg3; |
1957 | u8 ctrl, pwr_down; | 1983 | u8 ctrl, pwr_down; |
1958 | 1984 | ||
1959 | docg3_floors = platform_get_drvdata(pdev); | 1985 | cascade = platform_get_drvdata(pdev); |
1986 | docg3_floors = cascade->floors; | ||
1960 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { | 1987 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { |
1961 | mtd = docg3_floors[floor]; | 1988 | mtd = docg3_floors[floor]; |
1962 | if (!mtd) | 1989 | if (!mtd) |
@@ -2006,7 +2033,7 @@ static int __init docg3_probe(struct platform_device *pdev) | |||
2006 | struct resource *ress; | 2033 | struct resource *ress; |
2007 | void __iomem *base; | 2034 | void __iomem *base; |
2008 | int ret, floor, found = 0; | 2035 | int ret, floor, found = 0; |
2009 | struct mtd_info **docg3_floors; | 2036 | struct docg3_cascade *cascade; |
2010 | 2037 | ||
2011 | ret = -ENXIO; | 2038 | ret = -ENXIO; |
2012 | ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 2039 | ress = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
@@ -2017,17 +2044,19 @@ static int __init docg3_probe(struct platform_device *pdev) | |||
2017 | base = ioremap(ress->start, DOC_IOSPACE_SIZE); | 2044 | base = ioremap(ress->start, DOC_IOSPACE_SIZE); |
2018 | 2045 | ||
2019 | ret = -ENOMEM; | 2046 | ret = -ENOMEM; |
2020 | docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS, | 2047 | cascade = kzalloc(sizeof(*cascade) * DOC_MAX_NBFLOORS, |
2021 | GFP_KERNEL); | 2048 | GFP_KERNEL); |
2022 | if (!docg3_floors) | 2049 | if (!cascade) |
2023 | goto nomem1; | 2050 | goto nomem1; |
2024 | docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, | 2051 | cascade->base = base; |
2052 | mutex_init(&cascade->lock); | ||
2053 | cascade->bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T, | ||
2025 | DOC_ECC_BCH_PRIMPOLY); | 2054 | DOC_ECC_BCH_PRIMPOLY); |
2026 | if (!docg3_bch) | 2055 | if (!cascade->bch) |
2027 | goto nomem2; | 2056 | goto nomem2; |
2028 | 2057 | ||
2029 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { | 2058 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) { |
2030 | mtd = doc_probe_device(base, floor, dev); | 2059 | mtd = doc_probe_device(cascade, floor, dev); |
2031 | if (IS_ERR(mtd)) { | 2060 | if (IS_ERR(mtd)) { |
2032 | ret = PTR_ERR(mtd); | 2061 | ret = PTR_ERR(mtd); |
2033 | goto err_probe; | 2062 | goto err_probe; |
@@ -2038,7 +2067,7 @@ static int __init docg3_probe(struct platform_device *pdev) | |||
2038 | else | 2067 | else |
2039 | continue; | 2068 | continue; |
2040 | } | 2069 | } |
2041 | docg3_floors[floor] = mtd; | 2070 | cascade->floors[floor] = mtd; |
2042 | ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL, | 2071 | ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL, |
2043 | 0); | 2072 | 0); |
2044 | if (ret) | 2073 | if (ret) |
@@ -2046,26 +2075,26 @@ static int __init docg3_probe(struct platform_device *pdev) | |||
2046 | found++; | 2075 | found++; |
2047 | } | 2076 | } |
2048 | 2077 | ||
2049 | ret = doc_register_sysfs(pdev, docg3_floors); | 2078 | ret = doc_register_sysfs(pdev, cascade); |
2050 | if (ret) | 2079 | if (ret) |
2051 | goto err_probe; | 2080 | goto err_probe; |
2052 | if (!found) | 2081 | if (!found) |
2053 | goto notfound; | 2082 | goto notfound; |
2054 | 2083 | ||
2055 | platform_set_drvdata(pdev, docg3_floors); | 2084 | platform_set_drvdata(pdev, cascade); |
2056 | doc_dbg_register(docg3_floors[0]->priv); | 2085 | doc_dbg_register(cascade->floors[0]->priv); |
2057 | return 0; | 2086 | return 0; |
2058 | 2087 | ||
2059 | notfound: | 2088 | notfound: |
2060 | ret = -ENODEV; | 2089 | ret = -ENODEV; |
2061 | dev_info(dev, "No supported DiskOnChip found\n"); | 2090 | dev_info(dev, "No supported DiskOnChip found\n"); |
2062 | err_probe: | 2091 | err_probe: |
2063 | free_bch(docg3_bch); | 2092 | kfree(cascade->bch); |
2064 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) | 2093 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) |
2065 | if (docg3_floors[floor]) | 2094 | if (cascade->floors[floor]) |
2066 | doc_release_device(docg3_floors[floor]); | 2095 | doc_release_device(cascade->floors[floor]); |
2067 | nomem2: | 2096 | nomem2: |
2068 | kfree(docg3_floors); | 2097 | kfree(cascade); |
2069 | nomem1: | 2098 | nomem1: |
2070 | iounmap(base); | 2099 | iounmap(base); |
2071 | noress: | 2100 | noress: |
@@ -2080,19 +2109,19 @@ noress: | |||
2080 | */ | 2109 | */ |
2081 | static int __exit docg3_release(struct platform_device *pdev) | 2110 | static int __exit docg3_release(struct platform_device *pdev) |
2082 | { | 2111 | { |
2083 | struct mtd_info **docg3_floors = platform_get_drvdata(pdev); | 2112 | struct docg3_cascade *cascade = platform_get_drvdata(pdev); |
2084 | struct docg3 *docg3 = docg3_floors[0]->priv; | 2113 | struct docg3 *docg3 = cascade->floors[0]->priv; |
2085 | void __iomem *base = docg3->base; | 2114 | void __iomem *base = cascade->base; |
2086 | int floor; | 2115 | int floor; |
2087 | 2116 | ||
2088 | doc_unregister_sysfs(pdev, docg3_floors); | 2117 | doc_unregister_sysfs(pdev, cascade); |
2089 | doc_dbg_unregister(docg3); | 2118 | doc_dbg_unregister(docg3); |
2090 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) | 2119 | for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) |
2091 | if (docg3_floors[floor]) | 2120 | if (cascade->floors[floor]) |
2092 | doc_release_device(docg3_floors[floor]); | 2121 | doc_release_device(cascade->floors[floor]); |
2093 | 2122 | ||
2094 | kfree(docg3_floors); | 2123 | free_bch(docg3->cascade->bch); |
2095 | free_bch(docg3_bch); | 2124 | kfree(cascade); |
2096 | iounmap(base); | 2125 | iounmap(base); |
2097 | return 0; | 2126 | return 0; |
2098 | } | 2127 | } |
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h index db0da436b493..19fb93f96a3a 100644 --- a/drivers/mtd/devices/docg3.h +++ b/drivers/mtd/devices/docg3.h | |||
@@ -22,6 +22,8 @@ | |||
22 | #ifndef _MTD_DOCG3_H | 22 | #ifndef _MTD_DOCG3_H |
23 | #define _MTD_DOCG3_H | 23 | #define _MTD_DOCG3_H |
24 | 24 | ||
25 | #include <linux/mtd/mtd.h> | ||
26 | |||
25 | /* | 27 | /* |
26 | * Flash memory areas : | 28 | * Flash memory areas : |
27 | * - 0x0000 .. 0x07ff : IPL | 29 | * - 0x0000 .. 0x07ff : IPL |
@@ -267,9 +269,23 @@ | |||
267 | #define DOC_LAYOUT_DPS_KEY_LENGTH 8 | 269 | #define DOC_LAYOUT_DPS_KEY_LENGTH 8 |
268 | 270 | ||
269 | /** | 271 | /** |
272 | * struct docg3_cascade - Cascade of 1 to 4 docg3 chips | ||
273 | * @floors: floors (i.e. one physical docg3 chip is one floor) | ||
274 | * @base: IO space to access all chips in the cascade | ||
275 | * @bch: the BCH correcting control structure | ||
276 | * @lock: lock to protect docg3 IO space from concurrent accesses | ||
277 | */ | ||
278 | struct docg3_cascade { | ||
279 | struct mtd_info *floors[DOC_MAX_NBFLOORS]; | ||
280 | void __iomem *base; | ||
281 | struct bch_control *bch; | ||
282 | struct mutex lock; | ||
283 | }; | ||
284 | |||
285 | /** | ||
270 | * struct docg3 - DiskOnChip driver private data | 286 | * struct docg3 - DiskOnChip driver private data |
271 | * @dev: the device currently under control | 287 | * @dev: the device currently under control |
272 | * @base: mapped IO space | 288 | * @cascade: the cascade this device belongs to |
273 | * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3) | 289 | * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3) |
274 | * @if_cfg: if true, reads are on 16bits, else reads are on 8bits | 290 | * @if_cfg: if true, reads are on 16bits, else reads are on 8bits |
275 | 291 | ||
@@ -287,7 +303,7 @@ | |||
287 | */ | 303 | */ |
288 | struct docg3 { | 304 | struct docg3 { |
289 | struct device *dev; | 305 | struct device *dev; |
290 | void __iomem *base; | 306 | struct docg3_cascade *cascade; |
291 | unsigned int device_id:4; | 307 | unsigned int device_id:4; |
292 | unsigned int if_cfg:1; | 308 | unsigned int if_cfg:1; |
293 | unsigned int reliable:2; | 309 | unsigned int reliable:2; |
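
Taken together, each floor's struct docg3 now reaches its sibling floors, the shared BCH engine and the common IO window through its cascade pointer. A minimal sketch of that navigation, assuming only the fields introduced above (docg3_floor_sketch is a hypothetical helper, not part of the driver):

static struct mtd_info *docg3_floor_sketch(struct mtd_info *mtd, int floor)
{
	struct docg3 *docg3 = mtd->priv;		/* per-floor private data */
	struct docg3_cascade *cascade = docg3->cascade;	/* shared base/bch/lock */

	if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
		return NULL;
	return cascade->floors[floor];	/* NULL when the floor is not populated */
}
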
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index 3a11ea628e58..82bd00af5cc3 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c | |||
@@ -367,9 +367,6 @@ static int flash_erase (struct mtd_info *mtd,struct erase_info *instr) | |||
367 | printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len); | 367 | printk (KERN_DEBUG "%s(addr = 0x%.8x, len = %d)\n", __func__, instr->addr, instr->len); |
368 | #endif | 368 | #endif |
369 | 369 | ||
370 | /* sanity checks */ | ||
371 | if (instr->addr + instr->len > mtd->size) return (-EINVAL); | ||
372 | |||
373 | /* | 370 | /* |
374 | * check that both start and end of the requested erase are | 371 | * check that both start and end of the requested erase are |
375 | * aligned with the erasesize at the appropriate addresses. | 372 | * aligned with the erasesize at the appropriate addresses. |
@@ -440,10 +437,6 @@ static int flash_read (struct mtd_info *mtd,loff_t from,size_t len,size_t *retle | |||
440 | printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len); | 437 | printk (KERN_DEBUG "%s(from = 0x%.8x, len = %d)\n", __func__, (__u32)from, len); |
441 | #endif | 438 | #endif |
442 | 439 | ||
443 | /* sanity checks */ | ||
444 | if (!len) return (0); | ||
445 | if (from + len > mtd->size) return (-EINVAL); | ||
446 | |||
447 | /* we always read len bytes */ | 440 | /* we always read len bytes */ |
448 | *retlen = len; | 441 | *retlen = len; |
449 | 442 | ||
@@ -522,11 +515,8 @@ static int flash_write (struct mtd_info *mtd,loff_t to,size_t len,size_t *retlen | |||
522 | printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len); | 515 | printk (KERN_DEBUG "%s(to = 0x%.8x, len = %d)\n", __func__, (__u32)to, len); |
523 | #endif | 516 | #endif |
524 | 517 | ||
525 | *retlen = 0; | ||
526 | |||
527 | /* sanity checks */ | 518 | /* sanity checks */ |
528 | if (!len) return (0); | 519 | if (!len) return (0); |
529 | if (to + len > mtd->size) return (-EINVAL); | ||
530 | 520 | ||
531 | /* first, we write a 0xFF.... padded byte until we reach a dword boundary */ | 521 | /* first, we write a 0xFF.... padded byte until we reach a dword boundary */ |
532 | if (to & (BUSWIDTH - 1)) | 522 | if (to & (BUSWIDTH - 1)) |
@@ -630,14 +620,15 @@ static int __init lart_flash_init (void) | |||
630 | mtd.name = module_name; | 620 | mtd.name = module_name; |
631 | mtd.type = MTD_NORFLASH; | 621 | mtd.type = MTD_NORFLASH; |
632 | mtd.writesize = 1; | 622 | mtd.writesize = 1; |
623 | mtd.writebufsize = 4; | ||
633 | mtd.flags = MTD_CAP_NORFLASH; | 624 | mtd.flags = MTD_CAP_NORFLASH; |
634 | mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; | 625 | mtd.size = FLASH_BLOCKSIZE_PARAM * FLASH_NUMBLOCKS_16m_PARAM + FLASH_BLOCKSIZE_MAIN * FLASH_NUMBLOCKS_16m_MAIN; |
635 | mtd.erasesize = FLASH_BLOCKSIZE_MAIN; | 626 | mtd.erasesize = FLASH_BLOCKSIZE_MAIN; |
636 | mtd.numeraseregions = ARRAY_SIZE(erase_regions); | 627 | mtd.numeraseregions = ARRAY_SIZE(erase_regions); |
637 | mtd.eraseregions = erase_regions; | 628 | mtd.eraseregions = erase_regions; |
638 | mtd.erase = flash_erase; | 629 | mtd._erase = flash_erase; |
639 | mtd.read = flash_read; | 630 | mtd._read = flash_read; |
640 | mtd.write = flash_write; | 631 | mtd._write = flash_write; |
641 | mtd.owner = THIS_MODULE; | 632 | mtd.owner = THIS_MODULE; |
642 | 633 | ||
643 | #ifdef LART_DEBUG | 634 | #ifdef LART_DEBUG |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 7c60dddbefc0..1924d247c1cb 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -288,9 +288,6 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
288 | __func__, (long long)instr->addr, | 288 | __func__, (long long)instr->addr, |
289 | (long long)instr->len); | 289 | (long long)instr->len); |
290 | 290 | ||
291 | /* sanity checks */ | ||
292 | if (instr->addr + instr->len > flash->mtd.size) | ||
293 | return -EINVAL; | ||
294 | div_u64_rem(instr->len, mtd->erasesize, &rem); | 291 | div_u64_rem(instr->len, mtd->erasesize, &rem); |
295 | if (rem) | 292 | if (rem) |
296 | return -EINVAL; | 293 | return -EINVAL; |
@@ -349,13 +346,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
349 | pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), | 346 | pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), |
350 | __func__, (u32)from, len); | 347 | __func__, (u32)from, len); |
351 | 348 | ||
352 | /* sanity checks */ | ||
353 | if (!len) | ||
354 | return 0; | ||
355 | |||
356 | if (from + len > flash->mtd.size) | ||
357 | return -EINVAL; | ||
358 | |||
359 | spi_message_init(&m); | 349 | spi_message_init(&m); |
360 | memset(t, 0, (sizeof t)); | 350 | memset(t, 0, (sizeof t)); |
361 | 351 | ||
@@ -371,9 +361,6 @@ static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
371 | t[1].len = len; | 361 | t[1].len = len; |
372 | spi_message_add_tail(&t[1], &m); | 362 | spi_message_add_tail(&t[1], &m); |
373 | 363 | ||
374 | /* Byte count starts at zero. */ | ||
375 | *retlen = 0; | ||
376 | |||
377 | mutex_lock(&flash->lock); | 364 | mutex_lock(&flash->lock); |
378 | 365 | ||
379 | /* Wait till previous write/erase is done. */ | 366 | /* Wait till previous write/erase is done. */ |
@@ -417,15 +404,6 @@ static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
417 | pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), | 404 | pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), |
418 | __func__, (u32)to, len); | 405 | __func__, (u32)to, len); |
419 | 406 | ||
420 | *retlen = 0; | ||
421 | |||
422 | /* sanity checks */ | ||
423 | if (!len) | ||
424 | return(0); | ||
425 | |||
426 | if (to + len > flash->mtd.size) | ||
427 | return -EINVAL; | ||
428 | |||
429 | spi_message_init(&m); | 407 | spi_message_init(&m); |
430 | memset(t, 0, (sizeof t)); | 408 | memset(t, 0, (sizeof t)); |
431 | 409 | ||
@@ -509,15 +487,6 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
509 | pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), | 487 | pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), |
510 | __func__, (u32)to, len); | 488 | __func__, (u32)to, len); |
511 | 489 | ||
512 | *retlen = 0; | ||
513 | |||
514 | /* sanity checks */ | ||
515 | if (!len) | ||
516 | return 0; | ||
517 | |||
518 | if (to + len > flash->mtd.size) | ||
519 | return -EINVAL; | ||
520 | |||
521 | spi_message_init(&m); | 490 | spi_message_init(&m); |
522 | memset(t, 0, (sizeof t)); | 491 | memset(t, 0, (sizeof t)); |
523 | 492 | ||
@@ -908,14 +877,14 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
908 | flash->mtd.writesize = 1; | 877 | flash->mtd.writesize = 1; |
909 | flash->mtd.flags = MTD_CAP_NORFLASH; | 878 | flash->mtd.flags = MTD_CAP_NORFLASH; |
910 | flash->mtd.size = info->sector_size * info->n_sectors; | 879 | flash->mtd.size = info->sector_size * info->n_sectors; |
911 | flash->mtd.erase = m25p80_erase; | 880 | flash->mtd._erase = m25p80_erase; |
912 | flash->mtd.read = m25p80_read; | 881 | flash->mtd._read = m25p80_read; |
913 | 882 | ||
914 | /* sst flash chips use AAI word program */ | 883 | /* sst flash chips use AAI word program */ |
915 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) | 884 | if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) |
916 | flash->mtd.write = sst_write; | 885 | flash->mtd._write = sst_write; |
917 | else | 886 | else |
918 | flash->mtd.write = m25p80_write; | 887 | flash->mtd._write = m25p80_write; |
919 | 888 | ||
920 | /* prefer "small sector" erase if possible */ | 889 | /* prefer "small sector" erase if possible */ |
921 | if (info->flags & SECT_4K) { | 890 | if (info->flags & SECT_4K) { |
@@ -932,6 +901,7 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
932 | ppdata.of_node = spi->dev.of_node; | 901 | ppdata.of_node = spi->dev.of_node; |
933 | flash->mtd.dev.parent = &spi->dev; | 902 | flash->mtd.dev.parent = &spi->dev; |
934 | flash->page_size = info->page_size; | 903 | flash->page_size = info->page_size; |
904 | flash->mtd.writebufsize = flash->page_size; | ||
935 | 905 | ||
936 | if (info->addr_width) | 906 | if (info->addr_width) |
937 | flash->addr_width = info->addr_width; | 907 | flash->addr_width = info->addr_width; |
@@ -1004,21 +974,7 @@ static struct spi_driver m25p80_driver = { | |||
1004 | */ | 974 | */ |
1005 | }; | 975 | }; |
1006 | 976 | ||
1007 | 977 | module_spi_driver(m25p80_driver); | |
1008 | static int __init m25p80_init(void) | ||
1009 | { | ||
1010 | return spi_register_driver(&m25p80_driver); | ||
1011 | } | ||
1012 | |||
1013 | |||
1014 | static void __exit m25p80_exit(void) | ||
1015 | { | ||
1016 | spi_unregister_driver(&m25p80_driver); | ||
1017 | } | ||
1018 | |||
1019 | |||
1020 | module_init(m25p80_init); | ||
1021 | module_exit(m25p80_exit); | ||
1022 | 978 | ||
1023 | MODULE_LICENSE("GPL"); | 979 | MODULE_LICENSE("GPL"); |
1024 | MODULE_AUTHOR("Mike Lavender"); | 980 | MODULE_AUTHOR("Mike Lavender"); |
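
The _read/_write/_erase renames above (and throughout this series) go together with the removal of the per-driver sanity checks: callers are expected to reach the driver only through the mtd_read()/mtd_write()/mtd_erase() wrappers, which can perform the bounds checking once, centrally. A minimal sketch of that idea, using only the mtd_info fields visible in these diffs (the real include/linux/mtd/mtd.h wrappers may differ in detail):

/* Sketch of a centralised bounds check wrapped around the _read handler. */
static inline int mtd_read_sketch(struct mtd_info *mtd, loff_t from, size_t len,
				  size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (from < 0 || from > mtd->size || len > mtd->size - from)
		return -EINVAL;			/* was open-coded in each driver */
	if (!len)
		return 0;
	return mtd->_read(mtd, from, len, retlen, buf);
}
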
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c index 8423fb6d4f26..182849d39c61 100644 --- a/drivers/mtd/devices/ms02-nv.c +++ b/drivers/mtd/devices/ms02-nv.c | |||
@@ -59,12 +59,8 @@ static int ms02nv_read(struct mtd_info *mtd, loff_t from, | |||
59 | { | 59 | { |
60 | struct ms02nv_private *mp = mtd->priv; | 60 | struct ms02nv_private *mp = mtd->priv; |
61 | 61 | ||
62 | if (from + len > mtd->size) | ||
63 | return -EINVAL; | ||
64 | |||
65 | memcpy(buf, mp->uaddr + from, len); | 62 | memcpy(buf, mp->uaddr + from, len); |
66 | *retlen = len; | 63 | *retlen = len; |
67 | |||
68 | return 0; | 64 | return 0; |
69 | } | 65 | } |
70 | 66 | ||
@@ -73,12 +69,8 @@ static int ms02nv_write(struct mtd_info *mtd, loff_t to, | |||
73 | { | 69 | { |
74 | struct ms02nv_private *mp = mtd->priv; | 70 | struct ms02nv_private *mp = mtd->priv; |
75 | 71 | ||
76 | if (to + len > mtd->size) | ||
77 | return -EINVAL; | ||
78 | |||
79 | memcpy(mp->uaddr + to, buf, len); | 72 | memcpy(mp->uaddr + to, buf, len); |
80 | *retlen = len; | 73 | *retlen = len; |
81 | |||
82 | return 0; | 74 | return 0; |
83 | } | 75 | } |
84 | 76 | ||
@@ -215,8 +207,8 @@ static int __init ms02nv_init_one(ulong addr) | |||
215 | mtd->size = fixsize; | 207 | mtd->size = fixsize; |
216 | mtd->name = (char *)ms02nv_name; | 208 | mtd->name = (char *)ms02nv_name; |
217 | mtd->owner = THIS_MODULE; | 209 | mtd->owner = THIS_MODULE; |
218 | mtd->read = ms02nv_read; | 210 | mtd->_read = ms02nv_read; |
219 | mtd->write = ms02nv_write; | 211 | mtd->_write = ms02nv_write; |
220 | mtd->writesize = 1; | 212 | mtd->writesize = 1; |
221 | 213 | ||
222 | ret = -EIO; | 214 | ret = -EIO; |
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 236057ead0d2..928fb0e6d73a 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c | |||
@@ -164,9 +164,6 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
164 | dev_name(&spi->dev), (long long)instr->addr, | 164 | dev_name(&spi->dev), (long long)instr->addr, |
165 | (long long)instr->len); | 165 | (long long)instr->len); |
166 | 166 | ||
167 | /* Sanity checks */ | ||
168 | if (instr->addr + instr->len > mtd->size) | ||
169 | return -EINVAL; | ||
170 | div_u64_rem(instr->len, priv->page_size, &rem); | 167 | div_u64_rem(instr->len, priv->page_size, &rem); |
171 | if (rem) | 168 | if (rem) |
172 | return -EINVAL; | 169 | return -EINVAL; |
@@ -252,14 +249,6 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
252 | pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), | 249 | pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), |
253 | (unsigned)from, (unsigned)(from + len)); | 250 | (unsigned)from, (unsigned)(from + len)); |
254 | 251 | ||
255 | *retlen = 0; | ||
256 | |||
257 | /* Sanity checks */ | ||
258 | if (!len) | ||
259 | return 0; | ||
260 | if (from + len > mtd->size) | ||
261 | return -EINVAL; | ||
262 | |||
263 | /* Calculate flash page/byte address */ | 252 | /* Calculate flash page/byte address */ |
264 | addr = (((unsigned)from / priv->page_size) << priv->page_offset) | 253 | addr = (((unsigned)from / priv->page_size) << priv->page_offset) |
265 | + ((unsigned)from % priv->page_size); | 254 | + ((unsigned)from % priv->page_size); |
@@ -328,14 +317,6 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
328 | pr_debug("%s: write 0x%x..0x%x\n", | 317 | pr_debug("%s: write 0x%x..0x%x\n", |
329 | dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); | 318 | dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); |
330 | 319 | ||
331 | *retlen = 0; | ||
332 | |||
333 | /* Sanity checks */ | ||
334 | if (!len) | ||
335 | return 0; | ||
336 | if ((to + len) > mtd->size) | ||
337 | return -EINVAL; | ||
338 | |||
339 | spi_message_init(&msg); | 320 | spi_message_init(&msg); |
340 | 321 | ||
341 | x[0].tx_buf = command = priv->command; | 322 | x[0].tx_buf = command = priv->command; |
@@ -490,8 +471,6 @@ static ssize_t otp_read(struct spi_device *spi, unsigned base, | |||
490 | 471 | ||
491 | if ((off + len) > 64) | 472 | if ((off + len) > 64) |
492 | len = 64 - off; | 473 | len = 64 - off; |
493 | if (len == 0) | ||
494 | return len; | ||
495 | 474 | ||
496 | spi_message_init(&m); | 475 | spi_message_init(&m); |
497 | 476 | ||
@@ -611,16 +590,16 @@ static int dataflash_write_user_otp(struct mtd_info *mtd, | |||
611 | 590 | ||
612 | static char *otp_setup(struct mtd_info *device, char revision) | 591 | static char *otp_setup(struct mtd_info *device, char revision) |
613 | { | 592 | { |
614 | device->get_fact_prot_info = dataflash_get_otp_info; | 593 | device->_get_fact_prot_info = dataflash_get_otp_info; |
615 | device->read_fact_prot_reg = dataflash_read_fact_otp; | 594 | device->_read_fact_prot_reg = dataflash_read_fact_otp; |
616 | device->get_user_prot_info = dataflash_get_otp_info; | 595 | device->_get_user_prot_info = dataflash_get_otp_info; |
617 | device->read_user_prot_reg = dataflash_read_user_otp; | 596 | device->_read_user_prot_reg = dataflash_read_user_otp; |
618 | 597 | ||
619 | /* rev c parts (at45db321c and at45db1281 only!) use a | 598 | /* rev c parts (at45db321c and at45db1281 only!) use a |
620 | * different write procedure; not (yet?) implemented. | 599 | * different write procedure; not (yet?) implemented. |
621 | */ | 600 | */ |
622 | if (revision > 'c') | 601 | if (revision > 'c') |
623 | device->write_user_prot_reg = dataflash_write_user_otp; | 602 | device->_write_user_prot_reg = dataflash_write_user_otp; |
624 | 603 | ||
625 | return ", OTP"; | 604 | return ", OTP"; |
626 | } | 605 | } |
@@ -672,9 +651,9 @@ add_dataflash_otp(struct spi_device *spi, char *name, | |||
672 | device->owner = THIS_MODULE; | 651 | device->owner = THIS_MODULE; |
673 | device->type = MTD_DATAFLASH; | 652 | device->type = MTD_DATAFLASH; |
674 | device->flags = MTD_WRITEABLE; | 653 | device->flags = MTD_WRITEABLE; |
675 | device->erase = dataflash_erase; | 654 | device->_erase = dataflash_erase; |
676 | device->read = dataflash_read; | 655 | device->_read = dataflash_read; |
677 | device->write = dataflash_write; | 656 | device->_write = dataflash_write; |
678 | device->priv = priv; | 657 | device->priv = priv; |
679 | 658 | ||
680 | device->dev.parent = &spi->dev; | 659 | device->dev.parent = &spi->dev; |
@@ -946,18 +925,7 @@ static struct spi_driver dataflash_driver = { | |||
946 | /* FIXME: investigate suspend and resume... */ | 925 | /* FIXME: investigate suspend and resume... */ |
947 | }; | 926 | }; |
948 | 927 | ||
949 | static int __init dataflash_init(void) | 928 | module_spi_driver(dataflash_driver); |
950 | { | ||
951 | return spi_register_driver(&dataflash_driver); | ||
952 | } | ||
953 | module_init(dataflash_init); | ||
954 | |||
955 | static void __exit dataflash_exit(void) | ||
956 | { | ||
957 | spi_unregister_driver(&dataflash_driver); | ||
958 | } | ||
959 | module_exit(dataflash_exit); | ||
960 | |||
961 | 929 | ||
962 | MODULE_LICENSE("GPL"); | 930 | MODULE_LICENSE("GPL"); |
963 | MODULE_AUTHOR("Andrew Victor, David Brownell"); | 931 | MODULE_AUTHOR("Andrew Victor, David Brownell"); |
diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index 2562689ba6b4..ec59d65897fb 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c | |||
@@ -34,34 +34,23 @@ static struct mtd_info *mtd_info; | |||
34 | 34 | ||
35 | static int ram_erase(struct mtd_info *mtd, struct erase_info *instr) | 35 | static int ram_erase(struct mtd_info *mtd, struct erase_info *instr) |
36 | { | 36 | { |
37 | if (instr->addr + instr->len > mtd->size) | ||
38 | return -EINVAL; | ||
39 | |||
40 | memset((char *)mtd->priv + instr->addr, 0xff, instr->len); | 37 | memset((char *)mtd->priv + instr->addr, 0xff, instr->len); |
41 | |||
42 | instr->state = MTD_ERASE_DONE; | 38 | instr->state = MTD_ERASE_DONE; |
43 | mtd_erase_callback(instr); | 39 | mtd_erase_callback(instr); |
44 | |||
45 | return 0; | 40 | return 0; |
46 | } | 41 | } |
47 | 42 | ||
48 | static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, | 43 | static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, |
49 | size_t *retlen, void **virt, resource_size_t *phys) | 44 | size_t *retlen, void **virt, resource_size_t *phys) |
50 | { | 45 | { |
51 | if (from + len > mtd->size) | ||
52 | return -EINVAL; | ||
53 | |||
54 | /* can we return a physical address with this driver? */ | ||
55 | if (phys) | ||
56 | return -EINVAL; | ||
57 | |||
58 | *virt = mtd->priv + from; | 46 | *virt = mtd->priv + from; |
59 | *retlen = len; | 47 | *retlen = len; |
60 | return 0; | 48 | return 0; |
61 | } | 49 | } |
62 | 50 | ||
63 | static void ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | 51 | static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) |
64 | { | 52 | { |
53 | return 0; | ||
65 | } | 54 | } |
66 | 55 | ||
67 | /* | 56 | /* |
@@ -80,11 +69,7 @@ static unsigned long ram_get_unmapped_area(struct mtd_info *mtd, | |||
80 | static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, | 69 | static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, |
81 | size_t *retlen, u_char *buf) | 70 | size_t *retlen, u_char *buf) |
82 | { | 71 | { |
83 | if (from + len > mtd->size) | ||
84 | return -EINVAL; | ||
85 | |||
86 | memcpy(buf, mtd->priv + from, len); | 72 | memcpy(buf, mtd->priv + from, len); |
87 | |||
88 | *retlen = len; | 73 | *retlen = len; |
89 | return 0; | 74 | return 0; |
90 | } | 75 | } |
@@ -92,11 +77,7 @@ static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
92 | static int ram_write(struct mtd_info *mtd, loff_t to, size_t len, | 77 | static int ram_write(struct mtd_info *mtd, loff_t to, size_t len, |
93 | size_t *retlen, const u_char *buf) | 78 | size_t *retlen, const u_char *buf) |
94 | { | 79 | { |
95 | if (to + len > mtd->size) | ||
96 | return -EINVAL; | ||
97 | |||
98 | memcpy((char *)mtd->priv + to, buf, len); | 80 | memcpy((char *)mtd->priv + to, buf, len); |
99 | |||
100 | *retlen = len; | 81 | *retlen = len; |
101 | return 0; | 82 | return 0; |
102 | } | 83 | } |
@@ -126,12 +107,12 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, | |||
126 | mtd->priv = mapped_address; | 107 | mtd->priv = mapped_address; |
127 | 108 | ||
128 | mtd->owner = THIS_MODULE; | 109 | mtd->owner = THIS_MODULE; |
129 | mtd->erase = ram_erase; | 110 | mtd->_erase = ram_erase; |
130 | mtd->point = ram_point; | 111 | mtd->_point = ram_point; |
131 | mtd->unpoint = ram_unpoint; | 112 | mtd->_unpoint = ram_unpoint; |
132 | mtd->get_unmapped_area = ram_get_unmapped_area; | 113 | mtd->_get_unmapped_area = ram_get_unmapped_area; |
133 | mtd->read = ram_read; | 114 | mtd->_read = ram_read; |
134 | mtd->write = ram_write; | 115 | mtd->_write = ram_write; |
135 | 116 | ||
136 | if (mtd_device_register(mtd, NULL, 0)) | 117 | if (mtd_device_register(mtd, NULL, 0)) |
137 | return -EIO; | 118 | return -EIO; |
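
For context, the point/unpoint pair converted above is reached through the mtd_point()/mtd_unpoint() wrappers rather than called directly. A hedged sketch of a typical caller, assuming those wrappers and mtd_read() as found in this kernel generation (read_via_point is a hypothetical helper):

/* Sketch: use a direct mapping when the driver provides one, else fall back. */
static int read_via_point(struct mtd_info *mtd, loff_t from, size_t len, u_char *buf)
{
	size_t retlen;
	void *virt;

	if (mtd_point(mtd, from, len, &retlen, &virt, NULL))
		return mtd_read(mtd, from, len, &retlen, buf);	/* no direct window */

	memcpy(buf, virt, retlen);
	mtd_unpoint(mtd, from, retlen);
	return 0;
}
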
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 23423bd00b06..67823de68db6 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c | |||
@@ -33,45 +33,33 @@ struct phram_mtd_list { | |||
33 | 33 | ||
34 | static LIST_HEAD(phram_list); | 34 | static LIST_HEAD(phram_list); |
35 | 35 | ||
36 | |||
37 | static int phram_erase(struct mtd_info *mtd, struct erase_info *instr) | 36 | static int phram_erase(struct mtd_info *mtd, struct erase_info *instr) |
38 | { | 37 | { |
39 | u_char *start = mtd->priv; | 38 | u_char *start = mtd->priv; |
40 | 39 | ||
41 | if (instr->addr + instr->len > mtd->size) | ||
42 | return -EINVAL; | ||
43 | |||
44 | memset(start + instr->addr, 0xff, instr->len); | 40 | memset(start + instr->addr, 0xff, instr->len); |
45 | 41 | ||
46 | /* This'll catch a few races. Free the thing before returning :) | 42 | /* |
43 | * This'll catch a few races. Free the thing before returning :) | ||
47 | * I don't feel at all ashamed. This kind of thing is possible anyway | 44 | * I don't feel at all ashamed. This kind of thing is possible anyway |
48 | * with flash, but unlikely. | 45 | * with flash, but unlikely. |
49 | */ | 46 | */ |
50 | |||
51 | instr->state = MTD_ERASE_DONE; | 47 | instr->state = MTD_ERASE_DONE; |
52 | |||
53 | mtd_erase_callback(instr); | 48 | mtd_erase_callback(instr); |
54 | |||
55 | return 0; | 49 | return 0; |
56 | } | 50 | } |
57 | 51 | ||
58 | static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, | 52 | static int phram_point(struct mtd_info *mtd, loff_t from, size_t len, |
59 | size_t *retlen, void **virt, resource_size_t *phys) | 53 | size_t *retlen, void **virt, resource_size_t *phys) |
60 | { | 54 | { |
61 | if (from + len > mtd->size) | ||
62 | return -EINVAL; | ||
63 | |||
64 | /* can we return a physical address with this driver? */ | ||
65 | if (phys) | ||
66 | return -EINVAL; | ||
67 | |||
68 | *virt = mtd->priv + from; | 55 | *virt = mtd->priv + from; |
69 | *retlen = len; | 56 | *retlen = len; |
70 | return 0; | 57 | return 0; |
71 | } | 58 | } |
72 | 59 | ||
73 | static void phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | 60 | static int phram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) |
74 | { | 61 | { |
62 | return 0; | ||
75 | } | 63 | } |
76 | 64 | ||
77 | static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, | 65 | static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, |
@@ -79,14 +67,7 @@ static int phram_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
79 | { | 67 | { |
80 | u_char *start = mtd->priv; | 68 | u_char *start = mtd->priv; |
81 | 69 | ||
82 | if (from >= mtd->size) | ||
83 | return -EINVAL; | ||
84 | |||
85 | if (len > mtd->size - from) | ||
86 | len = mtd->size - from; | ||
87 | |||
88 | memcpy(buf, start + from, len); | 70 | memcpy(buf, start + from, len); |
89 | |||
90 | *retlen = len; | 71 | *retlen = len; |
91 | return 0; | 72 | return 0; |
92 | } | 73 | } |
@@ -96,20 +77,11 @@ static int phram_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
96 | { | 77 | { |
97 | u_char *start = mtd->priv; | 78 | u_char *start = mtd->priv; |
98 | 79 | ||
99 | if (to >= mtd->size) | ||
100 | return -EINVAL; | ||
101 | |||
102 | if (len > mtd->size - to) | ||
103 | len = mtd->size - to; | ||
104 | |||
105 | memcpy(start + to, buf, len); | 80 | memcpy(start + to, buf, len); |
106 | |||
107 | *retlen = len; | 81 | *retlen = len; |
108 | return 0; | 82 | return 0; |
109 | } | 83 | } |
110 | 84 | ||
111 | |||
112 | |||
113 | static void unregister_devices(void) | 85 | static void unregister_devices(void) |
114 | { | 86 | { |
115 | struct phram_mtd_list *this, *safe; | 87 | struct phram_mtd_list *this, *safe; |
@@ -142,11 +114,11 @@ static int register_device(char *name, unsigned long start, unsigned long len) | |||
142 | new->mtd.name = name; | 114 | new->mtd.name = name; |
143 | new->mtd.size = len; | 115 | new->mtd.size = len; |
144 | new->mtd.flags = MTD_CAP_RAM; | 116 | new->mtd.flags = MTD_CAP_RAM; |
145 | new->mtd.erase = phram_erase; | 117 | new->mtd._erase = phram_erase; |
146 | new->mtd.point = phram_point; | 118 | new->mtd._point = phram_point; |
147 | new->mtd.unpoint = phram_unpoint; | 119 | new->mtd._unpoint = phram_unpoint; |
148 | new->mtd.read = phram_read; | 120 | new->mtd._read = phram_read; |
149 | new->mtd.write = phram_write; | 121 | new->mtd._write = phram_write; |
150 | new->mtd.owner = THIS_MODULE; | 122 | new->mtd.owner = THIS_MODULE; |
151 | new->mtd.type = MTD_RAM; | 123 | new->mtd.type = MTD_RAM; |
152 | new->mtd.erasesize = PAGE_SIZE; | 124 | new->mtd.erasesize = PAGE_SIZE; |
@@ -233,7 +205,17 @@ static inline void kill_final_newline(char *str) | |||
233 | return 1; \ | 205 | return 1; \ |
234 | } while (0) | 206 | } while (0) |
235 | 207 | ||
236 | static int phram_setup(const char *val, struct kernel_param *kp) | 208 | /* |
209 | * This shall contain the module parameter if any. It is of the form: | ||
210 | * - phram=<device>,<address>,<size> for module case | ||
211 | * - phram.phram=<device>,<address>,<size> for built-in case | ||
212 | * We leave 64 bytes for the device name, 12 for the address and 12 for the | ||
213 | * size. | ||
214 | * Example: phram.phram=rootfs,0xa0000000,512Mi | ||
215 | */ | ||
216 | static __initdata char phram_paramline[64+12+12]; | ||
217 | |||
218 | static int __init phram_setup(const char *val) | ||
237 | { | 219 | { |
238 | char buf[64+12+12], *str = buf; | 220 | char buf[64+12+12], *str = buf; |
239 | char *token[3]; | 221 | char *token[3]; |
@@ -282,12 +264,28 @@ static int phram_setup(const char *val, struct kernel_param *kp) | |||
282 | return ret; | 264 | return ret; |
283 | } | 265 | } |
284 | 266 | ||
285 | module_param_call(phram, phram_setup, NULL, NULL, 000); | 267 | static int __init phram_param_call(const char *val, struct kernel_param *kp) |
268 | { | ||
269 | /* | ||
270 | * This function is always called before 'init_phram()', whether | ||
271 | * built-in or module. | ||
272 | */ | ||
273 | if (strlen(val) >= sizeof(phram_paramline)) | ||
274 | return -ENOSPC; | ||
275 | strcpy(phram_paramline, val); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | module_param_call(phram, phram_param_call, NULL, NULL, 000); | ||
286 | MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\""); | 281 | MODULE_PARM_DESC(phram, "Memory region to map. \"phram=<name>,<start>,<length>\""); |
287 | 282 | ||
288 | 283 | ||
289 | static int __init init_phram(void) | 284 | static int __init init_phram(void) |
290 | { | 285 | { |
286 | if (phram_paramline[0]) | ||
287 | return phram_setup(phram_paramline); | ||
288 | |||
291 | return 0; | 289 | return 0; |
292 | } | 290 | } |
293 | 291 | ||
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c index 5d53c5760a6c..0c51b988e1f8 100644 --- a/drivers/mtd/devices/pmc551.c +++ b/drivers/mtd/devices/pmc551.c | |||
@@ -94,12 +94,48 @@ | |||
94 | #include <linux/ioctl.h> | 94 | #include <linux/ioctl.h> |
95 | #include <asm/io.h> | 95 | #include <asm/io.h> |
96 | #include <linux/pci.h> | 96 | #include <linux/pci.h> |
97 | |||
98 | #include <linux/mtd/mtd.h> | 97 | #include <linux/mtd/mtd.h> |
99 | #include <linux/mtd/pmc551.h> | 98 | |
99 | #define PMC551_VERSION \ | ||
100 | "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n" | ||
101 | |||
102 | #define PCI_VENDOR_ID_V3_SEMI 0x11b0 | ||
103 | #define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200 | ||
104 | |||
105 | #define PMC551_PCI_MEM_MAP0 0x50 | ||
106 | #define PMC551_PCI_MEM_MAP1 0x54 | ||
107 | #define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000 | ||
108 | #define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0 | ||
109 | #define PMC551_PCI_MEM_MAP_REG_EN 0x00000002 | ||
110 | #define PMC551_PCI_MEM_MAP_ENABLE 0x00000001 | ||
111 | |||
112 | #define PMC551_SDRAM_MA 0x60 | ||
113 | #define PMC551_SDRAM_CMD 0x62 | ||
114 | #define PMC551_DRAM_CFG 0x64 | ||
115 | #define PMC551_SYS_CTRL_REG 0x78 | ||
116 | |||
117 | #define PMC551_DRAM_BLK0 0x68 | ||
118 | #define PMC551_DRAM_BLK1 0x6c | ||
119 | #define PMC551_DRAM_BLK2 0x70 | ||
120 | #define PMC551_DRAM_BLK3 0x74 | ||
121 | #define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f)) | ||
122 | #define PMC551_DRAM_BLK_SET_COL_MUX(x, v) (((x) & ~0x00007000) | (((v) & 0x7) << 12)) | ||
123 | #define PMC551_DRAM_BLK_SET_ROW_MUX(x, v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8)) | ||
124 | |||
125 | struct mypriv { | ||
126 | struct pci_dev *dev; | ||
127 | u_char *start; | ||
128 | u32 base_map0; | ||
129 | u32 curr_map0; | ||
130 | u32 asize; | ||
131 | struct mtd_info *nextpmc551; | ||
132 | }; | ||
100 | 133 | ||
101 | static struct mtd_info *pmc551list; | 134 | static struct mtd_info *pmc551list; |
102 | 135 | ||
136 | static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, | ||
137 | size_t *retlen, void **virt, resource_size_t *phys); | ||
138 | |||
103 | static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) | 139 | static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) |
104 | { | 140 | { |
105 | struct mypriv *priv = mtd->priv; | 141 | struct mypriv *priv = mtd->priv; |
@@ -115,16 +151,6 @@ static int pmc551_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
115 | #endif | 151 | #endif |
116 | 152 | ||
117 | end = instr->addr + instr->len - 1; | 153 | end = instr->addr + instr->len - 1; |
118 | |||
119 | /* Is it past the end? */ | ||
120 | if (end > mtd->size) { | ||
121 | #ifdef CONFIG_MTD_PMC551_DEBUG | ||
122 | printk(KERN_DEBUG "pmc551_erase() out of bounds (%ld > %ld)\n", | ||
123 | (long)end, (long)mtd->size); | ||
124 | #endif | ||
125 | return -EINVAL; | ||
126 | } | ||
127 | |||
128 | eoff_hi = end & ~(priv->asize - 1); | 154 | eoff_hi = end & ~(priv->asize - 1); |
129 | soff_hi = instr->addr & ~(priv->asize - 1); | 155 | soff_hi = instr->addr & ~(priv->asize - 1); |
130 | eoff_lo = end & (priv->asize - 1); | 156 | eoff_lo = end & (priv->asize - 1); |
@@ -178,18 +204,6 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
178 | printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); | 204 | printk(KERN_DEBUG "pmc551_point(%ld, %ld)\n", (long)from, (long)len); |
179 | #endif | 205 | #endif |
180 | 206 | ||
181 | if (from + len > mtd->size) { | ||
182 | #ifdef CONFIG_MTD_PMC551_DEBUG | ||
183 | printk(KERN_DEBUG "pmc551_point() out of bounds (%ld > %ld)\n", | ||
184 | (long)from + len, (long)mtd->size); | ||
185 | #endif | ||
186 | return -EINVAL; | ||
187 | } | ||
188 | |||
189 | /* can we return a physical address with this driver? */ | ||
190 | if (phys) | ||
191 | return -EINVAL; | ||
192 | |||
193 | soff_hi = from & ~(priv->asize - 1); | 207 | soff_hi = from & ~(priv->asize - 1); |
194 | soff_lo = from & (priv->asize - 1); | 208 | soff_lo = from & (priv->asize - 1); |
195 | 209 | ||
@@ -205,11 +219,12 @@ static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
205 | return 0; | 219 | return 0; |
206 | } | 220 | } |
207 | 221 | ||
208 | static void pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | 222 | static int pmc551_unpoint(struct mtd_info *mtd, loff_t from, size_t len) |
209 | { | 223 | { |
210 | #ifdef CONFIG_MTD_PMC551_DEBUG | 224 | #ifdef CONFIG_MTD_PMC551_DEBUG |
211 | printk(KERN_DEBUG "pmc551_unpoint()\n"); | 225 | printk(KERN_DEBUG "pmc551_unpoint()\n"); |
212 | #endif | 226 | #endif |
227 | return 0; | ||
213 | } | 228 | } |
214 | 229 | ||
215 | static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, | 230 | static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, |
@@ -228,16 +243,6 @@ static int pmc551_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
228 | #endif | 243 | #endif |
229 | 244 | ||
230 | end = from + len - 1; | 245 | end = from + len - 1; |
231 | |||
232 | /* Is it past the end? */ | ||
233 | if (end > mtd->size) { | ||
234 | #ifdef CONFIG_MTD_PMC551_DEBUG | ||
235 | printk(KERN_DEBUG "pmc551_read() out of bounds (%ld > %ld)\n", | ||
236 | (long)end, (long)mtd->size); | ||
237 | #endif | ||
238 | return -EINVAL; | ||
239 | } | ||
240 | |||
241 | soff_hi = from & ~(priv->asize - 1); | 246 | soff_hi = from & ~(priv->asize - 1); |
242 | eoff_hi = end & ~(priv->asize - 1); | 247 | eoff_hi = end & ~(priv->asize - 1); |
243 | soff_lo = from & (priv->asize - 1); | 248 | soff_lo = from & (priv->asize - 1); |
@@ -295,16 +300,6 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
295 | #endif | 300 | #endif |
296 | 301 | ||
297 | end = to + len - 1; | 302 | end = to + len - 1; |
298 | /* Is it past the end? or did the u32 wrap? */ | ||
299 | if (end > mtd->size) { | ||
300 | #ifdef CONFIG_MTD_PMC551_DEBUG | ||
301 | printk(KERN_DEBUG "pmc551_write() out of bounds (end: %ld, " | ||
302 | "size: %ld, to: %ld)\n", (long)end, (long)mtd->size, | ||
303 | (long)to); | ||
304 | #endif | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | |||
308 | soff_hi = to & ~(priv->asize - 1); | 303 | soff_hi = to & ~(priv->asize - 1); |
309 | eoff_hi = end & ~(priv->asize - 1); | 304 | eoff_hi = end & ~(priv->asize - 1); |
310 | soff_lo = to & (priv->asize - 1); | 305 | soff_lo = to & (priv->asize - 1); |
@@ -358,7 +353,7 @@ static int pmc551_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
358 | * mechanism | 353 | * mechanism |
359 | * returns the size of the memory region found. | 354 | * returns the size of the memory region found. |
360 | */ | 355 | */ |
361 | static u32 fixup_pmc551(struct pci_dev *dev) | 356 | static int fixup_pmc551(struct pci_dev *dev) |
362 | { | 357 | { |
363 | #ifdef CONFIG_MTD_PMC551_BUGFIX | 358 | #ifdef CONFIG_MTD_PMC551_BUGFIX |
364 | u32 dram_data; | 359 | u32 dram_data; |
@@ -668,7 +663,7 @@ static int __init init_pmc551(void) | |||
668 | struct mypriv *priv; | 663 | struct mypriv *priv; |
669 | int found = 0; | 664 | int found = 0; |
670 | struct mtd_info *mtd; | 665 | struct mtd_info *mtd; |
671 | u32 length = 0; | 666 | int length = 0; |
672 | 667 | ||
673 | if (msize) { | 668 | if (msize) { |
674 | msize = (1 << (ffs(msize) - 1)) << 20; | 669 | msize = (1 << (ffs(msize) - 1)) << 20; |
@@ -786,11 +781,11 @@ static int __init init_pmc551(void) | |||
786 | 781 | ||
787 | mtd->size = msize; | 782 | mtd->size = msize; |
788 | mtd->flags = MTD_CAP_RAM; | 783 | mtd->flags = MTD_CAP_RAM; |
789 | mtd->erase = pmc551_erase; | 784 | mtd->_erase = pmc551_erase; |
790 | mtd->read = pmc551_read; | 785 | mtd->_read = pmc551_read; |
791 | mtd->write = pmc551_write; | 786 | mtd->_write = pmc551_write; |
792 | mtd->point = pmc551_point; | 787 | mtd->_point = pmc551_point; |
793 | mtd->unpoint = pmc551_unpoint; | 788 | mtd->_unpoint = pmc551_unpoint; |
794 | mtd->type = MTD_RAM; | 789 | mtd->type = MTD_RAM; |
795 | mtd->name = "PMC551 RAM board"; | 790 | mtd->name = "PMC551 RAM board"; |
796 | mtd->erasesize = 0x10000; | 791 | mtd->erasesize = 0x10000; |
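
Among the register helpers moved into this file, PMC551_DRAM_BLK_GET_SIZE() decodes the aperture size from bits 7:4 of a DRAM block register (512 KiB shifted left by the field value). A small self-contained check of the arithmetic, compilable on its own in userspace:

#include <assert.h>

#define PMC551_DRAM_BLK_GET_SIZE(x) (524288 << ((x >> 4) & 0x0f))

int main(void)
{
	/* size field (bits 7:4) == 0 -> 512 KiB, == 3 -> 4 MiB */
	assert(PMC551_DRAM_BLK_GET_SIZE(0x00) == 512 * 1024);
	assert(PMC551_DRAM_BLK_GET_SIZE(0x30) == 4 * 1024 * 1024);
	return 0;
}
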
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 288594163c22..8f52fc858e48 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c | |||
@@ -75,7 +75,7 @@ static slram_mtd_list_t *slram_mtdlist = NULL; | |||
75 | static int slram_erase(struct mtd_info *, struct erase_info *); | 75 | static int slram_erase(struct mtd_info *, struct erase_info *); |
76 | static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **, | 76 | static int slram_point(struct mtd_info *, loff_t, size_t, size_t *, void **, |
77 | resource_size_t *); | 77 | resource_size_t *); |
78 | static void slram_unpoint(struct mtd_info *, loff_t, size_t); | 78 | static int slram_unpoint(struct mtd_info *, loff_t, size_t); |
79 | static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); | 79 | static int slram_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); |
80 | static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); | 80 | static int slram_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); |
81 | 81 | ||
@@ -83,21 +83,13 @@ static int slram_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
83 | { | 83 | { |
84 | slram_priv_t *priv = mtd->priv; | 84 | slram_priv_t *priv = mtd->priv; |
85 | 85 | ||
86 | if (instr->addr + instr->len > mtd->size) { | ||
87 | return(-EINVAL); | ||
88 | } | ||
89 | |||
90 | memset(priv->start + instr->addr, 0xff, instr->len); | 86 | memset(priv->start + instr->addr, 0xff, instr->len); |
91 | |||
92 | /* This'll catch a few races. Free the thing before returning :) | 87 | /* This'll catch a few races. Free the thing before returning :) |
93 | * I don't feel at all ashamed. This kind of thing is possible anyway | 88 | * I don't feel at all ashamed. This kind of thing is possible anyway |
94 | * with flash, but unlikely. | 89 | * with flash, but unlikely. |
95 | */ | 90 | */ |
96 | |||
97 | instr->state = MTD_ERASE_DONE; | 91 | instr->state = MTD_ERASE_DONE; |
98 | |||
99 | mtd_erase_callback(instr); | 92 | mtd_erase_callback(instr); |
100 | |||
101 | return(0); | 93 | return(0); |
102 | } | 94 | } |
103 | 95 | ||
@@ -106,20 +98,14 @@ static int slram_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
106 | { | 98 | { |
107 | slram_priv_t *priv = mtd->priv; | 99 | slram_priv_t *priv = mtd->priv; |
108 | 100 | ||
109 | /* can we return a physical address with this driver? */ | ||
110 | if (phys) | ||
111 | return -EINVAL; | ||
112 | |||
113 | if (from + len > mtd->size) | ||
114 | return -EINVAL; | ||
115 | |||
116 | *virt = priv->start + from; | 101 | *virt = priv->start + from; |
117 | *retlen = len; | 102 | *retlen = len; |
118 | return(0); | 103 | return(0); |
119 | } | 104 | } |
120 | 105 | ||
121 | static void slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | 106 | static int slram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) |
122 | { | 107 | { |
108 | return 0; | ||
123 | } | 109 | } |
124 | 110 | ||
125 | static int slram_read(struct mtd_info *mtd, loff_t from, size_t len, | 111 | static int slram_read(struct mtd_info *mtd, loff_t from, size_t len, |
@@ -127,14 +113,7 @@ static int slram_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
127 | { | 113 | { |
128 | slram_priv_t *priv = mtd->priv; | 114 | slram_priv_t *priv = mtd->priv; |
129 | 115 | ||
130 | if (from > mtd->size) | ||
131 | return -EINVAL; | ||
132 | |||
133 | if (from + len > mtd->size) | ||
134 | len = mtd->size - from; | ||
135 | |||
136 | memcpy(buf, priv->start + from, len); | 116 | memcpy(buf, priv->start + from, len); |
137 | |||
138 | *retlen = len; | 117 | *retlen = len; |
139 | return(0); | 118 | return(0); |
140 | } | 119 | } |
@@ -144,11 +123,7 @@ static int slram_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
144 | { | 123 | { |
145 | slram_priv_t *priv = mtd->priv; | 124 | slram_priv_t *priv = mtd->priv; |
146 | 125 | ||
147 | if (to + len > mtd->size) | ||
148 | return -EINVAL; | ||
149 | |||
150 | memcpy(priv->start + to, buf, len); | 126 | memcpy(priv->start + to, buf, len); |
151 | |||
152 | *retlen = len; | 127 | *retlen = len; |
153 | return(0); | 128 | return(0); |
154 | } | 129 | } |
@@ -199,11 +174,11 @@ static int register_device(char *name, unsigned long start, unsigned long length | |||
199 | (*curmtd)->mtdinfo->name = name; | 174 | (*curmtd)->mtdinfo->name = name; |
200 | (*curmtd)->mtdinfo->size = length; | 175 | (*curmtd)->mtdinfo->size = length; |
201 | (*curmtd)->mtdinfo->flags = MTD_CAP_RAM; | 176 | (*curmtd)->mtdinfo->flags = MTD_CAP_RAM; |
202 | (*curmtd)->mtdinfo->erase = slram_erase; | 177 | (*curmtd)->mtdinfo->_erase = slram_erase; |
203 | (*curmtd)->mtdinfo->point = slram_point; | 178 | (*curmtd)->mtdinfo->_point = slram_point; |
204 | (*curmtd)->mtdinfo->unpoint = slram_unpoint; | 179 | (*curmtd)->mtdinfo->_unpoint = slram_unpoint; |
205 | (*curmtd)->mtdinfo->read = slram_read; | 180 | (*curmtd)->mtdinfo->_read = slram_read; |
206 | (*curmtd)->mtdinfo->write = slram_write; | 181 | (*curmtd)->mtdinfo->_write = slram_write; |
207 | (*curmtd)->mtdinfo->owner = THIS_MODULE; | 182 | (*curmtd)->mtdinfo->owner = THIS_MODULE; |
208 | (*curmtd)->mtdinfo->type = MTD_RAM; | 183 | (*curmtd)->mtdinfo->type = MTD_RAM; |
209 | (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; | 184 | (*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ; |
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c new file mode 100644 index 000000000000..797d43cd3550 --- /dev/null +++ b/drivers/mtd/devices/spear_smi.c | |||
@@ -0,0 +1,1147 @@ | |||
1 | /* | ||
2 | * SMI (Serial Memory Controller) device driver for Serial NOR Flash on | ||
3 | * SPEAr platform | ||
4 | * The serial nor interface is largely based on drivers/mtd/m25p80.c, | ||
5 | * however the SPI interface has been replaced by SMI. | ||
6 | * | ||
7 | * Copyright © 2010 STMicroelectronics. | ||
8 | * Ashish Priyadarshi | ||
9 | * Shiraz Hashim <shiraz.hashim@st.com> | ||
10 | * | ||
11 | * This file is licensed under the terms of the GNU General Public | ||
12 | * License version 2. This program is licensed "as is" without any | ||
13 | * warranty of any kind, whether express or implied. | ||
14 | */ | ||
15 | |||
16 | #include <linux/clk.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/ioport.h> | ||
24 | #include <linux/jiffies.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/param.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/mtd/mtd.h> | ||
30 | #include <linux/mtd/partitions.h> | ||
31 | #include <linux/mtd/spear_smi.h> | ||
32 | #include <linux/mutex.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/slab.h> | ||
35 | #include <linux/wait.h> | ||
36 | #include <linux/of.h> | ||
37 | #include <linux/of_address.h> | ||
38 | |||
39 | /* SMI clock rate */ | ||
40 | #define SMI_MAX_CLOCK_FREQ 50000000 /* 50 MHz */ | ||
41 | |||
42 | /* Max timeout to safely come out of an erase or write busy condition */ | ||
43 | #define SMI_PROBE_TIMEOUT (HZ / 10) | ||
44 | #define SMI_MAX_TIME_OUT (3 * HZ) | ||
45 | |||
46 | /* timeout for command completion */ | ||
47 | #define SMI_CMD_TIMEOUT (HZ / 10) | ||
48 | |||
49 | /* registers of smi */ | ||
50 | #define SMI_CR1 0x0 /* SMI control register 1 */ | ||
51 | #define SMI_CR2 0x4 /* SMI control register 2 */ | ||
52 | #define SMI_SR 0x8 /* SMI status register */ | ||
53 | #define SMI_TR 0xC /* SMI transmit register */ | ||
54 | #define SMI_RR 0x10 /* SMI receive register */ | ||
55 | |||
56 | /* defines for control_reg 1 */ | ||
57 | #define BANK_EN (0xF << 0) /* enables all banks */ | ||
58 | #define DSEL_TIME (0x6 << 4) /* Deselect time 6 + 1 SMI_CK periods */ | ||
59 | #define SW_MODE (0x1 << 28) /* enables SW Mode */ | ||
60 | #define WB_MODE (0x1 << 29) /* Write Burst Mode */ | ||
61 | #define FAST_MODE (0x1 << 15) /* Fast Mode */ | ||
62 | #define HOLD1 (0x1 << 16) /* Clock Hold period selection */ | ||
63 | |||
64 | /* defines for control_reg 2 */ | ||
65 | #define SEND (0x1 << 7) /* Send data */ | ||
66 | #define TFIE (0x1 << 8) /* Transmission Flag Interrupt Enable */ | ||
67 | #define WCIE (0x1 << 9) /* Write Complete Interrupt Enable */ | ||
68 | #define RD_STATUS_REG (0x1 << 10) /* reads status reg */ | ||
69 | #define WE (0x1 << 11) /* Write Enable */ | ||
70 | |||
71 | #define TX_LEN_SHIFT 0 | ||
72 | #define RX_LEN_SHIFT 4 | ||
73 | #define BANK_SHIFT 12 | ||
74 | |||
75 | /* defines for status register */ | ||
76 | #define SR_WIP 0x1 /* Write in progress */ | ||
77 | #define SR_WEL 0x2 /* Write enable latch */ | ||
78 | #define SR_BP0 0x4 /* Block protect 0 */ | ||
79 | #define SR_BP1 0x8 /* Block protect 1 */ | ||
80 | #define SR_BP2 0x10 /* Block protect 2 */ | ||
81 | #define SR_SRWD 0x80 /* SR write protect */ | ||
82 | #define TFF 0x100 /* Transfer Finished Flag */ | ||
83 | #define WCF 0x200 /* Write Complete Flag */ | ||
84 | #define ERF1 0x400 /* Forbidden Write Request */ | ||
85 | #define ERF2 0x800 /* Forbidden Access */ | ||
86 | |||
87 | #define WM_SHIFT 12 | ||
88 | |||
89 | /* flash opcodes */ | ||
90 | #define OPCODE_RDID 0x9f /* Read JEDEC ID */ | ||
91 | |||
92 | /* Flash Device Ids maintenance section */ | ||
93 | |||
94 | /* data structure to maintain flash ids from different vendors */ | ||
95 | struct flash_device { | ||
96 | char *name; | ||
97 | u8 erase_cmd; | ||
98 | u32 device_id; | ||
99 | u32 pagesize; | ||
100 | unsigned long sectorsize; | ||
101 | unsigned long size_in_bytes; | ||
102 | }; | ||
103 | |||
104 | #define FLASH_ID(n, es, id, psize, ssize, size) \ | ||
105 | { \ | ||
106 | .name = n, \ | ||
107 | .erase_cmd = es, \ | ||
108 | .device_id = id, \ | ||
109 | .pagesize = psize, \ | ||
110 | .sectorsize = ssize, \ | ||
111 | .size_in_bytes = size \ | ||
112 | } | ||
113 | |||
114 | static struct flash_device flash_devices[] = { | ||
115 | FLASH_ID("st m25p16" , 0xd8, 0x00152020, 0x100, 0x10000, 0x200000), | ||
116 | FLASH_ID("st m25p32" , 0xd8, 0x00162020, 0x100, 0x10000, 0x400000), | ||
117 | FLASH_ID("st m25p64" , 0xd8, 0x00172020, 0x100, 0x10000, 0x800000), | ||
118 | FLASH_ID("st m25p128" , 0xd8, 0x00182020, 0x100, 0x40000, 0x1000000), | ||
119 | FLASH_ID("st m25p05" , 0xd8, 0x00102020, 0x80 , 0x8000 , 0x10000), | ||
120 | FLASH_ID("st m25p10" , 0xd8, 0x00112020, 0x80 , 0x8000 , 0x20000), | ||
121 | FLASH_ID("st m25p20" , 0xd8, 0x00122020, 0x100, 0x10000, 0x40000), | ||
122 | FLASH_ID("st m25p40" , 0xd8, 0x00132020, 0x100, 0x10000, 0x80000), | ||
123 | FLASH_ID("st m25p80" , 0xd8, 0x00142020, 0x100, 0x10000, 0x100000), | ||
124 | FLASH_ID("st m45pe10" , 0xd8, 0x00114020, 0x100, 0x10000, 0x20000), | ||
125 | FLASH_ID("st m45pe20" , 0xd8, 0x00124020, 0x100, 0x10000, 0x40000), | ||
126 | FLASH_ID("st m45pe40" , 0xd8, 0x00134020, 0x100, 0x10000, 0x80000), | ||
127 | FLASH_ID("st m45pe80" , 0xd8, 0x00144020, 0x100, 0x10000, 0x100000), | ||
128 | FLASH_ID("sp s25fl004" , 0xd8, 0x00120201, 0x100, 0x10000, 0x80000), | ||
129 | FLASH_ID("sp s25fl008" , 0xd8, 0x00130201, 0x100, 0x10000, 0x100000), | ||
130 | FLASH_ID("sp s25fl016" , 0xd8, 0x00140201, 0x100, 0x10000, 0x200000), | ||
131 | FLASH_ID("sp s25fl032" , 0xd8, 0x00150201, 0x100, 0x10000, 0x400000), | ||
132 | FLASH_ID("sp s25fl064" , 0xd8, 0x00160201, 0x100, 0x10000, 0x800000), | ||
133 | FLASH_ID("atmel 25f512" , 0x52, 0x0065001F, 0x80 , 0x8000 , 0x10000), | ||
134 | FLASH_ID("atmel 25f1024" , 0x52, 0x0060001F, 0x100, 0x8000 , 0x20000), | ||
135 | FLASH_ID("atmel 25f2048" , 0x52, 0x0063001F, 0x100, 0x10000, 0x40000), | ||
136 | FLASH_ID("atmel 25f4096" , 0x52, 0x0064001F, 0x100, 0x10000, 0x80000), | ||
137 | FLASH_ID("atmel 25fs040" , 0xd7, 0x0004661F, 0x100, 0x10000, 0x80000), | ||
138 | FLASH_ID("mac 25l512" , 0xd8, 0x001020C2, 0x010, 0x10000, 0x10000), | ||
139 | FLASH_ID("mac 25l1005" , 0xd8, 0x001120C2, 0x010, 0x10000, 0x20000), | ||
140 | FLASH_ID("mac 25l2005" , 0xd8, 0x001220C2, 0x010, 0x10000, 0x40000), | ||
141 | FLASH_ID("mac 25l4005" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000), | ||
142 | FLASH_ID("mac 25l4005a" , 0xd8, 0x001320C2, 0x010, 0x10000, 0x80000), | ||
143 | FLASH_ID("mac 25l8005" , 0xd8, 0x001420C2, 0x010, 0x10000, 0x100000), | ||
144 | FLASH_ID("mac 25l1605" , 0xd8, 0x001520C2, 0x100, 0x10000, 0x200000), | ||
145 | FLASH_ID("mac 25l1605a" , 0xd8, 0x001520C2, 0x010, 0x10000, 0x200000), | ||
146 | FLASH_ID("mac 25l3205" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000), | ||
147 | FLASH_ID("mac 25l3205a" , 0xd8, 0x001620C2, 0x100, 0x10000, 0x400000), | ||
148 | FLASH_ID("mac 25l6405" , 0xd8, 0x001720C2, 0x100, 0x10000, 0x800000), | ||
149 | }; | ||
150 | |||
151 | /* Define spear specific structures */ | ||
152 | |||
153 | struct spear_snor_flash; | ||
154 | |||
155 | /** | ||
156 | * struct spear_smi - Structure for SMI Device | ||
157 | * | ||
158 | * @clk: functional clock | ||
159 | * @status: current status register of SMI. | ||
160 | * @clk_rate: functional clock rate of SMI (default: SMI_MAX_CLOCK_FREQ) | ||
161 | * @lock: lock to prevent parallel access of SMI. | ||
162 | * @io_base: base address for registers of SMI. | ||
163 | * @pdev: platform device | ||
164 | * @cmd_complete: queue to wait for command completion of NOR-flash. | ||
165 | * @num_flashes: number of flashes actually present on board. | ||
166 | * @flash: separate structure for each Serial NOR-flash attached to SMI. | ||
167 | */ | ||
168 | struct spear_smi { | ||
169 | struct clk *clk; | ||
170 | u32 status; | ||
171 | unsigned long clk_rate; | ||
172 | struct mutex lock; | ||
173 | void __iomem *io_base; | ||
174 | struct platform_device *pdev; | ||
175 | wait_queue_head_t cmd_complete; | ||
176 | u32 num_flashes; | ||
177 | struct spear_snor_flash *flash[MAX_NUM_FLASH_CHIP]; | ||
178 | }; | ||
179 | |||
180 | /** | ||
181 | * struct spear_snor_flash - Structure for Serial NOR Flash | ||
182 | * | ||
183 | * @bank: Bank number(0, 1, 2, 3) for each NOR-flash. | ||
184 | * @dev_id: Device ID of NOR-flash. | ||
185 | * @lock: lock to manage flash read, write and erase operations | ||
186 | * @mtd: MTD info for each NOR-flash. | ||
187 | * @num_parts: Total number of partitions in each bank of NOR-flash. | ||
188 | * @parts: Partition info for each bank of NOR-flash. | ||
189 | * @page_size: Page size of NOR-flash. | ||
190 | * @base_addr: Base address of NOR-flash. | ||
191 | * @erase_cmd: erase command may vary on different flash types | ||
192 | * @fast_mode: flash supports read in fast mode | ||
193 | */ | ||
194 | struct spear_snor_flash { | ||
195 | u32 bank; | ||
196 | u32 dev_id; | ||
197 | struct mutex lock; | ||
198 | struct mtd_info mtd; | ||
199 | u32 num_parts; | ||
200 | struct mtd_partition *parts; | ||
201 | u32 page_size; | ||
202 | void __iomem *base_addr; | ||
203 | u8 erase_cmd; | ||
204 | u8 fast_mode; | ||
205 | }; | ||
206 | |||
207 | static inline struct spear_snor_flash *get_flash_data(struct mtd_info *mtd) | ||
208 | { | ||
209 | return container_of(mtd, struct spear_snor_flash, mtd); | ||
210 | } | ||
211 | |||
212 | /** | ||
213 | * spear_smi_read_sr - Read status register of flash through SMI | ||
214 | * @dev: structure of SMI information. | ||
215 | * @bank: bank to which flash is connected | ||
216 | * | ||
217 | * This routine will return the status register of the flash chip present at the | ||
218 | * given bank. | ||
219 | */ | ||
220 | static int spear_smi_read_sr(struct spear_smi *dev, u32 bank) | ||
221 | { | ||
222 | int ret; | ||
223 | u32 ctrlreg1; | ||
224 | |||
225 | mutex_lock(&dev->lock); | ||
226 | dev->status = 0; /* Will be set in interrupt handler */ | ||
227 | |||
228 | ctrlreg1 = readl(dev->io_base + SMI_CR1); | ||
229 | /* program smi in hw mode */ | ||
230 | writel(ctrlreg1 & ~(SW_MODE | WB_MODE), dev->io_base + SMI_CR1); | ||
231 | |||
232 | /* performing a rsr instruction in hw mode */ | ||
233 | writel((bank << BANK_SHIFT) | RD_STATUS_REG | TFIE, | ||
234 | dev->io_base + SMI_CR2); | ||
235 | |||
236 | /* wait for tff */ | ||
237 | ret = wait_event_interruptible_timeout(dev->cmd_complete, | ||
238 | dev->status & TFF, SMI_CMD_TIMEOUT); | ||
239 | |||
240 | /* copy dev->status (lower 16 bits) in order to release lock */ | ||
241 | if (ret > 0) | ||
242 | ret = dev->status & 0xffff; | ||
243 | else | ||
244 | ret = -EIO; | ||
245 | |||
246 | /* restore the ctrl regs state */ | ||
247 | writel(ctrlreg1, dev->io_base + SMI_CR1); | ||
248 | writel(0, dev->io_base + SMI_CR2); | ||
249 | mutex_unlock(&dev->lock); | ||
250 | |||
251 | return ret; | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * spear_smi_wait_till_ready - wait till flash is ready | ||
256 | * @dev: structure of SMI information. | ||
257 | * @bank: flash corresponding to this bank | ||
258 | * @timeout: timeout for busy wait condition | ||
259 | * | ||
260 | * This routine polls the WIP (write in progress) bit in the status register | ||
261 | * and returns 0 once the flash is ready, otherwise the last status read. | ||
262 | */ | ||
263 | static int spear_smi_wait_till_ready(struct spear_smi *dev, u32 bank, | ||
264 | unsigned long timeout) | ||
265 | { | ||
266 | unsigned long finish; | ||
267 | int status; | ||
268 | |||
269 | finish = jiffies + timeout; | ||
270 | do { | ||
271 | status = spear_smi_read_sr(dev, bank); | ||
272 | if (status < 0) | ||
273 | continue; /* try till timeout */ | ||
274 | else if (!(status & SR_WIP)) | ||
275 | return 0; | ||
276 | |||
277 | cond_resched(); | ||
278 | } while (!time_after_eq(jiffies, finish)); | ||
279 | |||
280 | dev_err(&dev->pdev->dev, "smi controller is busy, timeout\n"); | ||
281 | return status; | ||
282 | } | ||
283 | |||
284 | /** | ||
285 | * spear_smi_int_handler - SMI Interrupt Handler. | ||
286 | * @irq: irq number | ||
287 | * @dev_id: structure of SMI device, embedded in dev_id. | ||
288 | * | ||
289 | * The handler clears all interrupt conditions and records the status in | ||
290 | * dev->status which is used by the driver later. | ||
291 | */ | ||
292 | static irqreturn_t spear_smi_int_handler(int irq, void *dev_id) | ||
293 | { | ||
294 | u32 status = 0; | ||
295 | struct spear_smi *dev = dev_id; | ||
296 | |||
297 | status = readl(dev->io_base + SMI_SR); | ||
298 | |||
299 | if (unlikely(!status)) | ||
300 | return IRQ_NONE; | ||
301 | |||
302 | /* clear all interrupt conditions */ | ||
303 | writel(0, dev->io_base + SMI_SR); | ||
304 | |||
305 | /* copy the status register in dev->status */ | ||
306 | dev->status |= status; | ||
307 | |||
308 | /* send the completion */ | ||
309 | wake_up_interruptible(&dev->cmd_complete); | ||
310 | |||
311 | return IRQ_HANDLED; | ||
312 | } | ||
313 | |||
314 | /** | ||
315 | * spear_smi_hw_init - initializes the smi controller. | ||
316 | * @dev: structure of smi device | ||
317 | * | ||
318 | * This routine initializes the smi controller with the default values. | ||
319 | */ | ||
320 | static void spear_smi_hw_init(struct spear_smi *dev) | ||
321 | { | ||
322 | unsigned long rate = 0; | ||
323 | u32 prescale = 0; | ||
324 | u32 val; | ||
325 | |||
326 | rate = clk_get_rate(dev->clk); | ||
327 | |||
328 | /* functional clock of smi */ | ||
329 | prescale = DIV_ROUND_UP(rate, dev->clk_rate); | ||
330 | |||
331 | /* | ||
332 | * setting the standard values, fast mode, prescaler for | ||
333 | * SMI_MAX_CLOCK_FREQ (50MHz) operation and bank enable | ||
334 | */ | ||
335 | val = HOLD1 | BANK_EN | DSEL_TIME | (prescale << 8); | ||
336 | |||
337 | mutex_lock(&dev->lock); | ||
338 | writel(val, dev->io_base + SMI_CR1); | ||
339 | mutex_unlock(&dev->lock); | ||
340 | } | ||
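To make the prescaler computation above concrete, here is a standalone sketch. The 166 MHz source clock is only an assumed example; the real rate comes from clk_get_rate() on the SMI clock, and the target rate from platform data (capped at SMI_MAX_CLOCK_FREQ).

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long rate = 166000000;		/* assumed AHB/source clock, Hz */
	unsigned long clk_rate = 50000000;	/* SMI_MAX_CLOCK_FREQ, Hz */
	unsigned long prescale = DIV_ROUND_UP(rate, clk_rate);

	/* prescale = 4, so the SMI clock is 41.5 MHz, never above 50 MHz */
	printf("prescale = %lu -> smi clock = %lu Hz\n", prescale, rate / prescale);
	return 0;
}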
341 | |||
342 | /** | ||
343 | * get_flash_index - match chip id from a flash list. | ||
344 | * @flash_id: a valid nor flash chip id obtained from board. | ||
345 | * | ||
346 | * Try to validate the chip id by matching it against the list of supported | ||
347 | * flash devices. On success the index into the flash devices array is | ||
348 | * returned, otherwise a negative error code. | ||
349 | */ | ||
350 | static int get_flash_index(u32 flash_id) | ||
351 | { | ||
352 | int index; | ||
353 | |||
354 | /* Matches chip-id to entire list of 'serial-nor flash' ids */ | ||
355 | for (index = 0; index < ARRAY_SIZE(flash_devices); index++) { | ||
356 | if (flash_devices[index].device_id == flash_id) | ||
357 | return index; | ||
358 | } | ||
359 | |||
360 | /* Memory chip is not listed and not supported */ | ||
361 | return -ENODEV; | ||
362 | } | ||
363 | |||
364 | /** | ||
365 | * spear_smi_write_enable - Enable the flash to do write operation | ||
366 | * @dev: structure of SMI device | ||
367 | * @bank: enable write for flash connected to this bank | ||
368 | * | ||
369 | * Set write enable latch with Write Enable command. | ||
370 | * Returns 0 on success. | ||
371 | */ | ||
372 | static int spear_smi_write_enable(struct spear_smi *dev, u32 bank) | ||
373 | { | ||
374 | int ret; | ||
375 | u32 ctrlreg1; | ||
376 | |||
377 | mutex_lock(&dev->lock); | ||
378 | dev->status = 0; /* Will be set in interrupt handler */ | ||
379 | |||
380 | ctrlreg1 = readl(dev->io_base + SMI_CR1); | ||
381 | /* program smi in h/w mode */ | ||
382 | writel(ctrlreg1 & ~SW_MODE, dev->io_base + SMI_CR1); | ||
383 | |||
384 | /* give the flash, write enable command */ | ||
385 | writel((bank << BANK_SHIFT) | WE | TFIE, dev->io_base + SMI_CR2); | ||
386 | |||
387 | ret = wait_event_interruptible_timeout(dev->cmd_complete, | ||
388 | dev->status & TFF, SMI_CMD_TIMEOUT); | ||
389 | |||
390 | /* restore the ctrl regs state */ | ||
391 | writel(ctrlreg1, dev->io_base + SMI_CR1); | ||
392 | writel(0, dev->io_base + SMI_CR2); | ||
393 | |||
394 | if (ret <= 0) { | ||
395 | ret = -EIO; | ||
396 | dev_err(&dev->pdev->dev, | ||
397 | "smi controller failed on write enable\n"); | ||
398 | } else { | ||
399 | /* check whether write mode status is set for required bank */ | ||
400 | if (dev->status & (1 << (bank + WM_SHIFT))) | ||
401 | ret = 0; | ||
402 | else { | ||
403 | dev_err(&dev->pdev->dev, "couldn't enable write\n"); | ||
404 | ret = -EIO; | ||
405 | } | ||
406 | } | ||
407 | |||
408 | mutex_unlock(&dev->lock); | ||
409 | return ret; | ||
410 | } | ||
411 | |||
412 | static inline u32 | ||
413 | get_sector_erase_cmd(struct spear_snor_flash *flash, u32 offset) | ||
414 | { | ||
415 | u32 cmd; | ||
416 | u8 *x = (u8 *)&cmd; | ||
417 | |||
418 | x[0] = flash->erase_cmd; | ||
419 | x[1] = offset >> 16; | ||
420 | x[2] = offset >> 8; | ||
421 | x[3] = offset; | ||
422 | |||
423 | return cmd; | ||
424 | } | ||
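The helper above packs the erase opcode and a 24-bit sector address into one word, byte by byte, so the opcode is transmitted first, followed by the address MSB-first. A standalone sketch of the same packing, assuming a little-endian host (as on SPEAr) and a hypothetical 0xd8 opcode rather than a value taken from flash_devices[]:

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_erase_cmd(uint8_t opcode, uint32_t offset)
{
	uint32_t cmd;
	uint8_t *x = (uint8_t *)&cmd;

	x[0] = opcode;		/* erase opcode goes out first */
	x[1] = offset >> 16;	/* 24-bit sector address, MSB first */
	x[2] = offset >> 8;
	x[3] = offset;

	return cmd;
}

int main(void)
{
	/* offset 0x010000 -> bytes d8 01 00 00 in memory */
	printf("cmd word: 0x%08x\n", (unsigned int)pack_erase_cmd(0xd8, 0x010000));
	return 0;
}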
425 | |||
426 | /** | ||
427 | * spear_smi_erase_sector - erase one sector of flash | ||
428 | * @dev: structure of SMI information | ||
429 | * @bank: bank to which this command needs to be sent | ||
430 | * @command: erase command to be sent | ||
431 | * @bytes: size of the command | ||
432 | * | ||
433 | * Erase one sector of flash memory. The sector address is embedded in | ||
434 | * @command and may be any address within the sector to be erased. | ||
435 | * Returns 0 if successful, non-zero otherwise. | ||
436 | */ | ||
437 | static int spear_smi_erase_sector(struct spear_smi *dev, | ||
438 | u32 bank, u32 command, u32 bytes) | ||
439 | { | ||
440 | u32 ctrlreg1 = 0; | ||
441 | int ret; | ||
442 | |||
443 | ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT); | ||
444 | if (ret) | ||
445 | return ret; | ||
446 | |||
447 | ret = spear_smi_write_enable(dev, bank); | ||
448 | if (ret) | ||
449 | return ret; | ||
450 | |||
451 | mutex_lock(&dev->lock); | ||
452 | |||
453 | ctrlreg1 = readl(dev->io_base + SMI_CR1); | ||
454 | writel((ctrlreg1 | SW_MODE) & ~WB_MODE, dev->io_base + SMI_CR1); | ||
455 | |||
456 | /* send command in sw mode */ | ||
457 | writel(command, dev->io_base + SMI_TR); | ||
458 | |||
459 | writel((bank << BANK_SHIFT) | SEND | TFIE | (bytes << TX_LEN_SHIFT), | ||
460 | dev->io_base + SMI_CR2); | ||
461 | |||
462 | ret = wait_event_interruptible_timeout(dev->cmd_complete, | ||
463 | dev->status & TFF, SMI_CMD_TIMEOUT); | ||
464 | |||
465 | if (ret <= 0) { | ||
466 | ret = -EIO; | ||
467 | dev_err(&dev->pdev->dev, "sector erase failed\n"); | ||
468 | } else | ||
469 | ret = 0; /* success */ | ||
470 | |||
471 | /* restore ctrl regs */ | ||
472 | writel(ctrlreg1, dev->io_base + SMI_CR1); | ||
473 | writel(0, dev->io_base + SMI_CR2); | ||
474 | |||
475 | mutex_unlock(&dev->lock); | ||
476 | return ret; | ||
477 | } | ||
478 | |||
479 | /** | ||
480 | * spear_mtd_erase - perform flash erase operation as requested by user | ||
481 | * @mtd: Provides the memory characteristics | ||
482 | * @e_info: Provides the erase information | ||
483 | * | ||
484 | * Erase an address range on the flash chip. The address range may span | ||
485 | * one or more erase sectors. Return an error if there is a problem erasing. | ||
486 | */ | ||
487 | static int spear_mtd_erase(struct mtd_info *mtd, struct erase_info *e_info) | ||
488 | { | ||
489 | struct spear_snor_flash *flash = get_flash_data(mtd); | ||
490 | struct spear_smi *dev = mtd->priv; | ||
491 | u32 addr, command, bank; | ||
492 | int len, ret; | ||
493 | |||
494 | if (!flash || !dev) | ||
495 | return -ENODEV; | ||
496 | |||
497 | bank = flash->bank; | ||
498 | if (bank > dev->num_flashes - 1) { | ||
499 | dev_err(&dev->pdev->dev, "Invalid Bank Num\n"); | ||
500 | return -EINVAL; | ||
501 | } | ||
502 | |||
503 | addr = e_info->addr; | ||
504 | len = e_info->len; | ||
505 | |||
506 | mutex_lock(&flash->lock); | ||
507 | |||
508 | /* now erase sectors in loop */ | ||
509 | while (len) { | ||
510 | command = get_sector_erase_cmd(flash, addr); | ||
511 | /* preparing the command for flash */ | ||
512 | ret = spear_smi_erase_sector(dev, bank, command, 4); | ||
513 | if (ret) { | ||
514 | e_info->state = MTD_ERASE_FAILED; | ||
515 | mutex_unlock(&flash->lock); | ||
516 | return ret; | ||
517 | } | ||
518 | addr += mtd->erasesize; | ||
519 | len -= mtd->erasesize; | ||
520 | } | ||
521 | |||
522 | mutex_unlock(&flash->lock); | ||
523 | e_info->state = MTD_ERASE_DONE; | ||
524 | mtd_erase_callback(e_info); | ||
525 | |||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * spear_mtd_read - performs flash read operation as requested by the user | ||
531 | * @mtd: MTD information of the memory bank | ||
532 | * @from: Address from which to start read | ||
533 | * @len: Number of bytes to be read | ||
534 | * @retlen: Fills the Number of bytes actually read | ||
535 | * @buf: Fills this after reading | ||
536 | * | ||
537 | * Read an address range from the flash chip. The address range | ||
538 | * may be any size provided it is within the physical boundaries. | ||
539 | * Returns 0 on success, non zero otherwise | ||
540 | */ | ||
541 | static int spear_mtd_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
542 | size_t *retlen, u8 *buf) | ||
543 | { | ||
544 | struct spear_snor_flash *flash = get_flash_data(mtd); | ||
545 | struct spear_smi *dev = mtd->priv; | ||
546 | void *src; | ||
547 | u32 ctrlreg1, val; | ||
548 | int ret; | ||
549 | |||
550 | if (!flash || !dev) | ||
551 | return -ENODEV; | ||
552 | |||
553 | if (flash->bank > dev->num_flashes - 1) { | ||
554 | dev_err(&dev->pdev->dev, "Invalid Bank Num\n"); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | |||
558 | /* select address as per bank number */ | ||
559 | src = flash->base_addr + from; | ||
560 | |||
561 | mutex_lock(&flash->lock); | ||
562 | |||
563 | /* wait till previous write/erase is done. */ | ||
564 | ret = spear_smi_wait_till_ready(dev, flash->bank, SMI_MAX_TIME_OUT); | ||
565 | if (ret) { | ||
566 | mutex_unlock(&flash->lock); | ||
567 | return ret; | ||
568 | } | ||
569 | |||
570 | mutex_lock(&dev->lock); | ||
571 | /* put smi in hw mode, not write burst mode */ | ||
572 | ctrlreg1 = val = readl(dev->io_base + SMI_CR1); | ||
573 | val &= ~(SW_MODE | WB_MODE); | ||
574 | if (flash->fast_mode) | ||
575 | val |= FAST_MODE; | ||
576 | |||
577 | writel(val, dev->io_base + SMI_CR1); | ||
578 | |||
579 | memcpy_fromio(buf, (u8 *)src, len); | ||
580 | |||
581 | /* restore ctrl reg1 */ | ||
582 | writel(ctrlreg1, dev->io_base + SMI_CR1); | ||
583 | mutex_unlock(&dev->lock); | ||
584 | |||
585 | *retlen = len; | ||
586 | mutex_unlock(&flash->lock); | ||
587 | |||
588 | return 0; | ||
589 | } | ||
590 | |||
591 | static inline int spear_smi_cpy_toio(struct spear_smi *dev, u32 bank, | ||
592 | void *dest, const void *src, size_t len) | ||
593 | { | ||
594 | int ret; | ||
595 | u32 ctrlreg1; | ||
596 | |||
597 | /* wait until finished previous write command. */ | ||
598 | ret = spear_smi_wait_till_ready(dev, bank, SMI_MAX_TIME_OUT); | ||
599 | if (ret) | ||
600 | return ret; | ||
601 | |||
602 | /* put smi in write enable */ | ||
603 | ret = spear_smi_write_enable(dev, bank); | ||
604 | if (ret) | ||
605 | return ret; | ||
606 | |||
607 | /* put smi in hw, write burst mode */ | ||
608 | mutex_lock(&dev->lock); | ||
609 | |||
610 | ctrlreg1 = readl(dev->io_base + SMI_CR1); | ||
611 | writel((ctrlreg1 | WB_MODE) & ~SW_MODE, dev->io_base + SMI_CR1); | ||
612 | |||
613 | memcpy_toio(dest, src, len); | ||
614 | |||
615 | writel(ctrlreg1, dev->io_base + SMI_CR1); | ||
616 | |||
617 | mutex_unlock(&dev->lock); | ||
618 | return 0; | ||
619 | } | ||
620 | |||
621 | /** | ||
622 | * spear_mtd_write - performs write operation as requested by the user. | ||
623 | * @mtd: MTD information of the memory bank. | ||
624 | * @to: Address to write. | ||
625 | * @len: Number of bytes to be written. | ||
626 | * @retlen: Number of bytes actually written. | ||
627 | * @buf: Buffer from which the data is taken. | ||
628 | * | ||
629 | * Write an address range to the flash chip. Data must be written in | ||
630 | * flash_page_size chunks. The address range may be any size provided | ||
631 | * it is within the physical boundaries. | ||
632 | * Returns 0 on success, non zero otherwise | ||
633 | */ | ||
634 | static int spear_mtd_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
635 | size_t *retlen, const u8 *buf) | ||
636 | { | ||
637 | struct spear_snor_flash *flash = get_flash_data(mtd); | ||
638 | struct spear_smi *dev = mtd->priv; | ||
639 | void *dest; | ||
640 | u32 page_offset, page_size; | ||
641 | int ret; | ||
642 | |||
643 | if (!flash || !dev) | ||
644 | return -ENODEV; | ||
645 | |||
646 | if (flash->bank > dev->num_flashes - 1) { | ||
647 | dev_err(&dev->pdev->dev, "Invalid Bank Num\n"); | ||
648 | return -EINVAL; | ||
649 | } | ||
650 | |||
651 | /* select address as per bank number */ | ||
652 | dest = flash->base_addr + to; | ||
653 | mutex_lock(&flash->lock); | ||
654 | |||
655 | page_offset = (u32)to % flash->page_size; | ||
656 | |||
657 | /* write in a single shot if all the bytes fit onto one page */ | ||
658 | if (page_offset + len <= flash->page_size) { | ||
659 | ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, len); | ||
660 | if (!ret) | ||
661 | *retlen += len; | ||
662 | } else { | ||
663 | u32 i; | ||
664 | |||
665 | /* the size of data remaining on the first page */ | ||
666 | page_size = flash->page_size - page_offset; | ||
667 | |||
668 | ret = spear_smi_cpy_toio(dev, flash->bank, dest, buf, | ||
669 | page_size); | ||
670 | if (ret) | ||
671 | goto err_write; | ||
672 | else | ||
673 | *retlen += page_size; | ||
674 | |||
675 | /* write everything in pagesize chunks */ | ||
676 | for (i = page_size; i < len; i += page_size) { | ||
677 | page_size = len - i; | ||
678 | if (page_size > flash->page_size) | ||
679 | page_size = flash->page_size; | ||
680 | |||
681 | ret = spear_smi_cpy_toio(dev, flash->bank, dest + i, | ||
682 | buf + i, page_size); | ||
683 | if (ret) | ||
684 | break; | ||
685 | else | ||
686 | *retlen += page_size; | ||
687 | } | ||
688 | } | ||
689 | |||
690 | err_write: | ||
691 | mutex_unlock(&flash->lock); | ||
692 | |||
693 | return ret; | ||
694 | } | ||
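To illustrate the page chunking above: a write that starts mid-page is split into a head chunk up to the page boundary, then full pages, then a tail. The sketch below walks the multi-page path with made-up numbers (256-byte pages, 600 bytes written at offset 250); the single-page fast path is not shown.

#include <stdio.h>

int main(void)
{
	unsigned int flash_page_size = 256;	/* assumed flash->page_size */
	unsigned int to = 250, len = 600;	/* write 600 bytes at offset 250 */
	unsigned int page_offset = to % flash_page_size;
	unsigned int chunk = flash_page_size - page_offset;
	unsigned int i;

	printf("head chunk: %u bytes\n", chunk);		/* 6 */
	for (i = chunk; i < len; i += chunk) {
		chunk = len - i;
		if (chunk > flash_page_size)
			chunk = flash_page_size;
		printf("chunk at offset %u: %u bytes\n", i, chunk);	/* 256, 256, 82 */
	}
	return 0;
}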
695 | |||
696 | /** | ||
697 | * spear_smi_probe_flash - Detects the NOR Flash chip. | ||
698 | * @dev: structure of SMI information. | ||
699 | * @bank: bank on which flash must be probed | ||
700 | * | ||
701 | * This routine will check whether there exists a flash chip on a given memory | ||
702 | * bank ID. | ||
703 | * Returns the index of the probed flash in the flash devices array. | ||
704 | */ | ||
705 | static int spear_smi_probe_flash(struct spear_smi *dev, u32 bank) | ||
706 | { | ||
707 | int ret; | ||
708 | u32 val = 0; | ||
709 | |||
710 | ret = spear_smi_wait_till_ready(dev, bank, SMI_PROBE_TIMEOUT); | ||
711 | if (ret) | ||
712 | return ret; | ||
713 | |||
714 | mutex_lock(&dev->lock); | ||
715 | |||
716 | dev->status = 0; /* Will be set in interrupt handler */ | ||
717 | /* put smi in sw mode */ | ||
718 | val = readl(dev->io_base + SMI_CR1); | ||
719 | writel(val | SW_MODE, dev->io_base + SMI_CR1); | ||
720 | |||
721 | /* send readid command in sw mode */ | ||
722 | writel(OPCODE_RDID, dev->io_base + SMI_TR); | ||
723 | |||
724 | val = (bank << BANK_SHIFT) | SEND | (1 << TX_LEN_SHIFT) | | ||
725 | (3 << RX_LEN_SHIFT) | TFIE; | ||
726 | writel(val, dev->io_base + SMI_CR2); | ||
727 | |||
728 | /* wait for TFF */ | ||
729 | ret = wait_event_interruptible_timeout(dev->cmd_complete, | ||
730 | dev->status & TFF, SMI_CMD_TIMEOUT); | ||
731 | if (ret <= 0) { | ||
732 | ret = -ENODEV; | ||
733 | goto err_probe; | ||
734 | } | ||
735 | |||
736 | /* get memory chip id */ | ||
737 | val = readl(dev->io_base + SMI_RR); | ||
738 | val &= 0x00ffffff; | ||
739 | ret = get_flash_index(val); | ||
740 | |||
741 | err_probe: | ||
742 | /* clear sw mode */ | ||
743 | val = readl(dev->io_base + SMI_CR1); | ||
744 | writel(val & ~SW_MODE, dev->io_base + SMI_CR1); | ||
745 | |||
746 | mutex_unlock(&dev->lock); | ||
747 | return ret; | ||
748 | } | ||
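After the RDID command completes, the three id bytes read back from SMI_RR are masked to 24 bits and looked up via get_flash_index(). A standalone sketch of that lookup, using an invented id value and table entry rather than real data from the driver's flash_devices[]:

#include <stdio.h>
#include <stdint.h>

struct flash_device_info {
	const char *name;
	uint32_t device_id;
};

/* hypothetical entry, not copied from the driver's table */
static const struct flash_device_info flash_table[] = {
	{ "example-nor", 0x00128020 },
};

int main(void)
{
	uint32_t rr = 0xff128020;	/* pretend SMI_RR readback */
	uint32_t id = rr & 0x00ffffff;	/* keep only the three id bytes */
	unsigned int i;

	for (i = 0; i < sizeof(flash_table) / sizeof(flash_table[0]); i++)
		if (flash_table[i].device_id == id)
			printf("matched %s at index %u\n", flash_table[i].name, i);
	return 0;
}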
749 | |||
750 | |||
751 | #ifdef CONFIG_OF | ||
752 | static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev, | ||
753 | struct device_node *np) | ||
754 | { | ||
755 | struct spear_smi_plat_data *pdata = dev_get_platdata(&pdev->dev); | ||
756 | struct device_node *pp = NULL; | ||
757 | const __be32 *addr; | ||
758 | u32 val; | ||
759 | int len; | ||
760 | int i = 0; | ||
761 | |||
762 | if (!np) | ||
763 | return -ENODEV; | ||
764 | |||
765 | of_property_read_u32(np, "clock-rate", &val); | ||
766 | pdata->clk_rate = val; | ||
767 | |||
768 | pdata->board_flash_info = devm_kzalloc(&pdev->dev, | ||
769 | sizeof(*pdata->board_flash_info) * MAX_NUM_FLASH_CHIP, | ||
770 | GFP_KERNEL); | ||
771 | |||
772 | /* Fill structs for each subnode (flash device) */ | ||
773 | while ((pp = of_get_next_child(np, pp))) { | ||
774 | struct spear_smi_flash_info *flash_info; | ||
775 | |||
776 | flash_info = &pdata->board_flash_info[i]; | ||
777 | pdata->np[i] = pp; | ||
778 | |||
779 | /* Read base-addr and size from DT */ | ||
780 | addr = of_get_property(pp, "reg", &len); | ||
781 | flash_info->mem_base = be32_to_cpup(&addr[0]); | ||
782 | flash_info->size = be32_to_cpup(&addr[1]); | ||
783 | | ||
784 | if (of_get_property(pp, "st,smi-fast-mode", NULL)) | ||
785 | flash_info->fast_mode = 1; | ||
786 | |||
787 | i++; | ||
788 | } | ||
789 | |||
790 | pdata->num_flashes = i; | ||
791 | |||
792 | return 0; | ||
793 | } | ||
794 | #else | ||
795 | static int __devinit spear_smi_probe_config_dt(struct platform_device *pdev, | ||
796 | struct device_node *np) | ||
797 | { | ||
798 | return -ENOSYS; | ||
799 | } | ||
800 | #endif | ||
801 | |||
802 | static int spear_smi_setup_banks(struct platform_device *pdev, | ||
803 | u32 bank, struct device_node *np) | ||
804 | { | ||
805 | struct spear_smi *dev = platform_get_drvdata(pdev); | ||
806 | struct mtd_part_parser_data ppdata = {}; | ||
807 | struct spear_smi_flash_info *flash_info; | ||
808 | struct spear_smi_plat_data *pdata; | ||
809 | struct spear_snor_flash *flash; | ||
810 | struct mtd_partition *parts = NULL; | ||
811 | int count = 0; | ||
812 | int flash_index; | ||
813 | int ret = 0; | ||
814 | |||
815 | pdata = dev_get_platdata(&pdev->dev); | ||
816 | if (bank > pdata->num_flashes - 1) | ||
817 | return -EINVAL; | ||
818 | |||
819 | flash_info = &pdata->board_flash_info[bank]; | ||
820 | if (!flash_info) | ||
821 | return -ENODEV; | ||
822 | |||
823 | flash = kzalloc(sizeof(*flash), GFP_ATOMIC); | ||
824 | if (!flash) | ||
825 | return -ENOMEM; | ||
826 | flash->bank = bank; | ||
827 | flash->fast_mode = flash_info->fast_mode ? 1 : 0; | ||
828 | mutex_init(&flash->lock); | ||
829 | |||
830 | /* verify whether nor flash is really present on board */ | ||
831 | flash_index = spear_smi_probe_flash(dev, bank); | ||
832 | if (flash_index < 0) { | ||
833 | dev_info(&dev->pdev->dev, "smi-nor%d not found\n", bank); | ||
834 | ret = flash_index; | ||
835 | goto err_probe; | ||
836 | } | ||
837 | /* map the memory for nor flash chip */ | ||
838 | flash->base_addr = ioremap(flash_info->mem_base, flash_info->size); | ||
839 | if (!flash->base_addr) { | ||
840 | ret = -EIO; | ||
841 | goto err_probe; | ||
842 | } | ||
843 | |||
844 | dev->flash[bank] = flash; | ||
845 | flash->mtd.priv = dev; | ||
846 | |||
847 | if (flash_info->name) | ||
848 | flash->mtd.name = flash_info->name; | ||
849 | else | ||
850 | flash->mtd.name = flash_devices[flash_index].name; | ||
851 | |||
852 | flash->mtd.type = MTD_NORFLASH; | ||
853 | flash->mtd.writesize = 1; | ||
854 | flash->mtd.flags = MTD_CAP_NORFLASH; | ||
855 | flash->mtd.size = flash_info->size; | ||
856 | flash->mtd.erasesize = flash_devices[flash_index].sectorsize; | ||
857 | flash->page_size = flash_devices[flash_index].pagesize; | ||
858 | flash->mtd.writebufsize = flash->page_size; | ||
859 | flash->erase_cmd = flash_devices[flash_index].erase_cmd; | ||
860 | flash->mtd._erase = spear_mtd_erase; | ||
861 | flash->mtd._read = spear_mtd_read; | ||
862 | flash->mtd._write = spear_mtd_write; | ||
863 | flash->dev_id = flash_devices[flash_index].device_id; | ||
864 | |||
865 | dev_info(&dev->pdev->dev, "mtd .name=%s .size=%llx(%lluM)\n", | ||
866 | flash->mtd.name, flash->mtd.size, | ||
867 | flash->mtd.size / (1024 * 1024)); | ||
868 | |||
869 | dev_info(&dev->pdev->dev, ".erasesize = 0x%x(%uK)\n", | ||
870 | flash->mtd.erasesize, flash->mtd.erasesize / 1024); | ||
871 | |||
872 | #ifndef CONFIG_OF | ||
873 | if (flash_info->partitions) { | ||
874 | parts = flash_info->partitions; | ||
875 | count = flash_info->nr_partitions; | ||
876 | } | ||
877 | #endif | ||
878 | ppdata.of_node = np; | ||
879 | |||
880 | ret = mtd_device_parse_register(&flash->mtd, NULL, &ppdata, parts, | ||
881 | count); | ||
882 | if (ret) { | ||
883 | dev_err(&dev->pdev->dev, "Err MTD partition=%d\n", ret); | ||
884 | goto err_map; | ||
885 | } | ||
886 | |||
887 | return 0; | ||
888 | |||
889 | err_map: | ||
890 | iounmap(flash->base_addr); | ||
891 | |||
892 | err_probe: | ||
893 | kfree(flash); | ||
894 | return ret; | ||
895 | } | ||
896 | |||
897 | /** | ||
898 | * spear_smi_probe - Entry routine | ||
899 | * @pdev: platform device structure | ||
900 | * | ||
901 | * This is the first routine which gets invoked during booting and does all | ||
902 | * initialization/allocation work. The routine looks for available memory banks | ||
903 | * and does the proper initialization for each one found. | ||
904 | * Returns 0 on success, non zero otherwise | ||
905 | */ | ||
906 | static int __devinit spear_smi_probe(struct platform_device *pdev) | ||
907 | { | ||
908 | struct device_node *np = pdev->dev.of_node; | ||
909 | struct spear_smi_plat_data *pdata = NULL; | ||
910 | struct spear_smi *dev; | ||
911 | struct resource *smi_base; | ||
912 | int irq, ret = 0; | ||
913 | int i; | ||
914 | |||
915 | if (np) { | ||
916 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
917 | if (!pdata) { | ||
918 | pr_err("%s: ERROR: no memory\n", __func__); | ||
919 | ret = -ENOMEM; | ||
920 | goto err; | ||
921 | } | ||
922 | pdev->dev.platform_data = pdata; | ||
923 | ret = spear_smi_probe_config_dt(pdev, np); | ||
924 | if (ret) { | ||
925 | ret = -ENODEV; | ||
926 | dev_err(&pdev->dev, "no platform data\n"); | ||
927 | goto err; | ||
928 | } | ||
929 | } else { | ||
930 | pdata = dev_get_platdata(&pdev->dev); | ||
931 | if (!pdata) { | ||
932 | ret = -ENODEV; | ||
933 | dev_err(&pdev->dev, "no platform data\n"); | ||
934 | goto err; | ||
935 | } | ||
936 | } | ||
937 | |||
938 | smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
939 | if (!smi_base) { | ||
940 | ret = -ENODEV; | ||
941 | dev_err(&pdev->dev, "invalid smi base address\n"); | ||
942 | goto err; | ||
943 | } | ||
944 | |||
945 | irq = platform_get_irq(pdev, 0); | ||
946 | if (irq < 0) { | ||
947 | ret = -ENODEV; | ||
948 | dev_err(&pdev->dev, "invalid smi irq\n"); | ||
949 | goto err; | ||
950 | } | ||
951 | |||
952 | dev = kzalloc(sizeof(*dev), GFP_ATOMIC); | ||
953 | if (!dev) { | ||
954 | ret = -ENOMEM; | ||
955 | dev_err(&pdev->dev, "mem alloc fail\n"); | ||
956 | goto err; | ||
957 | } | ||
958 | |||
959 | smi_base = request_mem_region(smi_base->start, resource_size(smi_base), | ||
960 | pdev->name); | ||
961 | if (!smi_base) { | ||
962 | ret = -EBUSY; | ||
963 | dev_err(&pdev->dev, "request mem region fail\n"); | ||
964 | goto err_mem; | ||
965 | } | ||
966 | |||
967 | dev->io_base = ioremap(smi_base->start, resource_size(smi_base)); | ||
968 | if (!dev->io_base) { | ||
969 | ret = -EIO; | ||
970 | dev_err(&pdev->dev, "ioremap fail\n"); | ||
971 | goto err_ioremap; | ||
972 | } | ||
973 | |||
974 | dev->pdev = pdev; | ||
975 | dev->clk_rate = pdata->clk_rate; | ||
976 | |||
977 | if (dev->clk_rate < 0 || dev->clk_rate > SMI_MAX_CLOCK_FREQ) | ||
978 | dev->clk_rate = SMI_MAX_CLOCK_FREQ; | ||
979 | |||
980 | dev->num_flashes = pdata->num_flashes; | ||
981 | |||
982 | if (dev->num_flashes > MAX_NUM_FLASH_CHIP) { | ||
983 | dev_err(&pdev->dev, "exceeding max number of flashes\n"); | ||
984 | dev->num_flashes = MAX_NUM_FLASH_CHIP; | ||
985 | } | ||
986 | |||
987 | dev->clk = clk_get(&pdev->dev, NULL); | ||
988 | if (IS_ERR(dev->clk)) { | ||
989 | ret = PTR_ERR(dev->clk); | ||
990 | goto err_clk; | ||
991 | } | ||
992 | |||
993 | ret = clk_enable(dev->clk); | ||
994 | if (ret) | ||
995 | goto err_clk_enable; | ||
996 | |||
997 | ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev); | ||
998 | if (ret) { | ||
999 | dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n"); | ||
1000 | goto err_irq; | ||
1001 | } | ||
1002 | |||
1003 | mutex_init(&dev->lock); | ||
1004 | init_waitqueue_head(&dev->cmd_complete); | ||
1005 | spear_smi_hw_init(dev); | ||
1006 | platform_set_drvdata(pdev, dev); | ||
1007 | |||
1008 | /* loop for each serial nor-flash which is connected to smi */ | ||
1009 | for (i = 0; i < dev->num_flashes; i++) { | ||
1010 | ret = spear_smi_setup_banks(pdev, i, pdata->np[i]); | ||
1011 | if (ret) { | ||
1012 | dev_err(&dev->pdev->dev, "bank setup failed\n"); | ||
1013 | goto err_bank_setup; | ||
1014 | } | ||
1015 | } | ||
1016 | |||
1017 | return 0; | ||
1018 | |||
1019 | err_bank_setup: | ||
1020 | free_irq(irq, dev); | ||
1021 | platform_set_drvdata(pdev, NULL); | ||
1022 | err_irq: | ||
1023 | clk_disable(dev->clk); | ||
1024 | err_clk_enable: | ||
1025 | clk_put(dev->clk); | ||
1026 | err_clk: | ||
1027 | iounmap(dev->io_base); | ||
1028 | err_ioremap: | ||
1029 | release_mem_region(smi_base->start, resource_size(smi_base)); | ||
1030 | err_mem: | ||
1031 | kfree(dev); | ||
1032 | err: | ||
1033 | return ret; | ||
1034 | } | ||
1035 | |||
1036 | /** | ||
1037 | * spear_smi_remove - Exit routine | ||
1038 | * @pdev: platform device structure | ||
1039 | * | ||
1040 | * Free all allocations and delete the partitions. | ||
1041 | */ | ||
1042 | static int __devexit spear_smi_remove(struct platform_device *pdev) | ||
1043 | { | ||
1044 | struct spear_smi *dev; | ||
1045 | struct spear_smi_plat_data *pdata; | ||
1046 | struct spear_snor_flash *flash; | ||
1047 | struct resource *smi_base; | ||
1048 | int ret; | ||
1049 | int i, irq; | ||
1050 | |||
1051 | dev = platform_get_drvdata(pdev); | ||
1052 | if (!dev) { | ||
1053 | dev_err(&pdev->dev, "dev is null\n"); | ||
1054 | return -ENODEV; | ||
1055 | } | ||
1056 | |||
1057 | pdata = dev_get_platdata(&pdev->dev); | ||
1058 | |||
1059 | /* clean up for all nor flash */ | ||
1060 | for (i = 0; i < dev->num_flashes; i++) { | ||
1061 | flash = dev->flash[i]; | ||
1062 | if (!flash) | ||
1063 | continue; | ||
1064 | |||
1065 | /* clean up mtd stuff */ | ||
1066 | ret = mtd_device_unregister(&flash->mtd); | ||
1067 | if (ret) | ||
1068 | dev_err(&pdev->dev, "error removing mtd\n"); | ||
1069 | |||
1070 | iounmap(flash->base_addr); | ||
1071 | kfree(flash); | ||
1072 | } | ||
1073 | |||
1074 | irq = platform_get_irq(pdev, 0); | ||
1075 | free_irq(irq, dev); | ||
1076 | |||
1077 | clk_disable(dev->clk); | ||
1078 | clk_put(dev->clk); | ||
1079 | iounmap(dev->io_base); | ||
1080 | kfree(dev); | ||
1081 | |||
1082 | smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1083 | release_mem_region(smi_base->start, resource_size(smi_base)); | ||
1084 | platform_set_drvdata(pdev, NULL); | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | int spear_smi_suspend(struct platform_device *pdev, pm_message_t state) | ||
1090 | { | ||
1091 | struct spear_smi *dev = platform_get_drvdata(pdev); | ||
1092 | |||
1093 | if (dev && dev->clk) | ||
1094 | clk_disable(dev->clk); | ||
1095 | |||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1099 | int spear_smi_resume(struct platform_device *pdev) | ||
1100 | { | ||
1101 | struct spear_smi *dev = platform_get_drvdata(pdev); | ||
1102 | int ret = -EPERM; | ||
1103 | |||
1104 | if (dev && dev->clk) | ||
1105 | ret = clk_enable(dev->clk); | ||
1106 | |||
1107 | if (!ret) | ||
1108 | spear_smi_hw_init(dev); | ||
1109 | return ret; | ||
1110 | } | ||
1111 | |||
1112 | #ifdef CONFIG_OF | ||
1113 | static const struct of_device_id spear_smi_id_table[] = { | ||
1114 | { .compatible = "st,spear600-smi" }, | ||
1115 | {} | ||
1116 | }; | ||
1117 | MODULE_DEVICE_TABLE(of, spear_smi_id_table); | ||
1118 | #endif | ||
1119 | |||
1120 | static struct platform_driver spear_smi_driver = { | ||
1121 | .driver = { | ||
1122 | .name = "smi", | ||
1123 | .bus = &platform_bus_type, | ||
1124 | .owner = THIS_MODULE, | ||
1125 | .of_match_table = of_match_ptr(spear_smi_id_table), | ||
1126 | }, | ||
1127 | .probe = spear_smi_probe, | ||
1128 | .remove = __devexit_p(spear_smi_remove), | ||
1129 | .suspend = spear_smi_suspend, | ||
1130 | .resume = spear_smi_resume, | ||
1131 | }; | ||
1132 | |||
1133 | static int spear_smi_init(void) | ||
1134 | { | ||
1135 | return platform_driver_register(&spear_smi_driver); | ||
1136 | } | ||
1137 | module_init(spear_smi_init); | ||
1138 | |||
1139 | static void spear_smi_exit(void) | ||
1140 | { | ||
1141 | platform_driver_unregister(&spear_smi_driver); | ||
1142 | } | ||
1143 | module_exit(spear_smi_exit); | ||
1144 | |||
1145 | MODULE_LICENSE("GPL"); | ||
1146 | MODULE_AUTHOR("Ashish Priyadarshi, Shiraz Hashim <shiraz.hashim@st.com>"); | ||
1147 | MODULE_DESCRIPTION("MTD SMI driver for serial nor flash chips"); | ||
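As an aside, if the tree provides the module_platform_driver() helper, the init/exit registration boilerplate above could be collapsed to a single line, in the same spirit as the module_spi_driver() conversion applied to sst25l further down; this is only a sketch, not part of the patch.

module_platform_driver(spear_smi_driver);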
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c index 5fc198350b94..ab8a2f4c8d60 100644 --- a/drivers/mtd/devices/sst25l.c +++ b/drivers/mtd/devices/sst25l.c | |||
@@ -175,9 +175,6 @@ static int sst25l_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
175 | int err; | 175 | int err; |
176 | 176 | ||
177 | /* Sanity checks */ | 177 | /* Sanity checks */ |
178 | if (instr->addr + instr->len > flash->mtd.size) | ||
179 | return -EINVAL; | ||
180 | |||
181 | if ((uint32_t)instr->len % mtd->erasesize) | 178 | if ((uint32_t)instr->len % mtd->erasesize) |
182 | return -EINVAL; | 179 | return -EINVAL; |
183 | 180 | ||
@@ -223,16 +220,6 @@ static int sst25l_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
223 | unsigned char command[4]; | 220 | unsigned char command[4]; |
224 | int ret; | 221 | int ret; |
225 | 222 | ||
226 | /* Sanity checking */ | ||
227 | if (len == 0) | ||
228 | return 0; | ||
229 | |||
230 | if (from + len > flash->mtd.size) | ||
231 | return -EINVAL; | ||
232 | |||
233 | if (retlen) | ||
234 | *retlen = 0; | ||
235 | |||
236 | spi_message_init(&message); | 223 | spi_message_init(&message); |
237 | memset(&transfer, 0, sizeof(transfer)); | 224 | memset(&transfer, 0, sizeof(transfer)); |
238 | 225 | ||
@@ -274,13 +261,6 @@ static int sst25l_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
274 | int i, j, ret, bytes, copied = 0; | 261 | int i, j, ret, bytes, copied = 0; |
275 | unsigned char command[5]; | 262 | unsigned char command[5]; |
276 | 263 | ||
277 | /* Sanity checks */ | ||
278 | if (!len) | ||
279 | return 0; | ||
280 | |||
281 | if (to + len > flash->mtd.size) | ||
282 | return -EINVAL; | ||
283 | |||
284 | if ((uint32_t)to % mtd->writesize) | 264 | if ((uint32_t)to % mtd->writesize) |
285 | return -EINVAL; | 265 | return -EINVAL; |
286 | 266 | ||
@@ -402,10 +382,11 @@ static int __devinit sst25l_probe(struct spi_device *spi) | |||
402 | flash->mtd.flags = MTD_CAP_NORFLASH; | 382 | flash->mtd.flags = MTD_CAP_NORFLASH; |
403 | flash->mtd.erasesize = flash_info->erase_size; | 383 | flash->mtd.erasesize = flash_info->erase_size; |
404 | flash->mtd.writesize = flash_info->page_size; | 384 | flash->mtd.writesize = flash_info->page_size; |
385 | flash->mtd.writebufsize = flash_info->page_size; | ||
405 | flash->mtd.size = flash_info->page_size * flash_info->nr_pages; | 386 | flash->mtd.size = flash_info->page_size * flash_info->nr_pages; |
406 | flash->mtd.erase = sst25l_erase; | 387 | flash->mtd._erase = sst25l_erase; |
407 | flash->mtd.read = sst25l_read; | 388 | flash->mtd._read = sst25l_read; |
408 | flash->mtd.write = sst25l_write; | 389 | flash->mtd._write = sst25l_write; |
409 | 390 | ||
410 | dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, | 391 | dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name, |
411 | (long long)flash->mtd.size >> 10); | 392 | (long long)flash->mtd.size >> 10); |
@@ -418,9 +399,9 @@ static int __devinit sst25l_probe(struct spi_device *spi) | |||
418 | flash->mtd.numeraseregions); | 399 | flash->mtd.numeraseregions); |
419 | 400 | ||
420 | 401 | ||
421 | ret = mtd_device_parse_register(&flash->mtd, NULL, 0, | 402 | ret = mtd_device_parse_register(&flash->mtd, NULL, NULL, |
422 | data ? data->parts : NULL, | 403 | data ? data->parts : NULL, |
423 | data ? data->nr_parts : 0); | 404 | data ? data->nr_parts : 0); |
424 | if (ret) { | 405 | if (ret) { |
425 | kfree(flash); | 406 | kfree(flash); |
426 | dev_set_drvdata(&spi->dev, NULL); | 407 | dev_set_drvdata(&spi->dev, NULL); |
@@ -450,18 +431,7 @@ static struct spi_driver sst25l_driver = { | |||
450 | .remove = __devexit_p(sst25l_remove), | 431 | .remove = __devexit_p(sst25l_remove), |
451 | }; | 432 | }; |
452 | 433 | ||
453 | static int __init sst25l_init(void) | 434 | module_spi_driver(sst25l_driver); |
454 | { | ||
455 | return spi_register_driver(&sst25l_driver); | ||
456 | } | ||
457 | |||
458 | static void __exit sst25l_exit(void) | ||
459 | { | ||
460 | spi_unregister_driver(&sst25l_driver); | ||
461 | } | ||
462 | |||
463 | module_init(sst25l_init); | ||
464 | module_exit(sst25l_exit); | ||
465 | 435 | ||
466 | MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips"); | 436 | MODULE_DESCRIPTION("MTD SPI driver for SST25L Flash chips"); |
467 | MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, " | 437 | MODULE_AUTHOR("Andre Renaud <andre@bluewatersys.com>, " |
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 28646c95cfb8..3af351484098 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c | |||
@@ -56,7 +56,7 @@ static void inftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
56 | if (memcmp(mtd->name, "DiskOnChip", 10)) | 56 | if (memcmp(mtd->name, "DiskOnChip", 10)) |
57 | return; | 57 | return; |
58 | 58 | ||
59 | if (!mtd->block_isbad) { | 59 | if (!mtd->_block_isbad) { |
60 | printk(KERN_ERR | 60 | printk(KERN_ERR |
61 | "INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" | 61 | "INFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" |
62 | "Please use the new diskonchip driver under the NAND subsystem.\n"); | 62 | "Please use the new diskonchip driver under the NAND subsystem.\n"); |
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c index 536bbceaeaad..d3cfe26beeaa 100644 --- a/drivers/mtd/lpddr/lpddr_cmds.c +++ b/drivers/mtd/lpddr/lpddr_cmds.c | |||
@@ -40,7 +40,7 @@ static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | |||
40 | static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); | 40 | static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
41 | static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | 41 | static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, |
42 | size_t *retlen, void **mtdbuf, resource_size_t *phys); | 42 | size_t *retlen, void **mtdbuf, resource_size_t *phys); |
43 | static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); | 43 | static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len); |
44 | static int get_chip(struct map_info *map, struct flchip *chip, int mode); | 44 | static int get_chip(struct map_info *map, struct flchip *chip, int mode); |
45 | static int chip_ready(struct map_info *map, struct flchip *chip, int mode); | 45 | static int chip_ready(struct map_info *map, struct flchip *chip, int mode); |
46 | static void put_chip(struct map_info *map, struct flchip *chip); | 46 | static void put_chip(struct map_info *map, struct flchip *chip); |
@@ -63,18 +63,18 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) | |||
63 | mtd->type = MTD_NORFLASH; | 63 | mtd->type = MTD_NORFLASH; |
64 | 64 | ||
65 | /* Fill in the default mtd operations */ | 65 | /* Fill in the default mtd operations */ |
66 | mtd->read = lpddr_read; | 66 | mtd->_read = lpddr_read; |
67 | mtd->type = MTD_NORFLASH; | 67 | mtd->type = MTD_NORFLASH; |
68 | mtd->flags = MTD_CAP_NORFLASH; | 68 | mtd->flags = MTD_CAP_NORFLASH; |
69 | mtd->flags &= ~MTD_BIT_WRITEABLE; | 69 | mtd->flags &= ~MTD_BIT_WRITEABLE; |
70 | mtd->erase = lpddr_erase; | 70 | mtd->_erase = lpddr_erase; |
71 | mtd->write = lpddr_write_buffers; | 71 | mtd->_write = lpddr_write_buffers; |
72 | mtd->writev = lpddr_writev; | 72 | mtd->_writev = lpddr_writev; |
73 | mtd->lock = lpddr_lock; | 73 | mtd->_lock = lpddr_lock; |
74 | mtd->unlock = lpddr_unlock; | 74 | mtd->_unlock = lpddr_unlock; |
75 | if (map_is_linear(map)) { | 75 | if (map_is_linear(map)) { |
76 | mtd->point = lpddr_point; | 76 | mtd->_point = lpddr_point; |
77 | mtd->unpoint = lpddr_unpoint; | 77 | mtd->_unpoint = lpddr_unpoint; |
78 | } | 78 | } |
79 | mtd->size = 1 << lpddr->qinfo->DevSizeShift; | 79 | mtd->size = 1 << lpddr->qinfo->DevSizeShift; |
80 | mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; | 80 | mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift; |
@@ -530,14 +530,12 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | |||
530 | struct flchip *chip = &lpddr->chips[chipnum]; | 530 | struct flchip *chip = &lpddr->chips[chipnum]; |
531 | int ret = 0; | 531 | int ret = 0; |
532 | 532 | ||
533 | if (!map->virt || (adr + len > mtd->size)) | 533 | if (!map->virt) |
534 | return -EINVAL; | 534 | return -EINVAL; |
535 | 535 | ||
536 | /* ofs: offset within the first chip that the first read should start */ | 536 | /* ofs: offset within the first chip that the first read should start */ |
537 | ofs = adr - (chipnum << lpddr->chipshift); | 537 | ofs = adr - (chipnum << lpddr->chipshift); |
538 | |||
539 | *mtdbuf = (void *)map->virt + chip->start + ofs; | 538 | *mtdbuf = (void *)map->virt + chip->start + ofs; |
540 | *retlen = 0; | ||
541 | 539 | ||
542 | while (len) { | 540 | while (len) { |
543 | unsigned long thislen; | 541 | unsigned long thislen; |
@@ -575,11 +573,11 @@ static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len, | |||
575 | return 0; | 573 | return 0; |
576 | } | 574 | } |
577 | 575 | ||
578 | static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | 576 | static int lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) |
579 | { | 577 | { |
580 | struct map_info *map = mtd->priv; | 578 | struct map_info *map = mtd->priv; |
581 | struct lpddr_private *lpddr = map->fldrv_priv; | 579 | struct lpddr_private *lpddr = map->fldrv_priv; |
582 | int chipnum = adr >> lpddr->chipshift; | 580 | int chipnum = adr >> lpddr->chipshift, err = 0; |
583 | unsigned long ofs; | 581 | unsigned long ofs; |
584 | 582 | ||
585 | /* ofs: offset within the first chip that the first read should start */ | 583 | /* ofs: offset within the first chip that the first read should start */ |
@@ -603,9 +601,11 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | |||
603 | chip->ref_point_counter--; | 601 | chip->ref_point_counter--; |
604 | if (chip->ref_point_counter == 0) | 602 | if (chip->ref_point_counter == 0) |
605 | chip->state = FL_READY; | 603 | chip->state = FL_READY; |
606 | } else | 604 | } else { |
607 | printk(KERN_WARNING "%s: Warning: unpoint called on non-" | 605 | printk(KERN_WARNING "%s: Warning: unpoint called on non-" |
608 | "pointed region\n", map->name); | 606 | "pointed region\n", map->name); |
607 | err = -EINVAL; | ||
608 | } | ||
609 | 609 | ||
610 | put_chip(map, chip); | 610 | put_chip(map, chip); |
611 | mutex_unlock(&chip->mutex); | 611 | mutex_unlock(&chip->mutex); |
@@ -614,6 +614,8 @@ static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len) | |||
614 | ofs = 0; | 614 | ofs = 0; |
615 | chipnum++; | 615 | chipnum++; |
616 | } | 616 | } |
617 | |||
618 | return err; | ||
617 | } | 619 | } |
618 | 620 | ||
619 | static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, | 621 | static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, |
@@ -637,13 +639,11 @@ static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
637 | int chipnum; | 639 | int chipnum; |
638 | unsigned long ofs, vec_seek, i; | 640 | unsigned long ofs, vec_seek, i; |
639 | int wbufsize = 1 << lpddr->qinfo->BufSizeShift; | 641 | int wbufsize = 1 << lpddr->qinfo->BufSizeShift; |
640 | |||
641 | size_t len = 0; | 642 | size_t len = 0; |
642 | 643 | ||
643 | for (i = 0; i < count; i++) | 644 | for (i = 0; i < count; i++) |
644 | len += vecs[i].iov_len; | 645 | len += vecs[i].iov_len; |
645 | 646 | ||
646 | *retlen = 0; | ||
647 | if (!len) | 647 | if (!len) |
648 | return 0; | 648 | return 0; |
649 | 649 | ||
@@ -688,9 +688,6 @@ static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
688 | ofs = instr->addr; | 688 | ofs = instr->addr; |
689 | len = instr->len; | 689 | len = instr->len; |
690 | 690 | ||
691 | if (ofs > mtd->size || (len + ofs) > mtd->size) | ||
692 | return -EINVAL; | ||
693 | |||
694 | while (len > 0) { | 691 | while (len > 0) { |
695 | ret = do_erase_oneblock(mtd, ofs); | 692 | ret = do_erase_oneblock(mtd, ofs); |
696 | if (ret) | 693 | if (ret) |
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c index 650126c361f1..ef5cde84a8b3 100644 --- a/drivers/mtd/maps/bfin-async-flash.c +++ b/drivers/mtd/maps/bfin-async-flash.c | |||
@@ -164,8 +164,8 @@ static int __devinit bfin_flash_probe(struct platform_device *pdev) | |||
164 | return -ENXIO; | 164 | return -ENXIO; |
165 | } | 165 | } |
166 | 166 | ||
167 | mtd_device_parse_register(state->mtd, part_probe_types, 0, | 167 | mtd_device_parse_register(state->mtd, part_probe_types, NULL, |
168 | pdata->parts, pdata->nr_parts); | 168 | pdata->parts, pdata->nr_parts); |
169 | 169 | ||
170 | platform_set_drvdata(pdev, state); | 170 | platform_set_drvdata(pdev, state); |
171 | 171 | ||
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c index f43b365b848c..080f06053bd4 100644 --- a/drivers/mtd/maps/dc21285.c +++ b/drivers/mtd/maps/dc21285.c | |||
@@ -196,7 +196,7 @@ static int __init init_dc21285(void) | |||
196 | 196 | ||
197 | dc21285_mtd->owner = THIS_MODULE; | 197 | dc21285_mtd->owner = THIS_MODULE; |
198 | 198 | ||
199 | mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0); | 199 | mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0); |
200 | 200 | ||
201 | if(machine_is_ebsa285()) { | 201 | if(machine_is_ebsa285()) { |
202 | /* | 202 | /* |
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c index 33cce895859f..e4de96ba52b3 100644 --- a/drivers/mtd/maps/gpio-addr-flash.c +++ b/drivers/mtd/maps/gpio-addr-flash.c | |||
@@ -252,8 +252,8 @@ static int __devinit gpio_flash_probe(struct platform_device *pdev) | |||
252 | } | 252 | } |
253 | 253 | ||
254 | 254 | ||
255 | mtd_device_parse_register(state->mtd, part_probe_types, 0, | 255 | mtd_device_parse_register(state->mtd, part_probe_types, NULL, |
256 | pdata->parts, pdata->nr_parts); | 256 | pdata->parts, pdata->nr_parts); |
257 | 257 | ||
258 | return 0; | 258 | return 0; |
259 | } | 259 | } |
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c index 49c14187fc66..8ed6cb4529d8 100644 --- a/drivers/mtd/maps/h720x-flash.c +++ b/drivers/mtd/maps/h720x-flash.c | |||
@@ -85,8 +85,8 @@ static int __init h720x_mtd_init(void) | |||
85 | if (mymtd) { | 85 | if (mymtd) { |
86 | mymtd->owner = THIS_MODULE; | 86 | mymtd->owner = THIS_MODULE; |
87 | 87 | ||
88 | mtd_device_parse_register(mymtd, NULL, 0, | 88 | mtd_device_parse_register(mymtd, NULL, NULL, |
89 | h720x_partitions, NUM_PARTITIONS); | 89 | h720x_partitions, NUM_PARTITIONS); |
90 | return 0; | 90 | return 0; |
91 | } | 91 | } |
92 | 92 | ||
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c index f47aedb24366..834a06c56f56 100644 --- a/drivers/mtd/maps/impa7.c +++ b/drivers/mtd/maps/impa7.c | |||
@@ -91,7 +91,7 @@ static int __init init_impa7(void) | |||
91 | if (impa7_mtd[i]) { | 91 | if (impa7_mtd[i]) { |
92 | impa7_mtd[i]->owner = THIS_MODULE; | 92 | impa7_mtd[i]->owner = THIS_MODULE; |
93 | devicesfound++; | 93 | devicesfound++; |
94 | mtd_device_parse_register(impa7_mtd[i], NULL, 0, | 94 | mtd_device_parse_register(impa7_mtd[i], NULL, NULL, |
95 | partitions, | 95 | partitions, |
96 | ARRAY_SIZE(partitions)); | 96 | ARRAY_SIZE(partitions)); |
97 | } | 97 | } |
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c index 08c239604ee4..92e1f41634c7 100644 --- a/drivers/mtd/maps/intel_vr_nor.c +++ b/drivers/mtd/maps/intel_vr_nor.c | |||
@@ -72,7 +72,7 @@ static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p) | |||
72 | { | 72 | { |
73 | /* register the flash bank */ | 73 | /* register the flash bank */ |
74 | /* partition the flash bank */ | 74 | /* partition the flash bank */ |
75 | return mtd_device_parse_register(p->info, NULL, 0, NULL, 0); | 75 | return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0); |
76 | } | 76 | } |
77 | 77 | ||
78 | static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) | 78 | static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) |
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c index fc7d4d0d9a4e..4a41ced0f710 100644 --- a/drivers/mtd/maps/ixp2000.c +++ b/drivers/mtd/maps/ixp2000.c | |||
@@ -226,7 +226,7 @@ static int ixp2000_flash_probe(struct platform_device *dev) | |||
226 | } | 226 | } |
227 | info->mtd->owner = THIS_MODULE; | 227 | info->mtd->owner = THIS_MODULE; |
228 | 228 | ||
229 | err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0); | 229 | err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0); |
230 | if (err) | 230 | if (err) |
231 | goto Error; | 231 | goto Error; |
232 | 232 | ||
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c index 8b5410162d70..e864fc6c58f9 100644 --- a/drivers/mtd/maps/ixp4xx.c +++ b/drivers/mtd/maps/ixp4xx.c | |||
@@ -182,6 +182,9 @@ static int ixp4xx_flash_probe(struct platform_device *dev) | |||
182 | { | 182 | { |
183 | struct flash_platform_data *plat = dev->dev.platform_data; | 183 | struct flash_platform_data *plat = dev->dev.platform_data; |
184 | struct ixp4xx_flash_info *info; | 184 | struct ixp4xx_flash_info *info; |
185 | struct mtd_part_parser_data ppdata = { | ||
186 | .origin = dev->resource->start, | ||
187 | }; | ||
185 | int err = -1; | 188 | int err = -1; |
186 | 189 | ||
187 | if (!plat) | 190 | if (!plat) |
@@ -247,7 +250,7 @@ static int ixp4xx_flash_probe(struct platform_device *dev) | |||
247 | /* Use the fast version */ | 250 | /* Use the fast version */ |
248 | info->map.write = ixp4xx_write16; | 251 | info->map.write = ixp4xx_write16; |
249 | 252 | ||
250 | err = mtd_device_parse_register(info->mtd, probes, dev->resource->start, | 253 | err = mtd_device_parse_register(info->mtd, probes, &ppdata, |
251 | plat->parts, plat->nr_parts); | 254 | plat->parts, plat->nr_parts); |
252 | if (err) { | 255 | if (err) { |
253 | printk(KERN_ERR "Could not parse partitions\n"); | 256 | printk(KERN_ERR "Could not parse partitions\n"); |
diff --git a/drivers/mtd/maps/l440gx.c b/drivers/mtd/maps/l440gx.c index dd0360ba2412..74bd98ee635f 100644 --- a/drivers/mtd/maps/l440gx.c +++ b/drivers/mtd/maps/l440gx.c | |||
@@ -27,17 +27,21 @@ static struct mtd_info *mymtd; | |||
27 | 27 | ||
28 | 28 | ||
29 | /* Is this really the vpp port? */ | 29 | /* Is this really the vpp port? */ |
30 | static DEFINE_SPINLOCK(l440gx_vpp_lock); | ||
31 | static int l440gx_vpp_refcnt; | ||
30 | static void l440gx_set_vpp(struct map_info *map, int vpp) | 32 | static void l440gx_set_vpp(struct map_info *map, int vpp) |
31 | { | 33 | { |
32 | unsigned long l; | 34 | unsigned long flags; |
33 | 35 | ||
34 | l = inl(VPP_PORT); | 36 | spin_lock_irqsave(&l440gx_vpp_lock, flags); |
35 | if (vpp) { | 37 | if (vpp) { |
36 | l |= 1; | 38 | if (++l440gx_vpp_refcnt == 1) /* first nested 'on' */ |
39 | outl(inl(VPP_PORT) | 1, VPP_PORT); | ||
37 | } else { | 40 | } else { |
38 | l &= ~1; | 41 | if (--l440gx_vpp_refcnt == 0) /* last nested 'off' */ |
42 | outl(inl(VPP_PORT) & ~1, VPP_PORT); | ||
39 | } | 43 | } |
40 | outl(l, VPP_PORT); | 44 | spin_unlock_irqrestore(&l440gx_vpp_lock, flags); |
41 | } | 45 | } |
42 | 46 | ||
43 | static struct map_info l440gx_map = { | 47 | static struct map_info l440gx_map = { |
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c index 7b889de9477b..b5401e355745 100644 --- a/drivers/mtd/maps/lantiq-flash.c +++ b/drivers/mtd/maps/lantiq-flash.c | |||
@@ -45,6 +45,7 @@ struct ltq_mtd { | |||
45 | }; | 45 | }; |
46 | 46 | ||
47 | static char ltq_map_name[] = "ltq_nor"; | 47 | static char ltq_map_name[] = "ltq_nor"; |
48 | static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL }; | ||
48 | 49 | ||
49 | static map_word | 50 | static map_word |
50 | ltq_read16(struct map_info *map, unsigned long adr) | 51 | ltq_read16(struct map_info *map, unsigned long adr) |
@@ -168,8 +169,9 @@ ltq_mtd_probe(struct platform_device *pdev) | |||
168 | cfi->addr_unlock1 ^= 1; | 169 | cfi->addr_unlock1 ^= 1; |
169 | cfi->addr_unlock2 ^= 1; | 170 | cfi->addr_unlock2 ^= 1; |
170 | 171 | ||
171 | err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0, | 172 | err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL, |
172 | ltq_mtd_data->parts, ltq_mtd_data->nr_parts); | 173 | ltq_mtd_data->parts, |
174 | ltq_mtd_data->nr_parts); | ||
173 | if (err) { | 175 | if (err) { |
174 | dev_err(&pdev->dev, "failed to add partitions\n"); | 176 | dev_err(&pdev->dev, "failed to add partitions\n"); |
175 | goto err_destroy; | 177 | goto err_destroy; |
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c index 8fed58e3a4a8..3c7ad17fca78 100644 --- a/drivers/mtd/maps/latch-addr-flash.c +++ b/drivers/mtd/maps/latch-addr-flash.c | |||
@@ -199,8 +199,9 @@ static int __devinit latch_addr_flash_probe(struct platform_device *dev) | |||
199 | } | 199 | } |
200 | info->mtd->owner = THIS_MODULE; | 200 | info->mtd->owner = THIS_MODULE; |
201 | 201 | ||
202 | mtd_device_parse_register(info->mtd, NULL, 0, | 202 | mtd_device_parse_register(info->mtd, NULL, NULL, |
203 | latch_addr_data->parts, latch_addr_data->nr_parts); | 203 | latch_addr_data->parts, |
204 | latch_addr_data->nr_parts); | ||
204 | return 0; | 205 | return 0; |
205 | 206 | ||
206 | iounmap: | 207 | iounmap: |
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index 0259cf583022..a3cfad392ed6 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c | |||
@@ -294,13 +294,24 @@ static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *f | |||
294 | } | 294 | } |
295 | 295 | ||
296 | 296 | ||
297 | static DEFINE_SPINLOCK(pcmcia_vpp_lock); | ||
298 | static int pcmcia_vpp_refcnt; | ||
297 | static void pcmciamtd_set_vpp(struct map_info *map, int on) | 299 | static void pcmciamtd_set_vpp(struct map_info *map, int on) |
298 | { | 300 | { |
299 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; | 301 | struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; |
300 | struct pcmcia_device *link = dev->p_dev; | 302 | struct pcmcia_device *link = dev->p_dev; |
303 | unsigned long flags; | ||
301 | 304 | ||
302 | pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp); | 305 | pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp); |
303 | pcmcia_fixup_vpp(link, on ? dev->vpp : 0); | 306 | spin_lock_irqsave(&pcmcia_vpp_lock, flags); |
307 | if (on) { | ||
308 | if (++pcmcia_vpp_refcnt == 1) /* first nested 'on' */ | ||
309 | pcmcia_fixup_vpp(link, dev->vpp); | ||
310 | } else { | ||
311 | if (--pcmcia_vpp_refcnt == 0) /* last nested 'off' */ | ||
312 | pcmcia_fixup_vpp(link, 0); | ||
313 | } | ||
314 | spin_unlock_irqrestore(&pcmcia_vpp_lock, flags); | ||
304 | } | 315 | } |
305 | 316 | ||
306 | 317 | ||
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c index abc562653b31..21b0b713cacb 100644 --- a/drivers/mtd/maps/physmap.c +++ b/drivers/mtd/maps/physmap.c | |||
@@ -27,6 +27,8 @@ struct physmap_flash_info { | |||
27 | struct mtd_info *mtd[MAX_RESOURCES]; | 27 | struct mtd_info *mtd[MAX_RESOURCES]; |
28 | struct mtd_info *cmtd; | 28 | struct mtd_info *cmtd; |
29 | struct map_info map[MAX_RESOURCES]; | 29 | struct map_info map[MAX_RESOURCES]; |
30 | spinlock_t vpp_lock; | ||
31 | int vpp_refcnt; | ||
30 | }; | 32 | }; |
31 | 33 | ||
32 | static int physmap_flash_remove(struct platform_device *dev) | 34 | static int physmap_flash_remove(struct platform_device *dev) |
@@ -63,12 +65,26 @@ static void physmap_set_vpp(struct map_info *map, int state) | |||
63 | { | 65 | { |
64 | struct platform_device *pdev; | 66 | struct platform_device *pdev; |
65 | struct physmap_flash_data *physmap_data; | 67 | struct physmap_flash_data *physmap_data; |
68 | struct physmap_flash_info *info; | ||
69 | unsigned long flags; | ||
66 | 70 | ||
67 | pdev = (struct platform_device *)map->map_priv_1; | 71 | pdev = (struct platform_device *)map->map_priv_1; |
68 | physmap_data = pdev->dev.platform_data; | 72 | physmap_data = pdev->dev.platform_data; |
69 | 73 | ||
70 | if (physmap_data->set_vpp) | 74 | if (!physmap_data->set_vpp) |
71 | physmap_data->set_vpp(pdev, state); | 75 | return; |
76 | |||
77 | info = platform_get_drvdata(pdev); | ||
78 | |||
79 | spin_lock_irqsave(&info->vpp_lock, flags); | ||
80 | if (state) { | ||
81 | if (++info->vpp_refcnt == 1) /* first nested 'on' */ | ||
82 | physmap_data->set_vpp(pdev, 1); | ||
83 | } else { | ||
84 | if (--info->vpp_refcnt == 0) /* last nested 'off' */ | ||
85 | physmap_data->set_vpp(pdev, 0); | ||
86 | } | ||
87 | spin_unlock_irqrestore(&info->vpp_lock, flags); | ||
72 | } | 88 | } |
73 | 89 | ||
74 | static const char *rom_probe_types[] = { | 90 | static const char *rom_probe_types[] = { |
@@ -172,9 +188,11 @@ static int physmap_flash_probe(struct platform_device *dev) | |||
172 | if (err) | 188 | if (err) |
173 | goto err_out; | 189 | goto err_out; |
174 | 190 | ||
191 | spin_lock_init(&info->vpp_lock); | ||
192 | |||
175 | part_types = physmap_data->part_probe_types ? : part_probe_types; | 193 | part_types = physmap_data->part_probe_types ? : part_probe_types; |
176 | 194 | ||
177 | mtd_device_parse_register(info->cmtd, part_types, 0, | 195 | mtd_device_parse_register(info->cmtd, part_types, NULL, |
178 | physmap_data->parts, physmap_data->nr_parts); | 196 | physmap_data->parts, physmap_data->nr_parts); |
179 | return 0; | 197 | return 0; |
180 | 198 | ||
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 45876d0e5b8e..891558de3ec1 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c | |||
@@ -222,8 +222,9 @@ static int platram_probe(struct platform_device *pdev) | |||
222 | /* check to see if there are any available partitions, or whether | 222 | /* check to see if there are any available partitions, or whether |
223 | * to add this device whole */ | 223 | * to add this device whole */ |
224 | 224 | ||
225 | err = mtd_device_parse_register(info->mtd, pdata->probes, 0, | 225 | err = mtd_device_parse_register(info->mtd, pdata->probes, NULL, |
226 | pdata->partitions, pdata->nr_partitions); | 226 | pdata->partitions, |
227 | pdata->nr_partitions); | ||
227 | if (!err) | 228 | if (!err) |
228 | dev_info(&pdev->dev, "registered mtd device\n"); | 229 | dev_info(&pdev->dev, "registered mtd device\n"); |
229 | 230 | ||
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index 436d121185b1..81884c277405 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c | |||
@@ -98,7 +98,8 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) | |||
98 | } | 98 | } |
99 | info->mtd->owner = THIS_MODULE; | 99 | info->mtd->owner = THIS_MODULE; |
100 | 100 | ||
101 | mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts); | 101 | mtd_device_parse_register(info->mtd, probes, NULL, flash->parts, |
102 | flash->nr_parts); | ||
102 | 103 | ||
103 | platform_set_drvdata(pdev, info); | 104 | platform_set_drvdata(pdev, info); |
104 | return 0; | 105 | return 0; |
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c index 3da63fc6f16e..6f52e1f288b6 100644 --- a/drivers/mtd/maps/rbtx4939-flash.c +++ b/drivers/mtd/maps/rbtx4939-flash.c | |||
@@ -102,8 +102,8 @@ static int rbtx4939_flash_probe(struct platform_device *dev) | |||
102 | info->mtd->owner = THIS_MODULE; | 102 | info->mtd->owner = THIS_MODULE; |
103 | if (err) | 103 | if (err) |
104 | goto err_out; | 104 | goto err_out; |
105 | err = mtd_device_parse_register(info->mtd, NULL, 0, | 105 | err = mtd_device_parse_register(info->mtd, NULL, NULL, pdata->parts, |
106 | pdata->parts, pdata->nr_parts); | 106 | pdata->nr_parts); |
107 | 107 | ||
108 | if (err) | 108 | if (err) |
109 | goto err_out; | 109 | goto err_out; |
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c index cbc3b7867910..a675bdbcb0fe 100644 --- a/drivers/mtd/maps/sa1100-flash.c +++ b/drivers/mtd/maps/sa1100-flash.c | |||
@@ -36,10 +36,22 @@ struct sa_info { | |||
36 | struct sa_subdev_info subdev[0]; | 36 | struct sa_subdev_info subdev[0]; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static DEFINE_SPINLOCK(sa1100_vpp_lock); | ||
40 | static int sa1100_vpp_refcnt; | ||
39 | static void sa1100_set_vpp(struct map_info *map, int on) | 41 | static void sa1100_set_vpp(struct map_info *map, int on) |
40 | { | 42 | { |
41 | struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); | 43 | struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); |
42 | subdev->plat->set_vpp(on); | 44 | unsigned long flags; |
45 | |||
46 | spin_lock_irqsave(&sa1100_vpp_lock, flags); | ||
47 | if (on) { | ||
48 | if (++sa1100_vpp_refcnt == 1) /* first nested 'on' */ | ||
49 | subdev->plat->set_vpp(1); | ||
50 | } else { | ||
51 | if (--sa1100_vpp_refcnt == 0) /* last nested 'off' */ | ||
52 | subdev->plat->set_vpp(0); | ||
53 | } | ||
54 | spin_unlock_irqrestore(&sa1100_vpp_lock, flags); | ||
43 | } | 55 | } |
44 | 56 | ||
45 | static void sa1100_destroy_subdev(struct sa_subdev_info *subdev) | 57 | static void sa1100_destroy_subdev(struct sa_subdev_info *subdev) |
@@ -252,8 +264,8 @@ static int __devinit sa1100_mtd_probe(struct platform_device *pdev) | |||
252 | /* | 264 | /* |
253 | * Partition selection stuff. | 265 | * Partition selection stuff. |
254 | */ | 266 | */ |
255 | mtd_device_parse_register(info->mtd, part_probes, 0, | 267 | mtd_device_parse_register(info->mtd, part_probes, NULL, plat->parts, |
256 | plat->parts, plat->nr_parts); | 268 | plat->nr_parts); |
257 | 269 | ||
258 | platform_set_drvdata(pdev, info); | 270 | platform_set_drvdata(pdev, info); |
259 | err = 0; | 271 | err = 0; |
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c index 496c40704aff..9d900ada6708 100644 --- a/drivers/mtd/maps/solutionengine.c +++ b/drivers/mtd/maps/solutionengine.c | |||
@@ -92,8 +92,8 @@ static int __init init_soleng_maps(void) | |||
92 | mtd_device_register(eprom_mtd, NULL, 0); | 92 | mtd_device_register(eprom_mtd, NULL, 0); |
93 | } | 93 | } |
94 | 94 | ||
95 | mtd_device_parse_register(flash_mtd, probes, 0, | 95 | mtd_device_parse_register(flash_mtd, probes, NULL, |
96 | superh_se_partitions, NUM_PARTITIONS); | 96 | superh_se_partitions, NUM_PARTITIONS); |
97 | 97 | ||
98 | return 0; | 98 | return 0; |
99 | } | 99 | } |
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index 6793074f3f40..cfff454f628b 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c | |||
@@ -85,7 +85,7 @@ static int __init uclinux_mtd_init(void) | |||
85 | } | 85 | } |
86 | 86 | ||
87 | mtd->owner = THIS_MODULE; | 87 | mtd->owner = THIS_MODULE; |
88 | mtd->point = uclinux_point; | 88 | mtd->_point = uclinux_point; |
89 | mtd->priv = mapp; | 89 | mtd->priv = mapp; |
90 | 90 | ||
91 | uclinux_ram_mtdinfo = mtd; | 91 | uclinux_ram_mtdinfo = mtd; |
diff --git a/drivers/mtd/maps/vmu-flash.c b/drivers/mtd/maps/vmu-flash.c index 3a04b078576a..2e2b0945edc7 100644 --- a/drivers/mtd/maps/vmu-flash.c +++ b/drivers/mtd/maps/vmu-flash.c | |||
@@ -360,9 +360,6 @@ static int vmu_flash_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
360 | int index = 0, retval, partition, leftover, numblocks; | 360 | int index = 0, retval, partition, leftover, numblocks; |
361 | unsigned char cx; | 361 | unsigned char cx; |
362 | 362 | ||
363 | if (len < 1) | ||
364 | return -EIO; | ||
365 | |||
366 | mpart = mtd->priv; | 363 | mpart = mtd->priv; |
367 | mdev = mpart->mdev; | 364 | mdev = mpart->mdev; |
368 | partition = mpart->partition; | 365 | partition = mpart->partition; |
@@ -434,11 +431,6 @@ static int vmu_flash_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
434 | partition = mpart->partition; | 431 | partition = mpart->partition; |
435 | card = maple_get_drvdata(mdev); | 432 | card = maple_get_drvdata(mdev); |
436 | 433 | ||
437 | /* simple sanity checks */ | ||
438 | if (len < 1) { | ||
439 | error = -EIO; | ||
440 | goto failed; | ||
441 | } | ||
442 | numblocks = card->parts[partition].numblocks; | 434 | numblocks = card->parts[partition].numblocks; |
443 | if (to + len > numblocks * card->blocklen) | 435 | if (to + len > numblocks * card->blocklen) |
444 | len = numblocks * card->blocklen - to; | 436 | len = numblocks * card->blocklen - to; |
@@ -544,9 +536,9 @@ static void vmu_queryblocks(struct mapleq *mq) | |||
544 | mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE; | 536 | mtd_cur->flags = MTD_WRITEABLE|MTD_NO_ERASE; |
545 | mtd_cur->size = part_cur->numblocks * card->blocklen; | 537 | mtd_cur->size = part_cur->numblocks * card->blocklen; |
546 | mtd_cur->erasesize = card->blocklen; | 538 | mtd_cur->erasesize = card->blocklen; |
547 | mtd_cur->write = vmu_flash_write; | 539 | mtd_cur->_write = vmu_flash_write; |
548 | mtd_cur->read = vmu_flash_read; | 540 | mtd_cur->_read = vmu_flash_read; |
549 | mtd_cur->sync = vmu_flash_sync; | 541 | mtd_cur->_sync = vmu_flash_sync; |
550 | mtd_cur->writesize = card->blocklen; | 542 | mtd_cur->writesize = card->blocklen; |
551 | 543 | ||
552 | mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL); | 544 | mpart = kmalloc(sizeof(struct mdev_part), GFP_KERNEL); |
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c index aa7e0cb2893c..71b0ba797912 100644 --- a/drivers/mtd/maps/wr_sbc82xx_flash.c +++ b/drivers/mtd/maps/wr_sbc82xx_flash.c | |||
@@ -142,7 +142,7 @@ static int __init init_sbc82xx_flash(void) | |||
142 | nr_parts = ARRAY_SIZE(smallflash_parts); | 142 | nr_parts = ARRAY_SIZE(smallflash_parts); |
143 | } | 143 | } |
144 | 144 | ||
145 | mtd_device_parse_register(sbcmtd[i], part_probes, 0, | 145 | mtd_device_parse_register(sbcmtd[i], part_probes, NULL, |
146 | defparts, nr_parts); | 146 | defparts, nr_parts); |
147 | } | 147 | } |
148 | return 0; | 148 | return 0; |
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 424ca5f93c6c..f1f06715d4e0 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -233,6 +233,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) | |||
233 | ret = __get_mtd_device(dev->mtd); | 233 | ret = __get_mtd_device(dev->mtd); |
234 | if (ret) | 234 | if (ret) |
235 | goto error_release; | 235 | goto error_release; |
236 | dev->file_mode = mode; | ||
236 | 237 | ||
237 | unlock: | 238 | unlock: |
238 | dev->open++; | 239 | dev->open++; |
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index af6591237b9b..6c6d80736fad 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c | |||
@@ -321,8 +321,12 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
321 | mutex_unlock(&mtdblk->cache_mutex); | 321 | mutex_unlock(&mtdblk->cache_mutex); |
322 | 322 | ||
323 | if (!--mtdblk->count) { | 323 | if (!--mtdblk->count) { |
324 | /* It was the last usage. Free the cache */ | 324 | /* |
325 | mtd_sync(mbd->mtd); | 325 | * It was the last usage. Free the cache, but only sync if |
326 | * opened for writing. | ||
327 | */ | ||
328 | if (mbd->file_mode & FMODE_WRITE) | ||
329 | mtd_sync(mbd->mtd); | ||
326 | vfree(mtdblk->cache_data); | 330 | vfree(mtdblk->cache_data); |
327 | } | 331 | } |
328 | 332 | ||
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index c57ae92ebda4..55d82321d307 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c | |||
@@ -405,7 +405,7 @@ static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, | |||
405 | if (length > 4096) | 405 | if (length > 4096) |
406 | return -EINVAL; | 406 | return -EINVAL; |
407 | 407 | ||
408 | if (!mtd->write_oob) | 408 | if (!mtd->_write_oob) |
409 | ret = -EOPNOTSUPP; | 409 | ret = -EOPNOTSUPP; |
410 | else | 410 | else |
411 | ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; | 411 | ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT; |
@@ -576,7 +576,7 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd, | |||
576 | !access_ok(VERIFY_READ, req.usr_data, req.len) || | 576 | !access_ok(VERIFY_READ, req.usr_data, req.len) || |
577 | !access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) | 577 | !access_ok(VERIFY_READ, req.usr_oob, req.ooblen)) |
578 | return -EFAULT; | 578 | return -EFAULT; |
579 | if (!mtd->write_oob) | 579 | if (!mtd->_write_oob) |
580 | return -EOPNOTSUPP; | 580 | return -EOPNOTSUPP; |
581 | 581 | ||
582 | ops.mode = req.mode; | 582 | ops.mode = req.mode; |
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index 1ed5103b219b..b9000563b9f4 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c | |||
@@ -72,8 +72,6 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
72 | int ret = 0, err; | 72 | int ret = 0, err; |
73 | int i; | 73 | int i; |
74 | 74 | ||
75 | *retlen = 0; | ||
76 | |||
77 | for (i = 0; i < concat->num_subdev; i++) { | 75 | for (i = 0; i < concat->num_subdev; i++) { |
78 | struct mtd_info *subdev = concat->subdev[i]; | 76 | struct mtd_info *subdev = concat->subdev[i]; |
79 | size_t size, retsize; | 77 | size_t size, retsize; |
@@ -126,11 +124,6 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
126 | int err = -EINVAL; | 124 | int err = -EINVAL; |
127 | int i; | 125 | int i; |
128 | 126 | ||
129 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
130 | return -EROFS; | ||
131 | |||
132 | *retlen = 0; | ||
133 | |||
134 | for (i = 0; i < concat->num_subdev; i++) { | 127 | for (i = 0; i < concat->num_subdev; i++) { |
135 | struct mtd_info *subdev = concat->subdev[i]; | 128 | struct mtd_info *subdev = concat->subdev[i]; |
136 | size_t size, retsize; | 129 | size_t size, retsize; |
@@ -145,11 +138,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
145 | else | 138 | else |
146 | size = len; | 139 | size = len; |
147 | 140 | ||
148 | if (!(subdev->flags & MTD_WRITEABLE)) | 141 | err = mtd_write(subdev, to, size, &retsize, buf); |
149 | err = -EROFS; | ||
150 | else | ||
151 | err = mtd_write(subdev, to, size, &retsize, buf); | ||
152 | |||
153 | if (err) | 142 | if (err) |
154 | break; | 143 | break; |
155 | 144 | ||
@@ -176,19 +165,10 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
176 | int i; | 165 | int i; |
177 | int err = -EINVAL; | 166 | int err = -EINVAL; |
178 | 167 | ||
179 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
180 | return -EROFS; | ||
181 | |||
182 | *retlen = 0; | ||
183 | |||
184 | /* Calculate total length of data */ | 168 | /* Calculate total length of data */ |
185 | for (i = 0; i < count; i++) | 169 | for (i = 0; i < count; i++) |
186 | total_len += vecs[i].iov_len; | 170 | total_len += vecs[i].iov_len; |
187 | 171 | ||
188 | /* Do not allow write past end of device */ | ||
189 | if ((to + total_len) > mtd->size) | ||
190 | return -EINVAL; | ||
191 | |||
192 | /* Check alignment */ | 172 | /* Check alignment */ |
193 | if (mtd->writesize > 1) { | 173 | if (mtd->writesize > 1) { |
194 | uint64_t __to = to; | 174 | uint64_t __to = to; |
@@ -224,12 +204,8 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
224 | old_iov_len = vecs_copy[entry_high].iov_len; | 204 | old_iov_len = vecs_copy[entry_high].iov_len; |
225 | vecs_copy[entry_high].iov_len = size; | 205 | vecs_copy[entry_high].iov_len = size; |
226 | 206 | ||
227 | if (!(subdev->flags & MTD_WRITEABLE)) | 207 | err = mtd_writev(subdev, &vecs_copy[entry_low], |
228 | err = -EROFS; | 208 | entry_high - entry_low + 1, to, &retsize); |
229 | else | ||
230 | err = mtd_writev(subdev, &vecs_copy[entry_low], | ||
231 | entry_high - entry_low + 1, to, | ||
232 | &retsize); | ||
233 | 209 | ||
234 | vecs_copy[entry_high].iov_len = old_iov_len - size; | 210 | vecs_copy[entry_high].iov_len = old_iov_len - size; |
235 | vecs_copy[entry_high].iov_base += size; | 211 | vecs_copy[entry_high].iov_base += size; |
@@ -403,15 +379,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
403 | uint64_t length, offset = 0; | 379 | uint64_t length, offset = 0; |
404 | struct erase_info *erase; | 380 | struct erase_info *erase; |
405 | 381 | ||
406 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
407 | return -EROFS; | ||
408 | |||
409 | if (instr->addr > concat->mtd.size) | ||
410 | return -EINVAL; | ||
411 | |||
412 | if (instr->len + instr->addr > concat->mtd.size) | ||
413 | return -EINVAL; | ||
414 | |||
415 | /* | 382 | /* |
416 | * Check for proper erase block alignment of the to-be-erased area. | 383 | * Check for proper erase block alignment of the to-be-erased area. |
417 | * It is easier to do this based on the super device's erase | 384 | * It is easier to do this based on the super device's erase |
@@ -459,8 +426,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
459 | return -EINVAL; | 426 | return -EINVAL; |
460 | } | 427 | } |
461 | 428 | ||
462 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; | ||
463 | |||
464 | /* make a local copy of instr to avoid modifying the caller's struct */ | 429 | /* make a local copy of instr to avoid modifying the caller's struct */ |
465 | erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); | 430 | erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL); |
466 | 431 | ||
@@ -499,10 +464,6 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
499 | else | 464 | else |
500 | erase->len = length; | 465 | erase->len = length; |
501 | 466 | ||
502 | if (!(subdev->flags & MTD_WRITEABLE)) { | ||
503 | err = -EROFS; | ||
504 | break; | ||
505 | } | ||
506 | length -= erase->len; | 467 | length -= erase->len; |
507 | if ((err = concat_dev_erase(subdev, erase))) { | 468 | if ((err = concat_dev_erase(subdev, erase))) { |
508 | /* sanity check: should never happen since | 469 | /* sanity check: should never happen since |
@@ -538,9 +499,6 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
538 | struct mtd_concat *concat = CONCAT(mtd); | 499 | struct mtd_concat *concat = CONCAT(mtd); |
539 | int i, err = -EINVAL; | 500 | int i, err = -EINVAL; |
540 | 501 | ||
541 | if ((len + ofs) > mtd->size) | ||
542 | return -EINVAL; | ||
543 | |||
544 | for (i = 0; i < concat->num_subdev; i++) { | 502 | for (i = 0; i < concat->num_subdev; i++) { |
545 | struct mtd_info *subdev = concat->subdev[i]; | 503 | struct mtd_info *subdev = concat->subdev[i]; |
546 | uint64_t size; | 504 | uint64_t size; |
@@ -575,9 +533,6 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | |||
575 | struct mtd_concat *concat = CONCAT(mtd); | 533 | struct mtd_concat *concat = CONCAT(mtd); |
576 | int i, err = 0; | 534 | int i, err = 0; |
577 | 535 | ||
578 | if ((len + ofs) > mtd->size) | ||
579 | return -EINVAL; | ||
580 | |||
581 | for (i = 0; i < concat->num_subdev; i++) { | 536 | for (i = 0; i < concat->num_subdev; i++) { |
582 | struct mtd_info *subdev = concat->subdev[i]; | 537 | struct mtd_info *subdev = concat->subdev[i]; |
583 | uint64_t size; | 538 | uint64_t size; |
@@ -650,9 +605,6 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs) | |||
650 | if (!mtd_can_have_bb(concat->subdev[0])) | 605 | if (!mtd_can_have_bb(concat->subdev[0])) |
651 | return res; | 606 | return res; |
652 | 607 | ||
653 | if (ofs > mtd->size) | ||
654 | return -EINVAL; | ||
655 | |||
656 | for (i = 0; i < concat->num_subdev; i++) { | 608 | for (i = 0; i < concat->num_subdev; i++) { |
657 | struct mtd_info *subdev = concat->subdev[i]; | 609 | struct mtd_info *subdev = concat->subdev[i]; |
658 | 610 | ||
@@ -673,12 +625,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
673 | struct mtd_concat *concat = CONCAT(mtd); | 625 | struct mtd_concat *concat = CONCAT(mtd); |
674 | int i, err = -EINVAL; | 626 | int i, err = -EINVAL; |
675 | 627 | ||
676 | if (!mtd_can_have_bb(concat->subdev[0])) | ||
677 | return 0; | ||
678 | |||
679 | if (ofs > mtd->size) | ||
680 | return -EINVAL; | ||
681 | |||
682 | for (i = 0; i < concat->num_subdev; i++) { | 628 | for (i = 0; i < concat->num_subdev; i++) { |
683 | struct mtd_info *subdev = concat->subdev[i]; | 629 | struct mtd_info *subdev = concat->subdev[i]; |
684 | 630 | ||
@@ -716,10 +662,6 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd, | |||
716 | continue; | 662 | continue; |
717 | } | 663 | } |
718 | 664 | ||
719 | /* we've found the subdev over which the mapping will reside */ | ||
720 | if (offset + len > subdev->size) | ||
721 | return (unsigned long) -EINVAL; | ||
722 | |||
723 | return mtd_get_unmapped_area(subdev, len, offset, flags); | 665 | return mtd_get_unmapped_area(subdev, len, offset, flags); |
724 | } | 666 | } |
725 | 667 | ||
@@ -777,16 +719,16 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
777 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; | 719 | concat->mtd.subpage_sft = subdev[0]->subpage_sft; |
778 | concat->mtd.oobsize = subdev[0]->oobsize; | 720 | concat->mtd.oobsize = subdev[0]->oobsize; |
779 | concat->mtd.oobavail = subdev[0]->oobavail; | 721 | concat->mtd.oobavail = subdev[0]->oobavail; |
780 | if (subdev[0]->writev) | 722 | if (subdev[0]->_writev) |
781 | concat->mtd.writev = concat_writev; | 723 | concat->mtd._writev = concat_writev; |
782 | if (subdev[0]->read_oob) | 724 | if (subdev[0]->_read_oob) |
783 | concat->mtd.read_oob = concat_read_oob; | 725 | concat->mtd._read_oob = concat_read_oob; |
784 | if (subdev[0]->write_oob) | 726 | if (subdev[0]->_write_oob) |
785 | concat->mtd.write_oob = concat_write_oob; | 727 | concat->mtd._write_oob = concat_write_oob; |
786 | if (subdev[0]->block_isbad) | 728 | if (subdev[0]->_block_isbad) |
787 | concat->mtd.block_isbad = concat_block_isbad; | 729 | concat->mtd._block_isbad = concat_block_isbad; |
788 | if (subdev[0]->block_markbad) | 730 | if (subdev[0]->_block_markbad) |
789 | concat->mtd.block_markbad = concat_block_markbad; | 731 | concat->mtd._block_markbad = concat_block_markbad; |
790 | 732 | ||
791 | concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; | 733 | concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks; |
792 | 734 | ||
@@ -833,8 +775,8 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
833 | if (concat->mtd.writesize != subdev[i]->writesize || | 775 | if (concat->mtd.writesize != subdev[i]->writesize || |
834 | concat->mtd.subpage_sft != subdev[i]->subpage_sft || | 776 | concat->mtd.subpage_sft != subdev[i]->subpage_sft || |
835 | concat->mtd.oobsize != subdev[i]->oobsize || | 777 | concat->mtd.oobsize != subdev[i]->oobsize || |
836 | !concat->mtd.read_oob != !subdev[i]->read_oob || | 778 | !concat->mtd._read_oob != !subdev[i]->_read_oob || |
837 | !concat->mtd.write_oob != !subdev[i]->write_oob) { | 779 | !concat->mtd._write_oob != !subdev[i]->_write_oob) { |
838 | kfree(concat); | 780 | kfree(concat); |
839 | printk("Incompatible OOB or ECC data on \"%s\"\n", | 781 | printk("Incompatible OOB or ECC data on \"%s\"\n", |
840 | subdev[i]->name); | 782 | subdev[i]->name); |
@@ -849,15 +791,15 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c | |||
849 | concat->num_subdev = num_devs; | 791 | concat->num_subdev = num_devs; |
850 | concat->mtd.name = name; | 792 | concat->mtd.name = name; |
851 | 793 | ||
852 | concat->mtd.erase = concat_erase; | 794 | concat->mtd._erase = concat_erase; |
853 | concat->mtd.read = concat_read; | 795 | concat->mtd._read = concat_read; |
854 | concat->mtd.write = concat_write; | 796 | concat->mtd._write = concat_write; |
855 | concat->mtd.sync = concat_sync; | 797 | concat->mtd._sync = concat_sync; |
856 | concat->mtd.lock = concat_lock; | 798 | concat->mtd._lock = concat_lock; |
857 | concat->mtd.unlock = concat_unlock; | 799 | concat->mtd._unlock = concat_unlock; |
858 | concat->mtd.suspend = concat_suspend; | 800 | concat->mtd._suspend = concat_suspend; |
859 | concat->mtd.resume = concat_resume; | 801 | concat->mtd._resume = concat_resume; |
860 | concat->mtd.get_unmapped_area = concat_get_unmapped_area; | 802 | concat->mtd._get_unmapped_area = concat_get_unmapped_area; |
861 | 803 | ||
862 | /* | 804 | /* |
863 | * Combine the erase block size info of the subdevices: | 805 | * Combine the erase block size info of the subdevices: |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 9a9ce71a71fc..c837507dfb1c 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -107,7 +107,7 @@ static LIST_HEAD(mtd_notifiers); | |||
107 | */ | 107 | */ |
108 | static void mtd_release(struct device *dev) | 108 | static void mtd_release(struct device *dev) |
109 | { | 109 | { |
110 | struct mtd_info *mtd = dev_get_drvdata(dev); | 110 | struct mtd_info __maybe_unused *mtd = dev_get_drvdata(dev); |
111 | dev_t index = MTD_DEVT(mtd->index); | 111 | dev_t index = MTD_DEVT(mtd->index); |
112 | 112 | ||
113 | /* remove /dev/mtdXro node if needed */ | 113 | /* remove /dev/mtdXro node if needed */ |
@@ -126,7 +126,7 @@ static int mtd_cls_resume(struct device *dev) | |||
126 | { | 126 | { |
127 | struct mtd_info *mtd = dev_get_drvdata(dev); | 127 | struct mtd_info *mtd = dev_get_drvdata(dev); |
128 | 128 | ||
129 | if (mtd && mtd->resume) | 129 | if (mtd) |
130 | mtd_resume(mtd); | 130 | mtd_resume(mtd); |
131 | return 0; | 131 | return 0; |
132 | } | 132 | } |
@@ -610,8 +610,8 @@ int __get_mtd_device(struct mtd_info *mtd) | |||
610 | if (!try_module_get(mtd->owner)) | 610 | if (!try_module_get(mtd->owner)) |
611 | return -ENODEV; | 611 | return -ENODEV; |
612 | 612 | ||
613 | if (mtd->get_device) { | 613 | if (mtd->_get_device) { |
614 | err = mtd->get_device(mtd); | 614 | err = mtd->_get_device(mtd); |
615 | 615 | ||
616 | if (err) { | 616 | if (err) { |
617 | module_put(mtd->owner); | 617 | module_put(mtd->owner); |
@@ -675,14 +675,267 @@ void __put_mtd_device(struct mtd_info *mtd) | |||
675 | --mtd->usecount; | 675 | --mtd->usecount; |
676 | BUG_ON(mtd->usecount < 0); | 676 | BUG_ON(mtd->usecount < 0); |
677 | 677 | ||
678 | if (mtd->put_device) | 678 | if (mtd->_put_device) |
679 | mtd->put_device(mtd); | 679 | mtd->_put_device(mtd); |
680 | 680 | ||
681 | module_put(mtd->owner); | 681 | module_put(mtd->owner); |
682 | } | 682 | } |
683 | EXPORT_SYMBOL_GPL(__put_mtd_device); | 683 | EXPORT_SYMBOL_GPL(__put_mtd_device); |
684 | 684 | ||
685 | /* | 685 | /* |
686 | * Erase is an asynchronous operation. Device drivers are supposed | ||
687 | * to call instr->callback() whenever the operation completes, even | ||
688 | * if it completes with a failure. | ||
689 | * Callers are supposed to pass a callback function and wait for it | ||
690 | * to be called before writing to the block. | ||
691 | */ | ||
692 | int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) | ||
693 | { | ||
694 | if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr) | ||
695 | return -EINVAL; | ||
696 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
697 | return -EROFS; | ||
698 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; | ||
699 | if (!instr->len) { | ||
700 | instr->state = MTD_ERASE_DONE; | ||
701 | mtd_erase_callback(instr); | ||
702 | return 0; | ||
703 | } | ||
704 | return mtd->_erase(mtd, instr); | ||
705 | } | ||
706 | EXPORT_SYMBOL_GPL(mtd_erase); | ||
707 | |||
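The comment above restates the mtd_erase() contract: the driver's _erase handler completes asynchronously and invokes instr->callback() in every case, so callers must not touch the block until that callback has fired. A hypothetical synchronous wrapper, not part of this patch, showing how a caller typically waits:

#include <linux/completion.h>
#include <linux/mtd/mtd.h>

static void erase_done(struct erase_info *instr)
{
	complete((struct completion *)instr->priv);	/* runs even on failure */
}

/* Erase one erase block at ofs and wait for the result. */
static int erase_block_sync(struct mtd_info *mtd, loff_t ofs)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct erase_info ei = {};
	int ret;

	ei.mtd = mtd;
	ei.addr = ofs;
	ei.len = mtd->erasesize;
	ei.callback = erase_done;
	ei.priv = (unsigned long)&done;

	ret = mtd_erase(mtd, &ei);	/* range and -EROFS checks now happen here */
	if (ret)
		return ret;

	wait_for_completion(&done);	/* do not write before this returns */
	return ei.state == MTD_ERASE_FAILED ? -EIO : 0;
}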
708 | /* | ||
709 | * This stuff for eXecute-In-Place. phys is optional and may be set to NULL. | ||
710 | */ | ||
711 | int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, | ||
712 | void **virt, resource_size_t *phys) | ||
713 | { | ||
714 | *retlen = 0; | ||
715 | *virt = NULL; | ||
716 | if (phys) | ||
717 | *phys = 0; | ||
718 | if (!mtd->_point) | ||
719 | return -EOPNOTSUPP; | ||
720 | if (from < 0 || from > mtd->size || len > mtd->size - from) | ||
721 | return -EINVAL; | ||
722 | if (!len) | ||
723 | return 0; | ||
724 | return mtd->_point(mtd, from, len, retlen, virt, phys); | ||
725 | } | ||
726 | EXPORT_SYMBOL_GPL(mtd_point); | ||
727 | |||
728 | /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ | ||
729 | int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | ||
730 | { | ||
731 | if (!mtd->_point) | ||
732 | return -EOPNOTSUPP; | ||
733 | if (from < 0 || from > mtd->size || len > mtd->size - from) | ||
734 | return -EINVAL; | ||
735 | if (!len) | ||
736 | return 0; | ||
737 | return mtd->_unpoint(mtd, from, len); | ||
738 | } | ||
739 | EXPORT_SYMBOL_GPL(mtd_unpoint); | ||
740 | |||
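mtd_point()/mtd_unpoint() give the caller a direct (XIP) mapping of the flash when the driver provides _point; everyone else gets -EOPNOTSUPP and should fall back to mtd_read(). A hypothetical helper illustrating that fallback pattern (not from this patch):

#include <linux/mtd/mtd.h>
#include <linux/string.h>

static int read_via_point(struct mtd_info *mtd, loff_t from, size_t len,
			  u_char *dst)
{
	size_t retlen;
	void *virt;
	int ret;

	ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);	/* phys not needed */
	if (!ret) {
		if (retlen == len) {
			memcpy(dst, virt, len);		/* copy straight out of the mapping */
			mtd_unpoint(mtd, from, retlen);
			return 0;
		}
		mtd_unpoint(mtd, from, retlen);		/* partial mapping: fall back */
	} else if (ret != -EOPNOTSUPP) {
		return ret;
	}

	return mtd_read(mtd, from, len, &retlen, dst);	/* ordinary read path */
}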
741 | /* | ||
742 | * Allow NOMMU mmap() to directly map the device (if not NULL) | ||
743 | * - return the address to which the offset maps | ||
744 | * - return -ENOSYS to indicate refusal to do the mapping | ||
745 | */ | ||
746 | unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, | ||
747 | unsigned long offset, unsigned long flags) | ||
748 | { | ||
749 | if (!mtd->_get_unmapped_area) | ||
750 | return -EOPNOTSUPP; | ||
751 | if (offset > mtd->size || len > mtd->size - offset) | ||
752 | return -EINVAL; | ||
753 | return mtd->_get_unmapped_area(mtd, len, offset, flags); | ||
754 | } | ||
755 | EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); | ||
756 | |||
757 | int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, | ||
758 | u_char *buf) | ||
759 | { | ||
760 | *retlen = 0; | ||
761 | if (from < 0 || from > mtd->size || len > mtd->size - from) | ||
762 | return -EINVAL; | ||
763 | if (!len) | ||
764 | return 0; | ||
765 | return mtd->_read(mtd, from, len, retlen, buf); | ||
766 | } | ||
767 | EXPORT_SYMBOL_GPL(mtd_read); | ||
768 | |||
769 | int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | ||
770 | const u_char *buf) | ||
771 | { | ||
772 | *retlen = 0; | ||
773 | if (to < 0 || to > mtd->size || len > mtd->size - to) | ||
774 | return -EINVAL; | ||
775 | if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE)) | ||
776 | return -EROFS; | ||
777 | if (!len) | ||
778 | return 0; | ||
779 | return mtd->_write(mtd, to, len, retlen, buf); | ||
780 | } | ||
781 | EXPORT_SYMBOL_GPL(mtd_write); | ||
782 | |||
783 | /* | ||
784 | * In blackbox flight recorder like scenarios we want to make successful writes | ||
785 | * in interrupt context. panic_write() is only intended to be called when it's | ||
786 | * known the kernel is about to panic and we need the write to succeed. Since | ||
787 | * the kernel is not going to be running for much longer, this function can | ||
788 | * break locks and delay to ensure the write succeeds (but not sleep). | ||
789 | */ | ||
790 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, | ||
791 | const u_char *buf) | ||
792 | { | ||
793 | *retlen = 0; | ||
794 | if (!mtd->_panic_write) | ||
795 | return -EOPNOTSUPP; | ||
796 | if (to < 0 || to > mtd->size || len > mtd->size - to) | ||
797 | return -EINVAL; | ||
798 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
799 | return -EROFS; | ||
800 | if (!len) | ||
801 | return 0; | ||
802 | return mtd->_panic_write(mtd, to, len, retlen, buf); | ||
803 | } | ||
804 | EXPORT_SYMBOL_GPL(mtd_panic_write); | ||
805 | |||
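As the comment says, mtd_panic_write() exists for flight-recorder style last-gasp logging from a dying kernel. A hypothetical panic-notifier sketch (device lookup and offset management omitted; the names are invented for illustration):

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/mtd/mtd.h>

static struct mtd_info *rec_mtd;	/* set up elsewhere */
static loff_t rec_ofs;

static int rec_panic(struct notifier_block *nb, unsigned long event, void *msg)
{
	char buf[64];
	size_t retlen;

	snprintf(buf, sizeof(buf), "panic: %s", (const char *)msg);
	/*
	 * Must not sleep: the core wrapper refuses with -EOPNOTSUPP when the
	 * driver has no _panic_write handler and -EROFS when the device is
	 * read-only.
	 */
	mtd_panic_write(rec_mtd, rec_ofs, sizeof(buf), &retlen, (u_char *)buf);
	return NOTIFY_DONE;
}

static struct notifier_block rec_nb = { .notifier_call = rec_panic };
/* registered with atomic_notifier_chain_register(&panic_notifier_list, &rec_nb) */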
806 | /* | ||
807 | * Method to access the protection register area, present in some flash | ||
808 | * devices. The user data is one time programmable but the factory data is read | ||
809 | * only. | ||
810 | */ | ||
811 | int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, | ||
812 | size_t len) | ||
813 | { | ||
814 | if (!mtd->_get_fact_prot_info) | ||
815 | return -EOPNOTSUPP; | ||
816 | if (!len) | ||
817 | return 0; | ||
818 | return mtd->_get_fact_prot_info(mtd, buf, len); | ||
819 | } | ||
820 | EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info); | ||
821 | |||
822 | int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, | ||
823 | size_t *retlen, u_char *buf) | ||
824 | { | ||
825 | *retlen = 0; | ||
826 | if (!mtd->_read_fact_prot_reg) | ||
827 | return -EOPNOTSUPP; | ||
828 | if (!len) | ||
829 | return 0; | ||
830 | return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf); | ||
831 | } | ||
832 | EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg); | ||
833 | |||
834 | int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf, | ||
835 | size_t len) | ||
836 | { | ||
837 | if (!mtd->_get_user_prot_info) | ||
838 | return -EOPNOTSUPP; | ||
839 | if (!len) | ||
840 | return 0; | ||
841 | return mtd->_get_user_prot_info(mtd, buf, len); | ||
842 | } | ||
843 | EXPORT_SYMBOL_GPL(mtd_get_user_prot_info); | ||
844 | |||
845 | int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, | ||
846 | size_t *retlen, u_char *buf) | ||
847 | { | ||
848 | *retlen = 0; | ||
849 | if (!mtd->_read_user_prot_reg) | ||
850 | return -EOPNOTSUPP; | ||
851 | if (!len) | ||
852 | return 0; | ||
853 | return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf); | ||
854 | } | ||
855 | EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg); | ||
856 | |||
857 | int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, | ||
858 | size_t *retlen, u_char *buf) | ||
859 | { | ||
860 | *retlen = 0; | ||
861 | if (!mtd->_write_user_prot_reg) | ||
862 | return -EOPNOTSUPP; | ||
863 | if (!len) | ||
864 | return 0; | ||
865 | return mtd->_write_user_prot_reg(mtd, to, len, retlen, buf); | ||
866 | } | ||
867 | EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg); | ||
868 | |||
869 | int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) | ||
870 | { | ||
871 | if (!mtd->_lock_user_prot_reg) | ||
872 | return -EOPNOTSUPP; | ||
873 | if (!len) | ||
874 | return 0; | ||
875 | return mtd->_lock_user_prot_reg(mtd, from, len); | ||
876 | } | ||
877 | EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg); | ||
878 | |||
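These wrappers cover the OTP ("protection register") area: the factory half is read-only, the user half is one-time programmable. A hypothetical probe-time dump of the factory region, purely illustrative:

#include <linux/printk.h>
#include <linux/mtd/mtd.h>

static int dump_factory_otp(struct mtd_info *mtd)
{
	struct otp_info info[4];
	u_char serial[32];
	size_t retlen;
	int ret;

	/* Describe the factory regions; -EOPNOTSUPP if the chip has none. */
	ret = mtd_get_fact_prot_info(mtd, info, sizeof(info));
	if (ret < 0)
		return ret;

	/* Read the first bytes of the read-only factory area. */
	ret = mtd_read_fact_prot_reg(mtd, 0, sizeof(serial), &retlen, serial);
	if (ret)
		return ret;

	print_hex_dump(KERN_INFO, "otp: ", DUMP_PREFIX_OFFSET, 16, 1,
		       serial, retlen, false);
	return 0;
}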
879 | /* Chip-supported device locking */ | ||
880 | int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
881 | { | ||
882 | if (!mtd->_lock) | ||
883 | return -EOPNOTSUPP; | ||
884 | if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) | ||
885 | return -EINVAL; | ||
886 | if (!len) | ||
887 | return 0; | ||
888 | return mtd->_lock(mtd, ofs, len); | ||
889 | } | ||
890 | EXPORT_SYMBOL_GPL(mtd_lock); | ||
891 | |||
892 | int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
893 | { | ||
894 | if (!mtd->_unlock) | ||
895 | return -EOPNOTSUPP; | ||
896 | if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) | ||
897 | return -EINVAL; | ||
898 | if (!len) | ||
899 | return 0; | ||
900 | return mtd->_unlock(mtd, ofs, len); | ||
901 | } | ||
902 | EXPORT_SYMBOL_GPL(mtd_unlock); | ||
903 | |||
904 | int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
905 | { | ||
906 | if (!mtd->_is_locked) | ||
907 | return -EOPNOTSUPP; | ||
908 | if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs) | ||
909 | return -EINVAL; | ||
910 | if (!len) | ||
911 | return 0; | ||
912 | return mtd->_is_locked(mtd, ofs, len); | ||
913 | } | ||
914 | EXPORT_SYMBOL_GPL(mtd_is_locked); | ||
915 | |||
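mtd_lock()/mtd_unlock()/mtd_is_locked() expose chip-level write protection; drivers without the corresponding _lock handlers now uniformly report -EOPNOTSUPP from the core. A hypothetical helper that updates a protected region (illustrative only):

#include <linux/mtd/mtd.h>

static int write_protected_region(struct mtd_info *mtd, loff_t ofs,
				  size_t len, const u_char *buf)
{
	size_t retlen;
	int ret;

	ret = mtd_unlock(mtd, ofs, len);
	if (ret && ret != -EOPNOTSUPP)	/* chip supports locking but unlock failed */
		return ret;

	ret = mtd_write(mtd, ofs, len, &retlen, buf);

	mtd_lock(mtd, ofs, len);	/* best-effort re-protection */
	return ret;
}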
916 | int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) | ||
917 | { | ||
918 | if (!mtd->_block_isbad) | ||
919 | return 0; | ||
920 | if (ofs < 0 || ofs > mtd->size) | ||
921 | return -EINVAL; | ||
922 | return mtd->_block_isbad(mtd, ofs); | ||
923 | } | ||
924 | EXPORT_SYMBOL_GPL(mtd_block_isbad); | ||
925 | |||
926 | int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) | ||
927 | { | ||
928 | if (!mtd->_block_markbad) | ||
929 | return -EOPNOTSUPP; | ||
930 | if (ofs < 0 || ofs > mtd->size) | ||
931 | return -EINVAL; | ||
932 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
933 | return -EROFS; | ||
934 | return mtd->_block_markbad(mtd, ofs); | ||
935 | } | ||
936 | EXPORT_SYMBOL_GPL(mtd_block_markbad); | ||
937 | |||
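Note the asymmetry: mtd_block_isbad() returns 0 (not an error) when the driver has no _block_isbad handler, while mtd_block_markbad() returns -EOPNOTSUPP without one. That is what lets the mtdoops hunks further down drop their mtd_can_have_bb() guards and simply tolerate -EOPNOTSUPP from markbad. A hypothetical scan loop relying on the new semantics:

#include <linux/mtd/mtd.h>

/* Count bad erase blocks; on devices with no bad-block concept this is 0. */
static int count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		int ret = mtd_block_isbad(mtd, ofs);

		if (ret < 0)		/* only an out-of-range offset fails */
			return ret;
		if (ret)
			bad++;
	}
	return bad;
}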
938 | /* | ||
686 | * default_mtd_writev - the default writev method | 939 | * default_mtd_writev - the default writev method |
687 | * @mtd: mtd device description object pointer | 940 | * @mtd: mtd device description object pointer |
688 | * @vecs: the vectors to write | 941 | * @vecs: the vectors to write |
@@ -729,9 +982,11 @@ int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, | |||
729 | unsigned long count, loff_t to, size_t *retlen) | 982 | unsigned long count, loff_t to, size_t *retlen) |
730 | { | 983 | { |
731 | *retlen = 0; | 984 | *retlen = 0; |
732 | if (!mtd->writev) | 985 | if (!(mtd->flags & MTD_WRITEABLE)) |
986 | return -EROFS; | ||
987 | if (!mtd->_writev) | ||
733 | return default_mtd_writev(mtd, vecs, count, to, retlen); | 988 | return default_mtd_writev(mtd, vecs, count, to, retlen); |
734 | return mtd->writev(mtd, vecs, count, to, retlen); | 989 | return mtd->_writev(mtd, vecs, count, to, retlen); |
735 | } | 990 | } |
736 | EXPORT_SYMBOL_GPL(mtd_writev); | 991 | EXPORT_SYMBOL_GPL(mtd_writev); |
737 | 992 | ||
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 3ce99e00a49e..ae36d7e1e913 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c | |||
@@ -169,7 +169,7 @@ static void mtdoops_workfunc_erase(struct work_struct *work) | |||
169 | cxt->nextpage = 0; | 169 | cxt->nextpage = 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | while (mtd_can_have_bb(mtd)) { | 172 | while (1) { |
173 | ret = mtd_block_isbad(mtd, cxt->nextpage * record_size); | 173 | ret = mtd_block_isbad(mtd, cxt->nextpage * record_size); |
174 | if (!ret) | 174 | if (!ret) |
175 | break; | 175 | break; |
@@ -199,9 +199,9 @@ badblock: | |||
199 | return; | 199 | return; |
200 | } | 200 | } |
201 | 201 | ||
202 | if (mtd_can_have_bb(mtd) && ret == -EIO) { | 202 | if (ret == -EIO) { |
203 | ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); | 203 | ret = mtd_block_markbad(mtd, cxt->nextpage * record_size); |
204 | if (ret < 0) { | 204 | if (ret < 0 && ret != -EOPNOTSUPP) { |
205 | printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); | 205 | printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n"); |
206 | return; | 206 | return; |
207 | } | 207 | } |
@@ -257,8 +257,7 @@ static void find_next_position(struct mtdoops_context *cxt) | |||
257 | size_t retlen; | 257 | size_t retlen; |
258 | 258 | ||
259 | for (page = 0; page < cxt->oops_pages; page++) { | 259 | for (page = 0; page < cxt->oops_pages; page++) { |
260 | if (mtd_can_have_bb(mtd) && | 260 | if (mtd_block_isbad(mtd, page * record_size)) |
261 | mtd_block_isbad(mtd, page * record_size)) | ||
262 | continue; | 261 | continue; |
263 | /* Assume the page is used */ | 262 | /* Assume the page is used */ |
264 | mark_page_used(cxt, page); | 263 | mark_page_used(cxt, page); |
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index a3d44c3416b4..9651c06de0a9 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -65,12 +65,8 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
65 | int res; | 65 | int res; |
66 | 66 | ||
67 | stats = part->master->ecc_stats; | 67 | stats = part->master->ecc_stats; |
68 | 68 | res = part->master->_read(part->master, from + part->offset, len, | |
69 | if (from >= mtd->size) | 69 | retlen, buf); |
70 | len = 0; | ||
71 | else if (from + len > mtd->size) | ||
72 | len = mtd->size - from; | ||
73 | res = mtd_read(part->master, from + part->offset, len, retlen, buf); | ||
74 | if (unlikely(res)) { | 70 | if (unlikely(res)) { |
75 | if (mtd_is_bitflip(res)) | 71 | if (mtd_is_bitflip(res)) |
76 | mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; | 72 | mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; |
@@ -84,19 +80,16 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len, | |||
84 | size_t *retlen, void **virt, resource_size_t *phys) | 80 | size_t *retlen, void **virt, resource_size_t *phys) |
85 | { | 81 | { |
86 | struct mtd_part *part = PART(mtd); | 82 | struct mtd_part *part = PART(mtd); |
87 | if (from >= mtd->size) | 83 | |
88 | len = 0; | 84 | return part->master->_point(part->master, from + part->offset, len, |
89 | else if (from + len > mtd->size) | 85 | retlen, virt, phys); |
90 | len = mtd->size - from; | ||
91 | return mtd_point(part->master, from + part->offset, len, retlen, | ||
92 | virt, phys); | ||
93 | } | 86 | } |
94 | 87 | ||
95 | static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | 88 | static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) |
96 | { | 89 | { |
97 | struct mtd_part *part = PART(mtd); | 90 | struct mtd_part *part = PART(mtd); |
98 | 91 | ||
99 | mtd_unpoint(part->master, from + part->offset, len); | 92 | return part->master->_unpoint(part->master, from + part->offset, len); |
100 | } | 93 | } |
101 | 94 | ||
102 | static unsigned long part_get_unmapped_area(struct mtd_info *mtd, | 95 | static unsigned long part_get_unmapped_area(struct mtd_info *mtd, |
@@ -107,7 +100,8 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd, | |||
107 | struct mtd_part *part = PART(mtd); | 100 | struct mtd_part *part = PART(mtd); |
108 | 101 | ||
109 | offset += part->offset; | 102 | offset += part->offset; |
110 | return mtd_get_unmapped_area(part->master, len, offset, flags); | 103 | return part->master->_get_unmapped_area(part->master, len, offset, |
104 | flags); | ||
111 | } | 105 | } |
112 | 106 | ||
113 | static int part_read_oob(struct mtd_info *mtd, loff_t from, | 107 | static int part_read_oob(struct mtd_info *mtd, loff_t from, |
@@ -138,7 +132,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from, | |||
138 | return -EINVAL; | 132 | return -EINVAL; |
139 | } | 133 | } |
140 | 134 | ||
141 | res = mtd_read_oob(part->master, from + part->offset, ops); | 135 | res = part->master->_read_oob(part->master, from + part->offset, ops); |
142 | if (unlikely(res)) { | 136 | if (unlikely(res)) { |
143 | if (mtd_is_bitflip(res)) | 137 | if (mtd_is_bitflip(res)) |
144 | mtd->ecc_stats.corrected++; | 138 | mtd->ecc_stats.corrected++; |
@@ -152,55 +146,46 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, | |||
152 | size_t len, size_t *retlen, u_char *buf) | 146 | size_t len, size_t *retlen, u_char *buf) |
153 | { | 147 | { |
154 | struct mtd_part *part = PART(mtd); | 148 | struct mtd_part *part = PART(mtd); |
155 | return mtd_read_user_prot_reg(part->master, from, len, retlen, buf); | 149 | return part->master->_read_user_prot_reg(part->master, from, len, |
150 | retlen, buf); | ||
156 | } | 151 | } |
157 | 152 | ||
158 | static int part_get_user_prot_info(struct mtd_info *mtd, | 153 | static int part_get_user_prot_info(struct mtd_info *mtd, |
159 | struct otp_info *buf, size_t len) | 154 | struct otp_info *buf, size_t len) |
160 | { | 155 | { |
161 | struct mtd_part *part = PART(mtd); | 156 | struct mtd_part *part = PART(mtd); |
162 | return mtd_get_user_prot_info(part->master, buf, len); | 157 | return part->master->_get_user_prot_info(part->master, buf, len); |
163 | } | 158 | } |
164 | 159 | ||
165 | static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, | 160 | static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, |
166 | size_t len, size_t *retlen, u_char *buf) | 161 | size_t len, size_t *retlen, u_char *buf) |
167 | { | 162 | { |
168 | struct mtd_part *part = PART(mtd); | 163 | struct mtd_part *part = PART(mtd); |
169 | return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf); | 164 | return part->master->_read_fact_prot_reg(part->master, from, len, |
165 | retlen, buf); | ||
170 | } | 166 | } |
171 | 167 | ||
172 | static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, | 168 | static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, |
173 | size_t len) | 169 | size_t len) |
174 | { | 170 | { |
175 | struct mtd_part *part = PART(mtd); | 171 | struct mtd_part *part = PART(mtd); |
176 | return mtd_get_fact_prot_info(part->master, buf, len); | 172 | return part->master->_get_fact_prot_info(part->master, buf, len); |
177 | } | 173 | } |
178 | 174 | ||
179 | static int part_write(struct mtd_info *mtd, loff_t to, size_t len, | 175 | static int part_write(struct mtd_info *mtd, loff_t to, size_t len, |
180 | size_t *retlen, const u_char *buf) | 176 | size_t *retlen, const u_char *buf) |
181 | { | 177 | { |
182 | struct mtd_part *part = PART(mtd); | 178 | struct mtd_part *part = PART(mtd); |
183 | if (!(mtd->flags & MTD_WRITEABLE)) | 179 | return part->master->_write(part->master, to + part->offset, len, |
184 | return -EROFS; | 180 | retlen, buf); |
185 | if (to >= mtd->size) | ||
186 | len = 0; | ||
187 | else if (to + len > mtd->size) | ||
188 | len = mtd->size - to; | ||
189 | return mtd_write(part->master, to + part->offset, len, retlen, buf); | ||
190 | } | 181 | } |
191 | 182 | ||
192 | static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | 183 | static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, |
193 | size_t *retlen, const u_char *buf) | 184 | size_t *retlen, const u_char *buf) |
194 | { | 185 | { |
195 | struct mtd_part *part = PART(mtd); | 186 | struct mtd_part *part = PART(mtd); |
196 | if (!(mtd->flags & MTD_WRITEABLE)) | 187 | return part->master->_panic_write(part->master, to + part->offset, len, |
197 | return -EROFS; | 188 | retlen, buf); |
198 | if (to >= mtd->size) | ||
199 | len = 0; | ||
200 | else if (to + len > mtd->size) | ||
201 | len = mtd->size - to; | ||
202 | return mtd_panic_write(part->master, to + part->offset, len, retlen, | ||
203 | buf); | ||
204 | } | 189 | } |
205 | 190 | ||
206 | static int part_write_oob(struct mtd_info *mtd, loff_t to, | 191 | static int part_write_oob(struct mtd_info *mtd, loff_t to, |
@@ -208,50 +193,43 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to, | |||
208 | { | 193 | { |
209 | struct mtd_part *part = PART(mtd); | 194 | struct mtd_part *part = PART(mtd); |
210 | 195 | ||
211 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
212 | return -EROFS; | ||
213 | |||
214 | if (to >= mtd->size) | 196 | if (to >= mtd->size) |
215 | return -EINVAL; | 197 | return -EINVAL; |
216 | if (ops->datbuf && to + ops->len > mtd->size) | 198 | if (ops->datbuf && to + ops->len > mtd->size) |
217 | return -EINVAL; | 199 | return -EINVAL; |
218 | return mtd_write_oob(part->master, to + part->offset, ops); | 200 | return part->master->_write_oob(part->master, to + part->offset, ops); |
219 | } | 201 | } |
220 | 202 | ||
221 | static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, | 203 | static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, |
222 | size_t len, size_t *retlen, u_char *buf) | 204 | size_t len, size_t *retlen, u_char *buf) |
223 | { | 205 | { |
224 | struct mtd_part *part = PART(mtd); | 206 | struct mtd_part *part = PART(mtd); |
225 | return mtd_write_user_prot_reg(part->master, from, len, retlen, buf); | 207 | return part->master->_write_user_prot_reg(part->master, from, len, |
208 | retlen, buf); | ||
226 | } | 209 | } |
227 | 210 | ||
228 | static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, | 211 | static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, |
229 | size_t len) | 212 | size_t len) |
230 | { | 213 | { |
231 | struct mtd_part *part = PART(mtd); | 214 | struct mtd_part *part = PART(mtd); |
232 | return mtd_lock_user_prot_reg(part->master, from, len); | 215 | return part->master->_lock_user_prot_reg(part->master, from, len); |
233 | } | 216 | } |
234 | 217 | ||
235 | static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, | 218 | static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, |
236 | unsigned long count, loff_t to, size_t *retlen) | 219 | unsigned long count, loff_t to, size_t *retlen) |
237 | { | 220 | { |
238 | struct mtd_part *part = PART(mtd); | 221 | struct mtd_part *part = PART(mtd); |
239 | if (!(mtd->flags & MTD_WRITEABLE)) | 222 | return part->master->_writev(part->master, vecs, count, |
240 | return -EROFS; | 223 | to + part->offset, retlen); |
241 | return mtd_writev(part->master, vecs, count, to + part->offset, | ||
242 | retlen); | ||
243 | } | 224 | } |
244 | 225 | ||
245 | static int part_erase(struct mtd_info *mtd, struct erase_info *instr) | 226 | static int part_erase(struct mtd_info *mtd, struct erase_info *instr) |
246 | { | 227 | { |
247 | struct mtd_part *part = PART(mtd); | 228 | struct mtd_part *part = PART(mtd); |
248 | int ret; | 229 | int ret; |
249 | if (!(mtd->flags & MTD_WRITEABLE)) | 230 | |
250 | return -EROFS; | ||
251 | if (instr->addr >= mtd->size) | ||
252 | return -EINVAL; | ||
253 | instr->addr += part->offset; | 231 | instr->addr += part->offset; |
254 | ret = mtd_erase(part->master, instr); | 232 | ret = part->master->_erase(part->master, instr); |
255 | if (ret) { | 233 | if (ret) { |
256 | if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) | 234 | if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) |
257 | instr->fail_addr -= part->offset; | 235 | instr->fail_addr -= part->offset; |
@@ -262,7 +240,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
262 | 240 | ||
263 | void mtd_erase_callback(struct erase_info *instr) | 241 | void mtd_erase_callback(struct erase_info *instr) |
264 | { | 242 | { |
265 | if (instr->mtd->erase == part_erase) { | 243 | if (instr->mtd->_erase == part_erase) { |
266 | struct mtd_part *part = PART(instr->mtd); | 244 | struct mtd_part *part = PART(instr->mtd); |
267 | 245 | ||
268 | if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) | 246 | if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) |
@@ -277,52 +255,44 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback); | |||
277 | static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 255 | static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
278 | { | 256 | { |
279 | struct mtd_part *part = PART(mtd); | 257 | struct mtd_part *part = PART(mtd); |
280 | if ((len + ofs) > mtd->size) | 258 | return part->master->_lock(part->master, ofs + part->offset, len); |
281 | return -EINVAL; | ||
282 | return mtd_lock(part->master, ofs + part->offset, len); | ||
283 | } | 259 | } |
284 | 260 | ||
285 | static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 261 | static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
286 | { | 262 | { |
287 | struct mtd_part *part = PART(mtd); | 263 | struct mtd_part *part = PART(mtd); |
288 | if ((len + ofs) > mtd->size) | 264 | return part->master->_unlock(part->master, ofs + part->offset, len); |
289 | return -EINVAL; | ||
290 | return mtd_unlock(part->master, ofs + part->offset, len); | ||
291 | } | 265 | } |
292 | 266 | ||
293 | static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 267 | static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) |
294 | { | 268 | { |
295 | struct mtd_part *part = PART(mtd); | 269 | struct mtd_part *part = PART(mtd); |
296 | if ((len + ofs) > mtd->size) | 270 | return part->master->_is_locked(part->master, ofs + part->offset, len); |
297 | return -EINVAL; | ||
298 | return mtd_is_locked(part->master, ofs + part->offset, len); | ||
299 | } | 271 | } |
300 | 272 | ||
301 | static void part_sync(struct mtd_info *mtd) | 273 | static void part_sync(struct mtd_info *mtd) |
302 | { | 274 | { |
303 | struct mtd_part *part = PART(mtd); | 275 | struct mtd_part *part = PART(mtd); |
304 | mtd_sync(part->master); | 276 | part->master->_sync(part->master); |
305 | } | 277 | } |
306 | 278 | ||
307 | static int part_suspend(struct mtd_info *mtd) | 279 | static int part_suspend(struct mtd_info *mtd) |
308 | { | 280 | { |
309 | struct mtd_part *part = PART(mtd); | 281 | struct mtd_part *part = PART(mtd); |
310 | return mtd_suspend(part->master); | 282 | return part->master->_suspend(part->master); |
311 | } | 283 | } |
312 | 284 | ||
313 | static void part_resume(struct mtd_info *mtd) | 285 | static void part_resume(struct mtd_info *mtd) |
314 | { | 286 | { |
315 | struct mtd_part *part = PART(mtd); | 287 | struct mtd_part *part = PART(mtd); |
316 | mtd_resume(part->master); | 288 | part->master->_resume(part->master); |
317 | } | 289 | } |
318 | 290 | ||
319 | static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) | 291 | static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) |
320 | { | 292 | { |
321 | struct mtd_part *part = PART(mtd); | 293 | struct mtd_part *part = PART(mtd); |
322 | if (ofs >= mtd->size) | ||
323 | return -EINVAL; | ||
324 | ofs += part->offset; | 294 | ofs += part->offset; |
325 | return mtd_block_isbad(part->master, ofs); | 295 | return part->master->_block_isbad(part->master, ofs); |
326 | } | 296 | } |
327 | 297 | ||
328 | static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) | 298 | static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) |
@@ -330,12 +300,8 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
330 | struct mtd_part *part = PART(mtd); | 300 | struct mtd_part *part = PART(mtd); |
331 | int res; | 301 | int res; |
332 | 302 | ||
333 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
334 | return -EROFS; | ||
335 | if (ofs >= mtd->size) | ||
336 | return -EINVAL; | ||
337 | ofs += part->offset; | 303 | ofs += part->offset; |
338 | res = mtd_block_markbad(part->master, ofs); | 304 | res = part->master->_block_markbad(part->master, ofs); |
339 | if (!res) | 305 | if (!res) |
340 | mtd->ecc_stats.badblocks++; | 306 | mtd->ecc_stats.badblocks++; |
341 | return res; | 307 | return res; |
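Taken together, these mtdpart.c hunks rely on the new core wrappers doing the bounds and writability checks against the partition's own size before the part_* handler runs, so the handlers shrink to pure offset translation. A sketch of the resulting call path for a read (comment form, not code from the patch):

/*
 *   mtd_read(part, from, len, &retlen, buf)            -- mtdcore.c wrapper
 *     checks: 0 <= from, from + len <= part->size      -- partition-sized bounds
 *     -> part_read(part, from, ...)                    -- mtdpart.c
 *        -> master->_read(master, from + offset, ...)  -- offset translated once
 */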
@@ -410,54 +376,55 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, | |||
410 | */ | 376 | */ |
411 | slave->mtd.dev.parent = master->dev.parent; | 377 | slave->mtd.dev.parent = master->dev.parent; |
412 | 378 | ||
413 | slave->mtd.read = part_read; | 379 | slave->mtd._read = part_read; |
414 | slave->mtd.write = part_write; | 380 | slave->mtd._write = part_write; |
415 | 381 | ||
416 | if (master->panic_write) | 382 | if (master->_panic_write) |
417 | slave->mtd.panic_write = part_panic_write; | 383 | slave->mtd._panic_write = part_panic_write; |
418 | 384 | ||
419 | if (master->point && master->unpoint) { | 385 | if (master->_point && master->_unpoint) { |
420 | slave->mtd.point = part_point; | 386 | slave->mtd._point = part_point; |
421 | slave->mtd.unpoint = part_unpoint; | 387 | slave->mtd._unpoint = part_unpoint; |
422 | } | 388 | } |
423 | 389 | ||
424 | if (master->get_unmapped_area) | 390 | if (master->_get_unmapped_area) |
425 | slave->mtd.get_unmapped_area = part_get_unmapped_area; | 391 | slave->mtd._get_unmapped_area = part_get_unmapped_area; |
426 | if (master->read_oob) | 392 | if (master->_read_oob) |
427 | slave->mtd.read_oob = part_read_oob; | 393 | slave->mtd._read_oob = part_read_oob; |
428 | if (master->write_oob) | 394 | if (master->_write_oob) |
429 | slave->mtd.write_oob = part_write_oob; | 395 | slave->mtd._write_oob = part_write_oob; |
430 | if (master->read_user_prot_reg) | 396 | if (master->_read_user_prot_reg) |
431 | slave->mtd.read_user_prot_reg = part_read_user_prot_reg; | 397 | slave->mtd._read_user_prot_reg = part_read_user_prot_reg; |
432 | if (master->read_fact_prot_reg) | 398 | if (master->_read_fact_prot_reg) |
433 | slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg; | 399 | slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg; |
434 | if (master->write_user_prot_reg) | 400 | if (master->_write_user_prot_reg) |
435 | slave->mtd.write_user_prot_reg = part_write_user_prot_reg; | 401 | slave->mtd._write_user_prot_reg = part_write_user_prot_reg; |
436 | if (master->lock_user_prot_reg) | 402 | if (master->_lock_user_prot_reg) |
437 | slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg; | 403 | slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg; |
438 | if (master->get_user_prot_info) | 404 | if (master->_get_user_prot_info) |
439 | slave->mtd.get_user_prot_info = part_get_user_prot_info; | 405 | slave->mtd._get_user_prot_info = part_get_user_prot_info; |
440 | if (master->get_fact_prot_info) | 406 | if (master->_get_fact_prot_info) |
441 | slave->mtd.get_fact_prot_info = part_get_fact_prot_info; | 407 | slave->mtd._get_fact_prot_info = part_get_fact_prot_info; |
442 | if (master->sync) | 408 | if (master->_sync) |
443 | slave->mtd.sync = part_sync; | 409 | slave->mtd._sync = part_sync; |
444 | if (!partno && !master->dev.class && master->suspend && master->resume) { | 410 | if (!partno && !master->dev.class && master->_suspend && |
445 | slave->mtd.suspend = part_suspend; | 411 | master->_resume) { |
446 | slave->mtd.resume = part_resume; | 412 | slave->mtd._suspend = part_suspend; |
413 | slave->mtd._resume = part_resume; | ||
447 | } | 414 | } |
448 | if (master->writev) | 415 | if (master->_writev) |
449 | slave->mtd.writev = part_writev; | 416 | slave->mtd._writev = part_writev; |
450 | if (master->lock) | 417 | if (master->_lock) |
451 | slave->mtd.lock = part_lock; | 418 | slave->mtd._lock = part_lock; |
452 | if (master->unlock) | 419 | if (master->_unlock) |
453 | slave->mtd.unlock = part_unlock; | 420 | slave->mtd._unlock = part_unlock; |
454 | if (master->is_locked) | 421 | if (master->_is_locked) |
455 | slave->mtd.is_locked = part_is_locked; | 422 | slave->mtd._is_locked = part_is_locked; |
456 | if (master->block_isbad) | 423 | if (master->_block_isbad) |
457 | slave->mtd.block_isbad = part_block_isbad; | 424 | slave->mtd._block_isbad = part_block_isbad; |
458 | if (master->block_markbad) | 425 | if (master->_block_markbad) |
459 | slave->mtd.block_markbad = part_block_markbad; | 426 | slave->mtd._block_markbad = part_block_markbad; |
460 | slave->mtd.erase = part_erase; | 427 | slave->mtd._erase = part_erase; |
461 | slave->master = master; | 428 | slave->master = master; |
462 | slave->offset = part->offset; | 429 | slave->offset = part->offset; |
463 | 430 | ||
@@ -549,7 +516,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, | |||
549 | } | 516 | } |
550 | 517 | ||
551 | slave->mtd.ecclayout = master->ecclayout; | 518 | slave->mtd.ecclayout = master->ecclayout; |
552 | if (master->block_isbad) { | 519 | slave->mtd.ecc_strength = master->ecc_strength; |
520 | if (master->_block_isbad) { | ||
553 | uint64_t offs = 0; | 521 | uint64_t offs = 0; |
554 | 522 | ||
555 | while (offs < slave->mtd.size) { | 523 | while (offs < slave->mtd.size) { |
@@ -761,7 +729,7 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types, | |||
761 | for ( ; ret <= 0 && *types; types++) { | 729 | for ( ; ret <= 0 && *types; types++) { |
762 | parser = get_partition_parser(*types); | 730 | parser = get_partition_parser(*types); |
763 | if (!parser && !request_module("%s", *types)) | 731 | if (!parser && !request_module("%s", *types)) |
764 | parser = get_partition_parser(*types); | 732 | parser = get_partition_parser(*types); |
765 | if (!parser) | 733 | if (!parser) |
766 | continue; | 734 | continue; |
767 | ret = (*parser->parse_fn)(master, pparts, data); | 735 | ret = (*parser->parse_fn)(master, pparts, data); |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index a3c4de551ebe..7d17cecad69d 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig | |||
@@ -314,6 +314,26 @@ config MTD_NAND_DISKONCHIP_BBTWRITE | |||
314 | load time (assuming you build diskonchip as a module) with the module | 314 | load time (assuming you build diskonchip as a module) with the module |
315 | parameter "inftl_bbt_write=1". | 315 | parameter "inftl_bbt_write=1". |
316 | 316 | ||
317 | config MTD_NAND_DOCG4 | ||
318 | tristate "Support for DiskOnChip G4 (EXPERIMENTAL)" | ||
319 | depends on EXPERIMENTAL | ||
320 | select BCH | ||
321 | select BITREVERSE | ||
322 | help | ||
323 | Support for diskonchip G4 nand flash, found in various smartphones and | ||
324 | PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba | ||
325 | Portege G900, Asus P526, and O2 XDA Zinc. | ||
326 | |||
327 | With this driver you will be able to use UBI and create a ubifs on the | ||
328 | device, so you may wish to consider enabling UBI and UBIFS as well. | ||
329 | |||
330 | These devices ship with the Mys/Sandisk SAFTL formatting, for which | ||
331 | there is currently no mtd parser, so you may want to use command line | ||
332 | partitioning to segregate write-protected blocks. On the Treo680, the | ||
333 | first five erase blocks (256KiB each) are write-protected, followed | ||
334 | by the block containing the saftl partition table. This is probably | ||
335 | typical. | ||
336 | |||
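Since no parser understands the SAFTL format yet, the help text points at command-line partitioning. A hypothetical cmdlinepart entry for the Treo680 layout it describes (five 256KiB protected blocks plus the SAFTL table block); the mtd-id "docg4" is a guess, so check /proc/mtd on the target for the real device name:

	mtdparts=docg4:1280k(protected)ro,256k(saftl)ro,-(filesystem)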
317 | config MTD_NAND_SHARPSL | 337 | config MTD_NAND_SHARPSL |
318 | tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" | 338 | tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)" |
319 | depends on ARCH_PXA | 339 | depends on ARCH_PXA |
@@ -421,7 +441,6 @@ config MTD_NAND_NANDSIM | |||
421 | config MTD_NAND_GPMI_NAND | 441 | config MTD_NAND_GPMI_NAND |
422 | bool "GPMI NAND Flash Controller driver" | 442 | bool "GPMI NAND Flash Controller driver" |
423 | depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) | 443 | depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) |
424 | select MTD_CMDLINE_PARTS | ||
425 | help | 444 | help |
426 | Enables NAND Flash support for IMX23 or IMX28. | 445 | Enables NAND Flash support for IMX23 or IMX28. |
427 | The GPMI controller is very powerful, with the help of BCH | 446 | The GPMI controller is very powerful, with the help of BCH |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 19bc8cb1d187..d4b4d8739bd8 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile | |||
@@ -19,6 +19,7 @@ obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o | |||
19 | obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o | 19 | obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o |
20 | obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o | 20 | obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o |
21 | obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o | 21 | obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o |
22 | obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o | ||
22 | obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o | 23 | obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o |
23 | obj-$(CONFIG_MTD_NAND_H1900) += h1910.o | 24 | obj-$(CONFIG_MTD_NAND_H1900) += h1910.o |
24 | obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o | 25 | obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o |
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c index 6a5ff64a139e..4f20e1d8bef1 100644 --- a/drivers/mtd/nand/alauda.c +++ b/drivers/mtd/nand/alauda.c | |||
@@ -585,12 +585,13 @@ static int alauda_init_media(struct alauda *al) | |||
585 | mtd->writesize = 1<<card->pageshift; | 585 | mtd->writesize = 1<<card->pageshift; |
586 | mtd->type = MTD_NANDFLASH; | 586 | mtd->type = MTD_NANDFLASH; |
587 | mtd->flags = MTD_CAP_NANDFLASH; | 587 | mtd->flags = MTD_CAP_NANDFLASH; |
588 | mtd->read = alauda_read; | 588 | mtd->_read = alauda_read; |
589 | mtd->write = alauda_write; | 589 | mtd->_write = alauda_write; |
590 | mtd->erase = alauda_erase; | 590 | mtd->_erase = alauda_erase; |
591 | mtd->block_isbad = alauda_isbad; | 591 | mtd->_block_isbad = alauda_isbad; |
592 | mtd->priv = al; | 592 | mtd->priv = al; |
593 | mtd->owner = THIS_MODULE; | 593 | mtd->owner = THIS_MODULE; |
594 | mtd->ecc_strength = 1; | ||
594 | 595 | ||
595 | err = mtd_device_register(mtd, NULL, 0); | 596 | err = mtd_device_register(mtd, NULL, 0); |
596 | if (err) { | 597 | if (err) { |
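
Editor's note: the alauda hunk above is typical of the renaming applied throughout this series: the mtd_info hooks gain an underscore prefix (mtd->read becomes mtd->_read, and so on), with drivers filling in the underscored hooks while callers go through the mtd_*() wrappers. The sketch below illustrates the caller side under that assumption; the device number and buffer size are arbitrary.

    #include <linux/err.h>
    #include <linux/mtd/mtd.h>

    /* Illustrative only: read the first 128 bytes of mtd0 through the
     * mtd_read() wrapper rather than the (now underscored) driver hook. */
    static int example_read_mtd0(u_char *buf)
    {
            struct mtd_info *mtd = get_mtd_device(NULL, 0);
            size_t retlen;
            int err;

            if (IS_ERR(mtd))
                    return PTR_ERR(mtd);

            err = mtd_read(mtd, 0, 128, &retlen, buf);
            put_mtd_device(mtd);
            return err;
    }
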
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index ae7e37d9ac17..2165576a1c67 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c | |||
@@ -603,6 +603,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev) | |||
603 | nand_chip->ecc.hwctl = atmel_nand_hwctl; | 603 | nand_chip->ecc.hwctl = atmel_nand_hwctl; |
604 | nand_chip->ecc.read_page = atmel_nand_read_page; | 604 | nand_chip->ecc.read_page = atmel_nand_read_page; |
605 | nand_chip->ecc.bytes = 4; | 605 | nand_chip->ecc.bytes = 4; |
606 | nand_chip->ecc.strength = 1; | ||
606 | } | 607 | } |
607 | 608 | ||
608 | nand_chip->chip_delay = 20; /* 20us command delay time */ | 609 | nand_chip->chip_delay = 20; /* 20us command delay time */ |
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c index 64c9cbaf86a1..6908cdde3065 100644 --- a/drivers/mtd/nand/bcm_umi_nand.c +++ b/drivers/mtd/nand/bcm_umi_nand.c | |||
@@ -475,6 +475,14 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
475 | largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; | 475 | largepage_bbt.options = NAND_BBT_SCAN2NDPAGE; |
476 | this->badblock_pattern = &largepage_bbt; | 476 | this->badblock_pattern = &largepage_bbt; |
477 | } | 477 | } |
478 | |||
479 | /* | ||
480 | * FIXME: ecc strength value of 6 bits per 512 bytes of data is a | ||
481 | * conservative guess, given 13 ecc bytes and using bch alg. | ||
482 | * (Assume Galois field order m=15 to allow a margin of error.) | ||
483 | */ | ||
484 | this->ecc.strength = 6; | ||
485 | |||
478 | #endif | 486 | #endif |
479 | 487 | ||
480 | /* Now finish off the scan, now that ecc.layout has been initialized. */ | 488 | /* Now finish off the scan, now that ecc.layout has been initialized. */ |
@@ -487,7 +495,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev) | |||
487 | 495 | ||
488 | /* Register the partitions */ | 496 | /* Register the partitions */ |
489 | board_mtd->name = "bcm_umi-nand"; | 497 | board_mtd->name = "bcm_umi-nand"; |
490 | mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0); | 498 | mtd_device_parse_register(board_mtd, NULL, NULL, NULL, 0); |
491 | 499 | ||
492 | /* Return happy */ | 500 | /* Return happy */ |
493 | return 0; | 501 | return 0; |
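
Editor's note: the bcm_umi hunk above also shows the other change repeated across the drivers below: the third argument of mtd_device_parse_register() is a pointer to optional parser data, so the literal 0 becomes NULL. For a driver that does want to hand data to a partition parser, the call would look roughly like the sketch below; the struct field and the "cmdlinepart"/"ofpart" parser names reflect my understanding of this kernel version and should be treated as assumptions.

    #include <linux/of.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/partitions.h>

    /* Illustrative only: register an mtd device, letting the command-line
     * and device-tree partition parsers run with explicit parser data. */
    static int example_register(struct mtd_info *mtd, struct device_node *np)
    {
            static const char *probes[] = { "cmdlinepart", "ofpart", NULL };
            struct mtd_part_parser_data ppdata = {
                    .of_node = np,  /* consumed by the "ofpart" parser */
            };

            return mtd_device_parse_register(mtd, probes, &ppdata, NULL, 0);
    }
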
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c index dd899cb5d366..d7b86b925de5 100644 --- a/drivers/mtd/nand/bf5xx_nand.c +++ b/drivers/mtd/nand/bf5xx_nand.c | |||
@@ -702,9 +702,11 @@ static int bf5xx_nand_scan(struct mtd_info *mtd) | |||
702 | if (likely(mtd->writesize >= 512)) { | 702 | if (likely(mtd->writesize >= 512)) { |
703 | chip->ecc.size = 512; | 703 | chip->ecc.size = 512; |
704 | chip->ecc.bytes = 6; | 704 | chip->ecc.bytes = 6; |
705 | chip->ecc.strength = 2; | ||
705 | } else { | 706 | } else { |
706 | chip->ecc.size = 256; | 707 | chip->ecc.size = 256; |
707 | chip->ecc.bytes = 3; | 708 | chip->ecc.bytes = 3; |
709 | chip->ecc.strength = 1; | ||
708 | bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); | 710 | bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET)); |
709 | SSYNC(); | 711 | SSYNC(); |
710 | } | 712 | } |
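
Editor's note: most of the remaining driver hunks, like the two bf5xx cases just above, populate the new ecc.strength field next to ecc.size and ecc.bytes; it records how many bit errors the scheme can correct per ECC step (strength 1 for the 3-byte-per-256-byte case, strength 2 for the 6-byte-per-512-byte case here). A condensed sketch of the pattern, with values copied from this hunk rather than from any real controller:

    #include <linux/mtd/nand.h>

    /* Illustrative only: describe an ECC step including the new strength
     * field, using the small-page values from the bf5xx hunk above. */
    static void example_describe_ecc(struct nand_chip *chip)
    {
            chip->ecc.size     = 256;  /* data bytes per ECC step */
            chip->ecc.bytes    = 3;    /* ECC bytes generated per step */
            chip->ecc.strength = 1;    /* correctable bit errors per step */
    }
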
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index 72d3f23490c5..2a96e1a12062 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c | |||
@@ -783,6 +783,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, | |||
783 | cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME; | 783 | cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME; |
784 | cafe->nand.ecc.size = mtd->writesize; | 784 | cafe->nand.ecc.size = mtd->writesize; |
785 | cafe->nand.ecc.bytes = 14; | 785 | cafe->nand.ecc.bytes = 14; |
786 | cafe->nand.ecc.strength = 4; | ||
786 | cafe->nand.ecc.hwctl = (void *)cafe_nand_bug; | 787 | cafe->nand.ecc.hwctl = (void *)cafe_nand_bug; |
787 | cafe->nand.ecc.calculate = (void *)cafe_nand_bug; | 788 | cafe->nand.ecc.calculate = (void *)cafe_nand_bug; |
788 | cafe->nand.ecc.correct = (void *)cafe_nand_bug; | 789 | cafe->nand.ecc.correct = (void *)cafe_nand_bug; |
@@ -799,7 +800,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev, | |||
799 | pci_set_drvdata(pdev, mtd); | 800 | pci_set_drvdata(pdev, mtd); |
800 | 801 | ||
801 | mtd->name = "cafe_nand"; | 802 | mtd->name = "cafe_nand"; |
802 | mtd_device_parse_register(mtd, part_probes, 0, NULL, 0); | 803 | mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0); |
803 | 804 | ||
804 | goto out; | 805 | goto out; |
805 | 806 | ||
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 737ef9a04fdb..1024bfc05c86 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c | |||
@@ -219,7 +219,7 @@ static int __init cmx270_init(void) | |||
219 | } | 219 | } |
220 | 220 | ||
221 | /* Register the partitions */ | 221 | /* Register the partitions */ |
222 | ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0, | 222 | ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL, |
223 | partition_info, NUM_PARTITIONS); | 223 | partition_info, NUM_PARTITIONS); |
224 | if (ret) | 224 | if (ret) |
225 | goto err_scan; | 225 | goto err_scan; |
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c index 414afa793563..821c34c62500 100644 --- a/drivers/mtd/nand/cs553x_nand.c +++ b/drivers/mtd/nand/cs553x_nand.c | |||
@@ -248,6 +248,8 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr) | |||
248 | goto out_ior; | 248 | goto out_ior; |
249 | } | 249 | } |
250 | 250 | ||
251 | this->ecc.strength = 1; | ||
252 | |||
251 | new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs); | 253 | new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs); |
252 | 254 | ||
253 | cs553x_mtd[cs] = new_mtd; | 255 | cs553x_mtd[cs] = new_mtd; |
@@ -313,7 +315,7 @@ static int __init cs553x_init(void) | |||
313 | for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { | 315 | for (i = 0; i < NR_CS553X_CONTROLLERS; i++) { |
314 | if (cs553x_mtd[i]) { | 316 | if (cs553x_mtd[i]) { |
315 | /* If any devices registered, return success. Else the last error. */ | 317 | /* If any devices registered, return success. Else the last error. */ |
316 | mtd_device_parse_register(cs553x_mtd[i], NULL, 0, | 318 | mtd_device_parse_register(cs553x_mtd[i], NULL, NULL, |
317 | NULL, 0); | 319 | NULL, 0); |
318 | err = 0; | 320 | err = 0; |
319 | } | 321 | } |
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 6e566156956f..d94b03c207af 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c | |||
@@ -641,6 +641,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev) | |||
641 | info->chip.ecc.bytes = 3; | 641 | info->chip.ecc.bytes = 3; |
642 | } | 642 | } |
643 | info->chip.ecc.size = 512; | 643 | info->chip.ecc.size = 512; |
644 | info->chip.ecc.strength = pdata->ecc_bits; | ||
644 | break; | 645 | break; |
645 | default: | 646 | default: |
646 | ret = -EINVAL; | 647 | ret = -EINVAL; |
@@ -752,8 +753,8 @@ syndrome_done: | |||
752 | if (ret < 0) | 753 | if (ret < 0) |
753 | goto err_scan; | 754 | goto err_scan; |
754 | 755 | ||
755 | ret = mtd_device_parse_register(&info->mtd, NULL, 0, | 756 | ret = mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts, |
756 | pdata->parts, pdata->nr_parts); | 757 | pdata->nr_parts); |
757 | 758 | ||
758 | if (ret < 0) | 759 | if (ret < 0) |
759 | goto err_scan; | 760 | goto err_scan; |
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 3984d488f9ab..a9e57d686297 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c | |||
@@ -1590,6 +1590,7 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1590 | ECC_15BITS * (denali->mtd.writesize / | 1590 | ECC_15BITS * (denali->mtd.writesize / |
1591 | ECC_SECTOR_SIZE)))) { | 1591 | ECC_SECTOR_SIZE)))) { |
1592 | /* if MLC OOB size is large enough, use 15bit ECC*/ | 1592 | /* if MLC OOB size is large enough, use 15bit ECC*/ |
1593 | denali->nand.ecc.strength = 15; | ||
1593 | denali->nand.ecc.layout = &nand_15bit_oob; | 1594 | denali->nand.ecc.layout = &nand_15bit_oob; |
1594 | denali->nand.ecc.bytes = ECC_15BITS; | 1595 | denali->nand.ecc.bytes = ECC_15BITS; |
1595 | iowrite32(15, denali->flash_reg + ECC_CORRECTION); | 1596 | iowrite32(15, denali->flash_reg + ECC_CORRECTION); |
@@ -1600,12 +1601,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
1600 | " contain 8bit ECC correction codes"); | 1601 | " contain 8bit ECC correction codes"); |
1601 | goto failed_req_irq; | 1602 | goto failed_req_irq; |
1602 | } else { | 1603 | } else { |
1604 | denali->nand.ecc.strength = 8; | ||
1603 | denali->nand.ecc.layout = &nand_8bit_oob; | 1605 | denali->nand.ecc.layout = &nand_8bit_oob; |
1604 | denali->nand.ecc.bytes = ECC_8BITS; | 1606 | denali->nand.ecc.bytes = ECC_8BITS; |
1605 | iowrite32(8, denali->flash_reg + ECC_CORRECTION); | 1607 | iowrite32(8, denali->flash_reg + ECC_CORRECTION); |
1606 | } | 1608 | } |
1607 | 1609 | ||
1608 | denali->nand.ecc.bytes *= denali->devnum; | 1610 | denali->nand.ecc.bytes *= denali->devnum; |
1611 | denali->nand.ecc.strength *= denali->devnum; | ||
1609 | denali->nand.ecc.layout->eccbytes *= | 1612 | denali->nand.ecc.layout->eccbytes *= |
1610 | denali->mtd.writesize / ECC_SECTOR_SIZE; | 1613 | denali->mtd.writesize / ECC_SECTOR_SIZE; |
1611 | denali->nand.ecc.layout->oobfree[0].offset = | 1614 | denali->nand.ecc.layout->oobfree[0].offset = |
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index df921e7a496c..e2ca067631cf 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c | |||
@@ -1653,6 +1653,7 @@ static int __init doc_probe(unsigned long physadr) | |||
1653 | nand->ecc.mode = NAND_ECC_HW_SYNDROME; | 1653 | nand->ecc.mode = NAND_ECC_HW_SYNDROME; |
1654 | nand->ecc.size = 512; | 1654 | nand->ecc.size = 512; |
1655 | nand->ecc.bytes = 6; | 1655 | nand->ecc.bytes = 6; |
1656 | nand->ecc.strength = 2; | ||
1656 | nand->bbt_options = NAND_BBT_USE_FLASH; | 1657 | nand->bbt_options = NAND_BBT_USE_FLASH; |
1657 | 1658 | ||
1658 | doc->physadr = physadr; | 1659 | doc->physadr = physadr; |
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c new file mode 100644 index 000000000000..b08202664543 --- /dev/null +++ b/drivers/mtd/nand/docg4.c | |||
@@ -0,0 +1,1377 @@ | |||
1 | /* | ||
2 | * Copyright © 2012 Mike Dunn <mikedunn@newsguy.com> | ||
3 | * | ||
4 | * mtd nand driver for M-Systems DiskOnChip G4 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * Tested on the Palm Treo 680. The G4 is also present on Toshiba Portege, Asus | ||
12 | * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others. | ||
13 | * Should work on these as well. Let me know! | ||
14 | * | ||
15 | * TODO: | ||
16 | * | ||
17 | * Mechanism for management of password-protected areas | ||
18 | * | ||
19 | * Hamming ecc when reading oob only | ||
20 | * | ||
21 | * According to the M-Sys documentation, this device is also available in a | ||
22 | * "dual-die" configuration having a 256MB capacity, but no mechanism for | ||
23 | * detecting this variant is documented. Currently this driver assumes 128MB | ||
24 | * capacity. | ||
25 | * | ||
26 | * Support for multiple cascaded devices ("floors"). Not sure which gadgets | ||
27 | * contain multiple G4s in a cascaded configuration, if any. | ||
28 | * | ||
29 | */ | ||
30 | |||
31 | #include <linux/kernel.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/string.h> | ||
35 | #include <linux/sched.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/export.h> | ||
39 | #include <linux/platform_device.h> | ||
40 | #include <linux/io.h> | ||
41 | #include <linux/bitops.h> | ||
42 | #include <linux/mtd/partitions.h> | ||
43 | #include <linux/mtd/mtd.h> | ||
44 | #include <linux/mtd/nand.h> | ||
45 | #include <linux/bch.h> | ||
46 | #include <linux/bitrev.h> | ||
47 | |||
48 | /* | ||
49 | * You'll want to ignore badblocks if you're reading a partition that contains | ||
50 | * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc.), since | ||
51 | * it does not use mtd nand's method for marking bad blocks (using oob area). | ||
52 | * This will also skip the check of the "page written" flag. | ||
53 | */ | ||
54 | static bool ignore_badblocks; | ||
55 | module_param(ignore_badblocks, bool, 0); | ||
56 | MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed"); | ||
57 | |||
58 | struct docg4_priv { | ||
59 | struct mtd_info *mtd; | ||
60 | struct device *dev; | ||
61 | void __iomem *virtadr; | ||
62 | int status; | ||
63 | struct { | ||
64 | unsigned int command; | ||
65 | int column; | ||
66 | int page; | ||
67 | } last_command; | ||
68 | uint8_t oob_buf[16]; | ||
69 | uint8_t ecc_buf[7]; | ||
70 | int oob_page; | ||
71 | struct bch_control *bch; | ||
72 | }; | ||
73 | |||
74 | /* | ||
75 | * Defines prefixed with DOCG4 are unique to the diskonchip G4. All others are | ||
76 | * shared with other diskonchip devices (P3, G3 at least). | ||
77 | * | ||
78 | * Functions with names prefixed with docg4_ are mtd / nand interface functions | ||
79 | * (though they may also be called internally). All others are internal. | ||
80 | */ | ||
81 | |||
82 | #define DOC_IOSPACE_DATA 0x0800 | ||
83 | |||
84 | /* register offsets */ | ||
85 | #define DOC_CHIPID 0x1000 | ||
86 | #define DOC_DEVICESELECT 0x100a | ||
87 | #define DOC_ASICMODE 0x100c | ||
88 | #define DOC_DATAEND 0x101e | ||
89 | #define DOC_NOP 0x103e | ||
90 | |||
91 | #define DOC_FLASHSEQUENCE 0x1032 | ||
92 | #define DOC_FLASHCOMMAND 0x1034 | ||
93 | #define DOC_FLASHADDRESS 0x1036 | ||
94 | #define DOC_FLASHCONTROL 0x1038 | ||
95 | #define DOC_ECCCONF0 0x1040 | ||
96 | #define DOC_ECCCONF1 0x1042 | ||
97 | #define DOC_HAMMINGPARITY 0x1046 | ||
98 | #define DOC_BCH_SYNDROM(idx) (0x1048 + idx) | ||
99 | |||
100 | #define DOC_ASICMODECONFIRM 0x1072 | ||
101 | #define DOC_CHIPID_INV 0x1074 | ||
102 | #define DOC_POWERMODE 0x107c | ||
103 | |||
104 | #define DOCG4_MYSTERY_REG 0x1050 | ||
105 | |||
106 | /* apparently used only to write oob bytes 6 and 7 */ | ||
107 | #define DOCG4_OOB_6_7 0x1052 | ||
108 | |||
109 | /* DOC_FLASHSEQUENCE register commands */ | ||
110 | #define DOC_SEQ_RESET 0x00 | ||
111 | #define DOCG4_SEQ_PAGE_READ 0x03 | ||
112 | #define DOCG4_SEQ_FLUSH 0x29 | ||
113 | #define DOCG4_SEQ_PAGEWRITE 0x16 | ||
114 | #define DOCG4_SEQ_PAGEPROG 0x1e | ||
115 | #define DOCG4_SEQ_BLOCKERASE 0x24 | ||
116 | |||
117 | /* DOC_FLASHCOMMAND register commands */ | ||
118 | #define DOCG4_CMD_PAGE_READ 0x00 | ||
119 | #define DOC_CMD_ERASECYCLE2 0xd0 | ||
120 | #define DOCG4_CMD_FLUSH 0x70 | ||
121 | #define DOCG4_CMD_READ2 0x30 | ||
122 | #define DOC_CMD_PROG_BLOCK_ADDR 0x60 | ||
123 | #define DOCG4_CMD_PAGEWRITE 0x80 | ||
124 | #define DOC_CMD_PROG_CYCLE2 0x10 | ||
125 | #define DOC_CMD_RESET 0xff | ||
126 | |||
127 | /* DOC_POWERMODE register bits */ | ||
128 | #define DOC_POWERDOWN_READY 0x80 | ||
129 | |||
130 | /* DOC_FLASHCONTROL register bits */ | ||
131 | #define DOC_CTRL_CE 0x10 | ||
132 | #define DOC_CTRL_UNKNOWN 0x40 | ||
133 | #define DOC_CTRL_FLASHREADY 0x01 | ||
134 | |||
135 | /* DOC_ECCCONF0 register bits */ | ||
136 | #define DOC_ECCCONF0_READ_MODE 0x8000 | ||
137 | #define DOC_ECCCONF0_UNKNOWN 0x2000 | ||
138 | #define DOC_ECCCONF0_ECC_ENABLE 0x1000 | ||
139 | #define DOC_ECCCONF0_DATA_BYTES_MASK 0x07ff | ||
140 | |||
141 | /* DOC_ECCCONF1 register bits */ | ||
142 | #define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80 | ||
143 | #define DOC_ECCCONF1_ECC_ENABLE 0x07 | ||
144 | #define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20 | ||
145 | |||
146 | /* DOC_ASICMODE register bits */ | ||
147 | #define DOC_ASICMODE_RESET 0x00 | ||
148 | #define DOC_ASICMODE_NORMAL 0x01 | ||
149 | #define DOC_ASICMODE_POWERDOWN 0x02 | ||
150 | #define DOC_ASICMODE_MDWREN 0x04 | ||
151 | #define DOC_ASICMODE_BDETCT_RESET 0x08 | ||
152 | #define DOC_ASICMODE_RSTIN_RESET 0x10 | ||
153 | #define DOC_ASICMODE_RAM_WE 0x20 | ||
154 | |||
155 | /* good status values read after read/write/erase operations */ | ||
156 | #define DOCG4_PROGSTATUS_GOOD 0x51 | ||
157 | #define DOCG4_PROGSTATUS_GOOD_2 0xe0 | ||
158 | |||
159 | /* | ||
160 | * On read operations (page and oob-only), the first byte read from I/O reg is a | ||
161 | * status. On error, it reads 0x73; otherwise, it reads either 0x71 (first read | ||
162 | * after reset only) or 0x51, so bit 1 is presumed to be an error indicator. | ||
163 | */ | ||
164 | #define DOCG4_READ_ERROR 0x02 /* bit 1 indicates read error */ | ||
165 | |||
166 | /* anatomy of the device */ | ||
167 | #define DOCG4_CHIP_SIZE 0x8000000 | ||
168 | #define DOCG4_PAGE_SIZE 0x200 | ||
169 | #define DOCG4_PAGES_PER_BLOCK 0x200 | ||
170 | #define DOCG4_BLOCK_SIZE (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE) | ||
171 | #define DOCG4_NUMBLOCKS (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE) | ||
172 | #define DOCG4_OOB_SIZE 0x10 | ||
173 | #define DOCG4_CHIP_SHIFT 27 /* log_2(DOCG4_CHIP_SIZE) */ | ||
174 | #define DOCG4_PAGE_SHIFT 9 /* log_2(DOCG4_PAGE_SIZE) */ | ||
175 | #define DOCG4_ERASE_SHIFT 18 /* log_2(DOCG4_BLOCK_SIZE) */ | ||
176 | |||
177 | /* all but the last byte is included in ecc calculation */ | ||
178 | #define DOCG4_BCH_SIZE (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1) | ||
179 | |||
180 | #define DOCG4_USERDATA_LEN 520 /* 512 byte page plus 8 oob avail to user */ | ||
181 | |||
182 | /* expected values from the ID registers */ | ||
183 | #define DOCG4_IDREG1_VALUE 0x0400 | ||
184 | #define DOCG4_IDREG2_VALUE 0xfbff | ||
185 | |||
186 | /* primitive polynomial used to build the Galois field used by hw ecc gen */ | ||
187 | #define DOCG4_PRIMITIVE_POLY 0x4443 | ||
188 | |||
189 | #define DOCG4_M 14 /* Galois field is of order 2^14 */ | ||
190 | #define DOCG4_T 4 /* BCH alg corrects up to 4 bit errors */ | ||
191 | |||
192 | #define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */ | ||
193 | |||
194 | /* | ||
195 | * Oob bytes 0 - 6 are available to the user. | ||
196 | * Byte 7 is hamming ecc for first 7 bytes. Bytes 8 - 14 are hw-generated ecc. | ||
197 | * Byte 15 (the last) is used by the driver as a "page written" flag. | ||
198 | */ | ||
199 | static struct nand_ecclayout docg4_oobinfo = { | ||
200 | .eccbytes = 9, | ||
201 | .eccpos = {7, 8, 9, 10, 11, 12, 13, 14, 15}, | ||
202 | .oobavail = 7, | ||
203 | .oobfree = { {0, 7} } | ||
204 | }; | ||
205 | |||
206 | /* | ||
207 | * The device has a nop register which M-Sys claims is for the purpose of | ||
208 | * inserting precise delays. But beware; at least some operations fail if the | ||
209 | * nop writes are replaced with a generic delay! | ||
210 | */ | ||
211 | static inline void write_nop(void __iomem *docptr) | ||
212 | { | ||
213 | writew(0, docptr + DOC_NOP); | ||
214 | } | ||
215 | |||
216 | static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | ||
217 | { | ||
218 | int i; | ||
219 | struct nand_chip *nand = mtd->priv; | ||
220 | uint16_t *p = (uint16_t *) buf; | ||
221 | len >>= 1; | ||
222 | |||
223 | for (i = 0; i < len; i++) | ||
224 | p[i] = readw(nand->IO_ADDR_R); | ||
225 | } | ||
226 | |||
227 | static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
228 | { | ||
229 | int i; | ||
230 | struct nand_chip *nand = mtd->priv; | ||
231 | uint16_t *p = (uint16_t *) buf; | ||
232 | len >>= 1; | ||
233 | |||
234 | for (i = 0; i < len; i++) | ||
235 | writew(p[i], nand->IO_ADDR_W); | ||
236 | } | ||
237 | |||
238 | static int poll_status(struct docg4_priv *doc) | ||
239 | { | ||
240 | /* | ||
241 | * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL | ||
242 | * register. Operations known to take a long time (e.g., block erase) | ||
243 | * should sleep for a while before calling this. | ||
244 | */ | ||
245 | |||
246 | uint16_t flash_status; | ||
247 | unsigned int timeo; | ||
248 | void __iomem *docptr = doc->virtadr; | ||
249 | |||
250 | dev_dbg(doc->dev, "%s...\n", __func__); | ||
251 | |||
252 | /* hardware quirk requires reading twice initially */ | ||
253 | flash_status = readw(docptr + DOC_FLASHCONTROL); | ||
254 | |||
255 | timeo = 1000; | ||
256 | do { | ||
257 | cpu_relax(); | ||
258 | flash_status = readb(docptr + DOC_FLASHCONTROL); | ||
259 | } while (!(flash_status & DOC_CTRL_FLASHREADY) && --timeo); | ||
260 | |||
261 | |||
262 | if (!timeo) { | ||
263 | dev_err(doc->dev, "%s: timed out!\n", __func__); | ||
264 | return NAND_STATUS_FAIL; | ||
265 | } | ||
266 | |||
267 | if (unlikely(timeo < 50)) | ||
268 | dev_warn(doc->dev, "%s: nearly timed out; %d remaining\n", | ||
269 | __func__, timeo); | ||
270 | |||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | |||
275 | static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand) | ||
276 | { | ||
277 | |||
278 | struct docg4_priv *doc = nand->priv; | ||
279 | int status = NAND_STATUS_WP; /* inverse logic?? */ | ||
280 | dev_dbg(doc->dev, "%s...\n", __func__); | ||
281 | |||
282 | /* report any previously unreported error */ | ||
283 | if (doc->status) { | ||
284 | status |= doc->status; | ||
285 | doc->status = 0; | ||
286 | return status; | ||
287 | } | ||
288 | |||
289 | status |= poll_status(doc); | ||
290 | return status; | ||
291 | } | ||
292 | |||
293 | static void docg4_select_chip(struct mtd_info *mtd, int chip) | ||
294 | { | ||
295 | /* | ||
296 | * Select among multiple cascaded chips ("floors"). Multiple floors are | ||
297 | * not yet supported, so the only valid non-negative value is 0. | ||
298 | */ | ||
299 | struct nand_chip *nand = mtd->priv; | ||
300 | struct docg4_priv *doc = nand->priv; | ||
301 | void __iomem *docptr = doc->virtadr; | ||
302 | |||
303 | dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip); | ||
304 | |||
305 | if (chip < 0) | ||
306 | return; /* deselected */ | ||
307 | |||
308 | if (chip > 0) | ||
309 | dev_warn(doc->dev, "multiple floors currently unsupported\n"); | ||
310 | |||
311 | writew(0, docptr + DOC_DEVICESELECT); | ||
312 | } | ||
313 | |||
314 | static void reset(struct mtd_info *mtd) | ||
315 | { | ||
316 | /* full device reset */ | ||
317 | |||
318 | struct nand_chip *nand = mtd->priv; | ||
319 | struct docg4_priv *doc = nand->priv; | ||
320 | void __iomem *docptr = doc->virtadr; | ||
321 | |||
322 | writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN, | ||
323 | docptr + DOC_ASICMODE); | ||
324 | writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN), | ||
325 | docptr + DOC_ASICMODECONFIRM); | ||
326 | write_nop(docptr); | ||
327 | |||
328 | writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN, | ||
329 | docptr + DOC_ASICMODE); | ||
330 | writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN), | ||
331 | docptr + DOC_ASICMODECONFIRM); | ||
332 | |||
333 | writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1); | ||
334 | |||
335 | poll_status(doc); | ||
336 | } | ||
337 | |||
338 | static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf) | ||
339 | { | ||
340 | /* read the 7 hw-generated ecc bytes */ | ||
341 | |||
342 | int i; | ||
343 | for (i = 0; i < 7; i++) { /* hw quirk; read twice */ | ||
344 | ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i)); | ||
345 | ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i)); | ||
346 | } | ||
347 | } | ||
348 | |||
349 | static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page) | ||
350 | { | ||
351 | /* | ||
352 | * Called after a page read when hardware reports bitflips. | ||
353 | * Up to four bitflips can be corrected. | ||
354 | */ | ||
355 | |||
356 | struct nand_chip *nand = mtd->priv; | ||
357 | struct docg4_priv *doc = nand->priv; | ||
358 | void __iomem *docptr = doc->virtadr; | ||
359 | int i, numerrs, errpos[4]; | ||
360 | const uint8_t blank_read_hwecc[8] = { | ||
361 | 0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 }; | ||
362 | |||
363 | read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */ | ||
364 | |||
365 | /* check if read error is due to a blank page */ | ||
366 | if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7)) | ||
367 | return 0; /* yes */ | ||
368 | |||
369 | /* skip additional check of "written flag" if ignore_badblocks */ | ||
370 | if (ignore_badblocks == false) { | ||
371 | |||
372 | /* | ||
373 | * If the hw ecc bytes are not those of a blank page, there's | ||
374 | * still a chance that the page is blank, but was read with | ||
375 | * errors. Check the "written flag" in last oob byte, which | ||
376 | * is set to zero when a page is written. If more than half | ||
377 | * the bits are set, assume a blank page. Unfortunately, the | ||
378 | * bit flip(s) are not reported in stats. | ||
379 | */ | ||
380 | |||
381 | if (doc->oob_buf[15]) { | ||
382 | int bit, numsetbits = 0; | ||
383 | unsigned long written_flag = doc->oob_buf[15]; | ||
384 | for_each_set_bit(bit, &written_flag, 8) | ||
385 | numsetbits++; | ||
386 | if (numsetbits > 4) { /* assume blank */ | ||
387 | dev_warn(doc->dev, | ||
388 | "error(s) in blank page " | ||
389 | "at offset %08x\n", | ||
390 | page * DOCG4_PAGE_SIZE); | ||
391 | return 0; | ||
392 | } | ||
393 | } | ||
394 | } | ||
395 | |||
396 | /* | ||
397 | * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch | ||
398 | * algorithm is used to decode this. However the hw operates on page | ||
399 | * data in a bit order that is the reverse of that of the bch alg, | ||
400 | * requiring that the bits be reversed on the result. Thanks to Ivan | ||
401 | * Djelic for his analysis! | ||
402 | */ | ||
403 | for (i = 0; i < 7; i++) | ||
404 | doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]); | ||
405 | |||
406 | numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL, | ||
407 | doc->ecc_buf, NULL, errpos); | ||
408 | |||
409 | if (numerrs == -EBADMSG) { | ||
410 | dev_warn(doc->dev, "uncorrectable errors at offset %08x\n", | ||
411 | page * DOCG4_PAGE_SIZE); | ||
412 | return -EBADMSG; | ||
413 | } | ||
414 | |||
415 | BUG_ON(numerrs < 0); /* -EINVAL, or anything other than -EBADMSG */ | ||
416 | |||
417 | /* undo last step in BCH alg (modulo mirroring not needed) */ | ||
418 | for (i = 0; i < numerrs; i++) | ||
419 | errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7)); | ||
420 | |||
421 | /* fix the errors */ | ||
422 | for (i = 0; i < numerrs; i++) { | ||
423 | |||
424 | /* ignore if error within oob ecc bytes */ | ||
425 | if (errpos[i] > DOCG4_USERDATA_LEN * 8) | ||
426 | continue; | ||
427 | |||
428 | /* if error within oob area preceding ecc bytes... */ | ||
429 | if (errpos[i] > DOCG4_PAGE_SIZE * 8) | ||
430 | change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8, | ||
431 | (unsigned long *)doc->oob_buf); | ||
432 | |||
433 | else /* error in page data */ | ||
434 | change_bit(errpos[i], (unsigned long *)buf); | ||
435 | } | ||
436 | |||
437 | dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n", | ||
438 | numerrs, page * DOCG4_PAGE_SIZE); | ||
439 | |||
440 | return numerrs; | ||
441 | } | ||
442 | |||
443 | static uint8_t docg4_read_byte(struct mtd_info *mtd) | ||
444 | { | ||
445 | struct nand_chip *nand = mtd->priv; | ||
446 | struct docg4_priv *doc = nand->priv; | ||
447 | |||
448 | dev_dbg(doc->dev, "%s\n", __func__); | ||
449 | |||
450 | if (doc->last_command.command == NAND_CMD_STATUS) { | ||
451 | int status; | ||
452 | |||
453 | /* | ||
454 | * Previous nand command was status request, so nand | ||
455 | * infrastructure code expects to read the status here. If an | ||
456 | * error occurred in a previous operation, report it. | ||
457 | */ | ||
458 | doc->last_command.command = 0; | ||
459 | |||
460 | if (doc->status) { | ||
461 | status = doc->status; | ||
462 | doc->status = 0; | ||
463 | } | ||
464 | |||
465 | /* why is NAND_STATUS_WP inverse logic?? */ | ||
466 | else | ||
467 | status = NAND_STATUS_WP | NAND_STATUS_READY; | ||
468 | |||
469 | return status; | ||
470 | } | ||
471 | |||
472 | dev_warn(doc->dev, "unexpected call to read_byte()\n"); | ||
473 | |||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr) | ||
478 | { | ||
479 | /* write the four address bytes packed in docg4_addr to the device */ | ||
480 | |||
481 | void __iomem *docptr = doc->virtadr; | ||
482 | writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); | ||
483 | docg4_addr >>= 8; | ||
484 | writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); | ||
485 | docg4_addr >>= 8; | ||
486 | writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); | ||
487 | docg4_addr >>= 8; | ||
488 | writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS); | ||
489 | } | ||
490 | |||
491 | static int read_progstatus(struct docg4_priv *doc) | ||
492 | { | ||
493 | /* | ||
494 | * This apparently checks the status of programming. Done after an | ||
495 | * erasure, and after page data is written. On error, the status is | ||
496 | * saved, to be later retrieved by the nand infrastructure code. | ||
497 | */ | ||
498 | void __iomem *docptr = doc->virtadr; | ||
499 | |||
500 | /* status is read from the I/O reg */ | ||
501 | uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA); | ||
502 | uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA); | ||
503 | uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG); | ||
504 | |||
505 | dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n", | ||
506 | __func__, status1, status2, status3); | ||
507 | |||
508 | if (status1 != DOCG4_PROGSTATUS_GOOD | ||
509 | || status2 != DOCG4_PROGSTATUS_GOOD_2 | ||
510 | || status3 != DOCG4_PROGSTATUS_GOOD_2) { | ||
511 | doc->status = NAND_STATUS_FAIL; | ||
512 | dev_warn(doc->dev, "read_progstatus failed: " | ||
513 | "%02x, %02x, %02x\n", status1, status2, status3); | ||
514 | return -EIO; | ||
515 | } | ||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | static int pageprog(struct mtd_info *mtd) | ||
520 | { | ||
521 | /* | ||
522 | * Final step in writing a page. Writes the contents of its | ||
523 | * internal buffer out to the flash array, or some such. | ||
524 | */ | ||
525 | |||
526 | struct nand_chip *nand = mtd->priv; | ||
527 | struct docg4_priv *doc = nand->priv; | ||
528 | void __iomem *docptr = doc->virtadr; | ||
529 | int retval = 0; | ||
530 | |||
531 | dev_dbg(doc->dev, "docg4: %s\n", __func__); | ||
532 | |||
533 | writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE); | ||
534 | writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND); | ||
535 | write_nop(docptr); | ||
536 | write_nop(docptr); | ||
537 | |||
538 | /* Just busy-wait; usleep_range() slows things down noticeably. */ | ||
539 | poll_status(doc); | ||
540 | |||
541 | writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE); | ||
542 | writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND); | ||
543 | writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0); | ||
544 | write_nop(docptr); | ||
545 | write_nop(docptr); | ||
546 | write_nop(docptr); | ||
547 | write_nop(docptr); | ||
548 | write_nop(docptr); | ||
549 | |||
550 | retval = read_progstatus(doc); | ||
551 | writew(0, docptr + DOC_DATAEND); | ||
552 | write_nop(docptr); | ||
553 | poll_status(doc); | ||
554 | write_nop(docptr); | ||
555 | |||
556 | return retval; | ||
557 | } | ||
558 | |||
559 | static void sequence_reset(struct mtd_info *mtd) | ||
560 | { | ||
561 | /* common starting sequence for all operations */ | ||
562 | |||
563 | struct nand_chip *nand = mtd->priv; | ||
564 | struct docg4_priv *doc = nand->priv; | ||
565 | void __iomem *docptr = doc->virtadr; | ||
566 | |||
567 | writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL); | ||
568 | writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE); | ||
569 | writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND); | ||
570 | write_nop(docptr); | ||
571 | write_nop(docptr); | ||
572 | poll_status(doc); | ||
573 | write_nop(docptr); | ||
574 | } | ||
575 | |||
576 | static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr) | ||
577 | { | ||
578 | /* first step in reading a page */ | ||
579 | |||
580 | struct nand_chip *nand = mtd->priv; | ||
581 | struct docg4_priv *doc = nand->priv; | ||
582 | void __iomem *docptr = doc->virtadr; | ||
583 | |||
584 | dev_dbg(doc->dev, | ||
585 | "docg4: %s: g4 page %08x\n", __func__, docg4_addr); | ||
586 | |||
587 | sequence_reset(mtd); | ||
588 | |||
589 | writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE); | ||
590 | writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND); | ||
591 | write_nop(docptr); | ||
592 | |||
593 | write_addr(doc, docg4_addr); | ||
594 | |||
595 | write_nop(docptr); | ||
596 | writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND); | ||
597 | write_nop(docptr); | ||
598 | write_nop(docptr); | ||
599 | |||
600 | poll_status(doc); | ||
601 | } | ||
602 | |||
603 | static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr) | ||
604 | { | ||
605 | /* first step in writing a page */ | ||
606 | |||
607 | struct nand_chip *nand = mtd->priv; | ||
608 | struct docg4_priv *doc = nand->priv; | ||
609 | void __iomem *docptr = doc->virtadr; | ||
610 | |||
611 | dev_dbg(doc->dev, | ||
612 | "docg4: %s: g4 addr: %x\n", __func__, docg4_addr); | ||
613 | sequence_reset(mtd); | ||
614 | writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE); | ||
615 | writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND); | ||
616 | write_nop(docptr); | ||
617 | write_addr(doc, docg4_addr); | ||
618 | write_nop(docptr); | ||
619 | write_nop(docptr); | ||
620 | poll_status(doc); | ||
621 | } | ||
622 | |||
623 | static uint32_t mtd_to_docg4_address(int page, int column) | ||
624 | { | ||
625 | /* | ||
626 | * Convert mtd address to format used by the device, 32 bit packed. | ||
627 | * | ||
628 | * Some notes on G4 addressing... The M-Sys documentation on this device | ||
629 | * claims that pages are 2K in length, and indeed, the format of the | ||
630 | * address used by the device reflects that. But within each page are | ||
631 | * four 512 byte "sub-pages", each with its own oob data that is | ||
632 | * read/written immediately after the 512 bytes of page data. This oob | ||
633 | * data contains the ecc bytes for the preceding 512 bytes. | ||
634 | * | ||
635 | * Rather than tell the mtd nand infrastructure that page size is 2k, | ||
636 | * with four sub-pages each, we engage in a little subterfuge and tell | ||
637 | * the infrastructure code that pages are 512 bytes in size. This is | ||
638 | * done because during the course of reverse-engineering the device, I | ||
639 | * never observed an instance where an entire 2K "page" was read or | ||
640 | * written as a unit. Each "sub-page" is always addressed individually, | ||
641 | * its data read/written, and ecc handled before the next "sub-page" is | ||
642 | * addressed. | ||
643 | * | ||
644 | * This requires us to convert addresses passed by the mtd nand | ||
645 | * infrastructure code to those used by the device. | ||
646 | * | ||
647 | * The address that is written to the device consists of four bytes: the | ||
648 | * first two are the 2k page number, and the other two are the index into | ||
649 | * the page. The index is in terms of 16-bit half-words and includes | ||
650 | * the preceding oob data, so e.g., the index into the second | ||
651 | * "sub-page" is 0x108, and the full device address of the start of mtd | ||
652 | * page 0x201 is 0x00800108. | ||
653 | */ | ||
654 | int g4_page = page / 4; /* device's 2K page */ | ||
655 | int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */ | ||
656 | return (g4_page << 16) | g4_index; /* pack */ | ||
657 | } | ||
658 | |||
659 | static void docg4_command(struct mtd_info *mtd, unsigned command, int column, | ||
660 | int page_addr) | ||
661 | { | ||
662 | /* handle standard nand commands */ | ||
663 | |||
664 | struct nand_chip *nand = mtd->priv; | ||
665 | struct docg4_priv *doc = nand->priv; | ||
666 | uint32_t g4_addr = mtd_to_docg4_address(page_addr, column); | ||
667 | |||
668 | dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n", | ||
669 | __func__, command, page_addr, column); | ||
670 | |||
671 | /* | ||
672 | * Save the command and its arguments. This enables emulation of | ||
673 | * standard flash devices, and also some optimizations. | ||
674 | */ | ||
675 | doc->last_command.command = command; | ||
676 | doc->last_command.column = column; | ||
677 | doc->last_command.page = page_addr; | ||
678 | |||
679 | switch (command) { | ||
680 | |||
681 | case NAND_CMD_RESET: | ||
682 | reset(mtd); | ||
683 | break; | ||
684 | |||
685 | case NAND_CMD_READ0: | ||
686 | read_page_prologue(mtd, g4_addr); | ||
687 | break; | ||
688 | |||
689 | case NAND_CMD_STATUS: | ||
690 | /* next call to read_byte() will expect a status */ | ||
691 | break; | ||
692 | |||
693 | case NAND_CMD_SEQIN: | ||
694 | write_page_prologue(mtd, g4_addr); | ||
695 | |||
696 | /* hack for deferred write of oob bytes */ | ||
697 | if (doc->oob_page == page_addr) | ||
698 | memcpy(nand->oob_poi, doc->oob_buf, 16); | ||
699 | break; | ||
700 | |||
701 | case NAND_CMD_PAGEPROG: | ||
702 | pageprog(mtd); | ||
703 | break; | ||
704 | |||
705 | /* we don't expect these, based on review of nand_base.c */ | ||
706 | case NAND_CMD_READOOB: | ||
707 | case NAND_CMD_READID: | ||
708 | case NAND_CMD_ERASE1: | ||
709 | case NAND_CMD_ERASE2: | ||
710 | dev_warn(doc->dev, "docg4_command: " | ||
711 | "unexpected nand command 0x%x\n", command); | ||
712 | break; | ||
713 | |||
714 | } | ||
715 | } | ||
716 | |||
717 | static int read_page(struct mtd_info *mtd, struct nand_chip *nand, | ||
718 | uint8_t *buf, int page, bool use_ecc) | ||
719 | { | ||
720 | struct docg4_priv *doc = nand->priv; | ||
721 | void __iomem *docptr = doc->virtadr; | ||
722 | uint16_t status, edc_err, *buf16; | ||
723 | |||
724 | dev_dbg(doc->dev, "%s: page %08x\n", __func__, page); | ||
725 | |||
726 | writew(DOC_ECCCONF0_READ_MODE | | ||
727 | DOC_ECCCONF0_ECC_ENABLE | | ||
728 | DOC_ECCCONF0_UNKNOWN | | ||
729 | DOCG4_BCH_SIZE, | ||
730 | docptr + DOC_ECCCONF0); | ||
731 | write_nop(docptr); | ||
732 | write_nop(docptr); | ||
733 | write_nop(docptr); | ||
734 | write_nop(docptr); | ||
735 | write_nop(docptr); | ||
736 | |||
737 | /* the 1st byte from the I/O reg is a status; the rest is page data */ | ||
738 | status = readw(docptr + DOC_IOSPACE_DATA); | ||
739 | if (status & DOCG4_READ_ERROR) { | ||
740 | dev_err(doc->dev, | ||
741 | "docg4_read_page: bad status: 0x%02x\n", status); | ||
742 | writew(0, docptr + DOC_DATAEND); | ||
743 | return -EIO; | ||
744 | } | ||
745 | |||
746 | dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status); | ||
747 | |||
748 | docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */ | ||
749 | |||
750 | /* | ||
751 | * Diskonchips read oob immediately after a page read. Mtd | ||
752 | * infrastructure issues a separate command for reading oob after the | ||
753 | * page is read. So we save the oob bytes in a local buffer and just | ||
754 | * copy it if the next command reads oob from the same page. | ||
755 | */ | ||
756 | |||
757 | /* first 14 oob bytes read from I/O reg */ | ||
758 | docg4_read_buf(mtd, doc->oob_buf, 14); | ||
759 | |||
760 | /* last 2 read from another reg */ | ||
761 | buf16 = (uint16_t *)(doc->oob_buf + 14); | ||
762 | *buf16 = readw(docptr + DOCG4_MYSTERY_REG); | ||
763 | |||
764 | write_nop(docptr); | ||
765 | |||
766 | if (likely(use_ecc == true)) { | ||
767 | |||
768 | /* read the register that tells us if bitflip(s) detected */ | ||
769 | edc_err = readw(docptr + DOC_ECCCONF1); | ||
770 | edc_err = readw(docptr + DOC_ECCCONF1); | ||
771 | dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err); | ||
772 | |||
773 | /* If bitflips are reported, attempt to correct with ecc */ | ||
774 | if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) { | ||
775 | int bits_corrected = correct_data(mtd, buf, page); | ||
776 | if (bits_corrected == -EBADMSG) | ||
777 | mtd->ecc_stats.failed++; | ||
778 | else | ||
779 | mtd->ecc_stats.corrected += bits_corrected; | ||
780 | } | ||
781 | } | ||
782 | |||
783 | writew(0, docptr + DOC_DATAEND); | ||
784 | return 0; | ||
785 | } | ||
786 | |||
787 | |||
788 | static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand, | ||
789 | uint8_t *buf, int page) | ||
790 | { | ||
791 | return read_page(mtd, nand, buf, page, false); | ||
792 | } | ||
793 | |||
794 | static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand, | ||
795 | uint8_t *buf, int page) | ||
796 | { | ||
797 | return read_page(mtd, nand, buf, page, true); | ||
798 | } | ||
799 | |||
800 | static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand, | ||
801 | int page, int sndcmd) | ||
802 | { | ||
803 | struct docg4_priv *doc = nand->priv; | ||
804 | void __iomem *docptr = doc->virtadr; | ||
805 | uint16_t status; | ||
806 | |||
807 | dev_dbg(doc->dev, "%s: page %x\n", __func__, page); | ||
808 | |||
809 | /* | ||
810 | * Oob bytes are read as part of a normal page read. If the previous | ||
811 | * nand command was a read of the page whose oob is now being read, just | ||
812 | * copy the oob bytes that we saved in a local buffer and avoid a | ||
813 | * separate oob read. | ||
814 | */ | ||
815 | if (doc->last_command.command == NAND_CMD_READ0 && | ||
816 | doc->last_command.page == page) { | ||
817 | memcpy(nand->oob_poi, doc->oob_buf, 16); | ||
818 | return 0; | ||
819 | } | ||
820 | |||
821 | /* | ||
822 | * Separate read of oob data only. | ||
823 | */ | ||
824 | docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page); | ||
825 | |||
826 | writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0); | ||
827 | write_nop(docptr); | ||
828 | write_nop(docptr); | ||
829 | write_nop(docptr); | ||
830 | write_nop(docptr); | ||
831 | write_nop(docptr); | ||
832 | |||
833 | /* the 1st byte from the I/O reg is a status; the rest is oob data */ | ||
834 | status = readw(docptr + DOC_IOSPACE_DATA); | ||
835 | if (status & DOCG4_READ_ERROR) { | ||
836 | dev_warn(doc->dev, | ||
837 | "docg4_read_oob failed: status = 0x%02x\n", status); | ||
838 | return -EIO; | ||
839 | } | ||
840 | |||
841 | dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status); | ||
842 | |||
843 | docg4_read_buf(mtd, nand->oob_poi, 16); | ||
844 | |||
845 | write_nop(docptr); | ||
846 | write_nop(docptr); | ||
847 | write_nop(docptr); | ||
848 | writew(0, docptr + DOC_DATAEND); | ||
849 | write_nop(docptr); | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
854 | static void docg4_erase_block(struct mtd_info *mtd, int page) | ||
855 | { | ||
856 | struct nand_chip *nand = mtd->priv; | ||
857 | struct docg4_priv *doc = nand->priv; | ||
858 | void __iomem *docptr = doc->virtadr; | ||
859 | uint16_t g4_page; | ||
860 | |||
861 | dev_dbg(doc->dev, "%s: page %04x\n", __func__, page); | ||
862 | |||
863 | sequence_reset(mtd); | ||
864 | |||
865 | writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE); | ||
866 | writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND); | ||
867 | write_nop(docptr); | ||
868 | |||
869 | /* only 2 bytes of address are written to specify erase block */ | ||
870 | g4_page = (uint16_t)(page / 4); /* to g4's 2k page addressing */ | ||
871 | writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS); | ||
872 | g4_page >>= 8; | ||
873 | writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS); | ||
874 | write_nop(docptr); | ||
875 | |||
876 | /* start the erasure */ | ||
877 | writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND); | ||
878 | write_nop(docptr); | ||
879 | write_nop(docptr); | ||
880 | |||
881 | usleep_range(500, 1000); /* erasure is long; take a snooze */ | ||
882 | poll_status(doc); | ||
883 | writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE); | ||
884 | writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND); | ||
885 | writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0); | ||
886 | write_nop(docptr); | ||
887 | write_nop(docptr); | ||
888 | write_nop(docptr); | ||
889 | write_nop(docptr); | ||
890 | write_nop(docptr); | ||
891 | |||
892 | read_progstatus(doc); | ||
893 | |||
894 | writew(0, docptr + DOC_DATAEND); | ||
895 | write_nop(docptr); | ||
896 | poll_status(doc); | ||
897 | write_nop(docptr); | ||
898 | } | ||
899 | |||
900 | static void write_page(struct mtd_info *mtd, struct nand_chip *nand, | ||
901 | const uint8_t *buf, bool use_ecc) | ||
902 | { | ||
903 | struct docg4_priv *doc = nand->priv; | ||
904 | void __iomem *docptr = doc->virtadr; | ||
905 | uint8_t ecc_buf[8]; | ||
906 | |||
907 | dev_dbg(doc->dev, "%s...\n", __func__); | ||
908 | |||
909 | writew(DOC_ECCCONF0_ECC_ENABLE | | ||
910 | DOC_ECCCONF0_UNKNOWN | | ||
911 | DOCG4_BCH_SIZE, | ||
912 | docptr + DOC_ECCCONF0); | ||
913 | write_nop(docptr); | ||
914 | |||
915 | /* write the page data */ | ||
916 | docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE); | ||
917 | |||
918 | /* oob bytes 0 through 5 are written to I/O reg */ | ||
919 | docg4_write_buf16(mtd, nand->oob_poi, 6); | ||
920 | |||
921 | /* oob byte 6 written to a separate reg */ | ||
922 | writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7); | ||
923 | |||
924 | write_nop(docptr); | ||
925 | write_nop(docptr); | ||
926 | |||
927 | /* write hw-generated ecc bytes to oob */ | ||
928 | if (likely(use_ecc == true)) { | ||
929 | /* oob byte 7 is hamming code */ | ||
930 | uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY); | ||
931 | hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */ | ||
932 | writew(hamming, docptr + DOCG4_OOB_6_7); | ||
933 | write_nop(docptr); | ||
934 | |||
935 | /* read the 7 bch bytes from ecc regs */ | ||
936 | read_hw_ecc(docptr, ecc_buf); | ||
937 | ecc_buf[7] = 0; /* clear the "page written" flag */ | ||
938 | } | ||
939 | |||
940 | /* write user-supplied bytes to oob */ | ||
941 | else { | ||
942 | writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7); | ||
943 | write_nop(docptr); | ||
944 | memcpy(ecc_buf, &nand->oob_poi[8], 8); | ||
945 | } | ||
946 | |||
947 | docg4_write_buf16(mtd, ecc_buf, 8); | ||
948 | write_nop(docptr); | ||
949 | write_nop(docptr); | ||
950 | writew(0, docptr + DOC_DATAEND); | ||
951 | write_nop(docptr); | ||
952 | } | ||
953 | |||
954 | static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, | ||
955 | const uint8_t *buf) | ||
956 | { | ||
957 | return write_page(mtd, nand, buf, false); | ||
958 | } | ||
959 | |||
960 | static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, | ||
961 | const uint8_t *buf) | ||
962 | { | ||
963 | return write_page(mtd, nand, buf, true); | ||
964 | } | ||
965 | |||
966 | static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand, | ||
967 | int page) | ||
968 | { | ||
969 | /* | ||
970 | * Writing oob-only is not really supported, because MLC nand must write | ||
971 | * oob bytes at the same time as page data. Nonetheless, we save the | ||
972 | * oob buffer contents here, and then write it along with the page data | ||
973 | * if the same page is subsequently written. This allows user space | ||
974 | * utilities that write the oob data prior to the page data to work | ||
975 | * (e.g., nandwrite). The disadvantage is that, if the intention was to | ||
976 | * write oob only, the operation is quietly ignored. Also, oob can get | ||
977 | * corrupted if two concurrent processes are running nandwrite. | ||
978 | */ | ||
979 | |||
980 | /* note that bytes 7..14 are hw generated hamming/ecc and overwritten */ | ||
981 | struct docg4_priv *doc = nand->priv; | ||
982 | doc->oob_page = page; | ||
983 | memcpy(doc->oob_buf, nand->oob_poi, 16); | ||
984 | return 0; | ||
985 | } | ||
986 | |||
987 | static int __init read_factory_bbt(struct mtd_info *mtd) | ||
988 | { | ||
989 | /* | ||
990 | * The device contains a read-only factory bad block table. Read it and | ||
991 | * update the memory-based bbt accordingly. | ||
992 | */ | ||
993 | |||
994 | struct nand_chip *nand = mtd->priv; | ||
995 | struct docg4_priv *doc = nand->priv; | ||
996 | uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0); | ||
997 | uint8_t *buf; | ||
998 | int i, block, status; | ||
999 | |||
1000 | buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL); | ||
1001 | if (buf == NULL) | ||
1002 | return -ENOMEM; | ||
1003 | |||
1004 | read_page_prologue(mtd, g4_addr); | ||
1005 | status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE); | ||
1006 | if (status) | ||
1007 | goto exit; | ||
1008 | |||
1009 | /* | ||
1010 | * If no memory-based bbt was created, exit. This will happen if module | ||
1011 | * parameter ignore_badblocks is set. Then why even call this function? | ||
1012 | * For an unknown reason, block erase always fails if it's the first | ||
1013 | * operation after device power-up. The above read ensures it never is. | ||
1014 | * Ugly, I know. | ||
1015 | */ | ||
1016 | if (nand->bbt == NULL) /* no memory-based bbt */ | ||
1017 | goto exit; | ||
1018 | |||
1019 | /* | ||
1020 | * Parse factory bbt and update memory-based bbt. Factory bbt format is | ||
1021 | * simple: one bit per block, block numbers increase left to right (msb | ||
1022 | * to lsb). Bit clear means bad block. | ||
1023 | */ | ||
1024 | for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) { | ||
1025 | int bitnum; | ||
1026 | unsigned long bits = ~buf[i]; | ||
1027 | for_each_set_bit(bitnum, &bits, 8) { | ||
1028 | int badblock = block + 7 - bitnum; | ||
1029 | nand->bbt[badblock / 4] |= | ||
1030 | 0x03 << ((badblock % 4) * 2); | ||
1031 | mtd->ecc_stats.badblocks++; | ||
1032 | dev_notice(doc->dev, "factory-marked bad block: %d\n", | ||
1033 | badblock); | ||
1034 | } | ||
1035 | } | ||
1036 | exit: | ||
1037 | kfree(buf); | ||
1038 | return status; | ||
1039 | } | ||
1040 | |||
1041 | static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs) | ||
1042 | { | ||
1043 | /* | ||
1044 | * Mark a block as bad. Bad blocks are marked in the oob area of the | ||
1045 | * first page of the block. The default scan_bbt() in the nand | ||
1046 | * infrastructure code works fine for building the memory-based bbt | ||
1047 | * during initialization, as does the nand infrastructure function that | ||
1048 | * checks if a block is bad by reading the bbt. This function replaces | ||
1049 | * the nand default because writes to oob-only are not supported. | ||
1050 | */ | ||
1051 | |||
1052 | int ret, i; | ||
1053 | uint8_t *buf; | ||
1054 | struct nand_chip *nand = mtd->priv; | ||
1055 | struct docg4_priv *doc = nand->priv; | ||
1056 | struct nand_bbt_descr *bbtd = nand->badblock_pattern; | ||
1057 | int block = (int)(ofs >> nand->bbt_erase_shift); | ||
1058 | int page = (int)(ofs >> nand->page_shift); | ||
1059 | uint32_t g4_addr = mtd_to_docg4_address(page, 0); | ||
1060 | |||
1061 | dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs); | ||
1062 | |||
1063 | if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1))) | ||
1064 | dev_warn(doc->dev, "%s: ofs %llx not start of block!\n", | ||
1065 | __func__, ofs); | ||
1066 | |||
1067 | /* allocate blank buffer for page data */ | ||
1068 | buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL); | ||
1069 | if (buf == NULL) | ||
1070 | return -ENOMEM; | ||
1071 | |||
1072 | /* update bbt in memory */ | ||
1073 | nand->bbt[block / 4] |= 0x01 << ((block & 0x03) * 2); | ||
1074 | |||
1075 | /* write bit-wise negation of pattern to oob buffer */ | ||
1076 | memset(nand->oob_poi, 0xff, mtd->oobsize); | ||
1077 | for (i = 0; i < bbtd->len; i++) | ||
1078 | nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i]; | ||
1079 | |||
1080 | /* write first page of block */ | ||
1081 | write_page_prologue(mtd, g4_addr); | ||
1082 | docg4_write_page(mtd, nand, buf); | ||
1083 | ret = pageprog(mtd); | ||
1084 | if (!ret) | ||
1085 | mtd->ecc_stats.badblocks++; | ||
1086 | |||
1087 | kfree(buf); | ||
1088 | |||
1089 | return ret; | ||
1090 | } | ||
1091 | |||
1092 | static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs, int getchip) | ||
1093 | { | ||
1094 | /* only called when module_param ignore_badblocks is set */ | ||
1095 | return 0; | ||
1096 | } | ||
1097 | |||
1098 | static int docg4_suspend(struct platform_device *pdev, pm_message_t state) | ||
1099 | { | ||
1100 | /* | ||
1101 | * Put the device into "deep power-down" mode. Note that CE# must be | ||
1102 | * deasserted for this to take effect. The xscale, e.g., can be | ||
1103 | * configured to float this signal when the processor enters power-down, | ||
1104 | * and a suitable pull-up ensures its deassertion. | ||
1105 | */ | ||
1106 | |||
1107 | int i; | ||
1108 | uint8_t pwr_down; | ||
1109 | struct docg4_priv *doc = platform_get_drvdata(pdev); | ||
1110 | void __iomem *docptr = doc->virtadr; | ||
1111 | |||
1112 | dev_dbg(doc->dev, "%s...\n", __func__); | ||
1113 | |||
1114 | /* poll the register that tells us we're ready to go to sleep */ | ||
1115 | for (i = 0; i < 10; i++) { | ||
1116 | pwr_down = readb(docptr + DOC_POWERMODE); | ||
1117 | if (pwr_down & DOC_POWERDOWN_READY) | ||
1118 | break; | ||
1119 | usleep_range(1000, 4000); | ||
1120 | } | ||
1121 | |||
1122 | if (!(pwr_down & DOC_POWERDOWN_READY)) { | ||
1123 | dev_err(doc->dev, "suspend failed; " | ||
1124 | "timeout polling DOC_POWERDOWN_READY\n"); | ||
1125 | return -EIO; | ||
1126 | } | ||
1127 | |||
1128 | writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN, | ||
1129 | docptr + DOC_ASICMODE); | ||
1130 | writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN), | ||
1131 | docptr + DOC_ASICMODECONFIRM); | ||
1132 | |||
1133 | write_nop(docptr); | ||
1134 | |||
1135 | return 0; | ||
1136 | } | ||
1137 | |||
1138 | static int docg4_resume(struct platform_device *pdev) | ||
1139 | { | ||
1140 | |||
1141 | /* | ||
1142 | * Exit power-down. Twelve consecutive reads of the address below | ||
1143 | * accomplishes this, assuming CE# has been asserted. | ||
1144 | */ | ||
1145 | |||
1146 | struct docg4_priv *doc = platform_get_drvdata(pdev); | ||
1147 | void __iomem *docptr = doc->virtadr; | ||
1148 | int i; | ||
1149 | |||
1150 | dev_dbg(doc->dev, "%s...\n", __func__); | ||
1151 | |||
1152 | for (i = 0; i < 12; i++) | ||
1153 | readb(docptr + 0x1fff); | ||
1154 | |||
1155 | return 0; | ||
1156 | } | ||
1157 | |||
1158 | static void __init init_mtd_structs(struct mtd_info *mtd) | ||
1159 | { | ||
1160 | /* initialize mtd and nand data structures */ | ||
1161 | |||
1162 | /* | ||
1163 | * Note that some of the following initializations are not usually | ||
1164 | * required within a nand driver because they are performed by the nand | ||
1165 | * infrastructure code as part of nand_scan(). In this case they need | ||
1166 | * to be initialized here because we skip the call to nand_scan_ident() (the | ||
1167 | * first half of nand_scan()). The call to nand_scan_ident() is skipped | ||
1168 | * because for this device the chip id is not read in the manner of a | ||
1169 | * standard nand device. Unfortunately, nand_scan_ident() does other | ||
1170 | * things as well, such as calling nand_set_defaults(). | ||
1171 | */ | ||
1172 | |||
1173 | struct nand_chip *nand = mtd->priv; | ||
1174 | struct docg4_priv *doc = nand->priv; | ||
1175 | |||
1176 | mtd->size = DOCG4_CHIP_SIZE; | ||
1177 | mtd->name = "Msys_Diskonchip_G4"; | ||
1178 | mtd->writesize = DOCG4_PAGE_SIZE; | ||
1179 | mtd->erasesize = DOCG4_BLOCK_SIZE; | ||
1180 | mtd->oobsize = DOCG4_OOB_SIZE; | ||
1181 | nand->chipsize = DOCG4_CHIP_SIZE; | ||
1182 | nand->chip_shift = DOCG4_CHIP_SHIFT; | ||
1183 | nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT; | ||
1184 | nand->chip_delay = 20; | ||
1185 | nand->page_shift = DOCG4_PAGE_SHIFT; | ||
1186 | nand->pagemask = 0x3ffff; | ||
1187 | nand->badblockpos = NAND_LARGE_BADBLOCK_POS; | ||
1188 | nand->badblockbits = 8; | ||
1189 | nand->ecc.layout = &docg4_oobinfo; | ||
1190 | nand->ecc.mode = NAND_ECC_HW_SYNDROME; | ||
1191 | nand->ecc.size = DOCG4_PAGE_SIZE; | ||
1192 | nand->ecc.prepad = 8; | ||
1193 | nand->ecc.bytes = 8; | ||
1194 | nand->ecc.strength = DOCG4_T; | ||
1195 | nand->options = | ||
1196 | NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR; | ||
1197 | nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA; | ||
1198 | nand->controller = &nand->hwcontrol; | ||
1199 | spin_lock_init(&nand->controller->lock); | ||
1200 | init_waitqueue_head(&nand->controller->wq); | ||
1201 | |||
1202 | /* methods */ | ||
1203 | nand->cmdfunc = docg4_command; | ||
1204 | nand->waitfunc = docg4_wait; | ||
1205 | nand->select_chip = docg4_select_chip; | ||
1206 | nand->read_byte = docg4_read_byte; | ||
1207 | nand->block_markbad = docg4_block_markbad; | ||
1208 | nand->read_buf = docg4_read_buf; | ||
1209 | nand->write_buf = docg4_write_buf16; | ||
1210 | nand->scan_bbt = nand_default_bbt; | ||
1211 | nand->erase_cmd = docg4_erase_block; | ||
1212 | nand->ecc.read_page = docg4_read_page; | ||
1213 | nand->ecc.write_page = docg4_write_page; | ||
1214 | nand->ecc.read_page_raw = docg4_read_page_raw; | ||
1215 | nand->ecc.write_page_raw = docg4_write_page_raw; | ||
1216 | nand->ecc.read_oob = docg4_read_oob; | ||
1217 | nand->ecc.write_oob = docg4_write_oob; | ||
1218 | |||
1219 | /* | ||
1220 | * The way the nand infrastructure code is written, a memory-based bbt | ||
1221 | * is not created if NAND_SKIP_BBTSCAN is set. With no memory bbt, | ||
1222 | * nand->block_bad() is used. So when ignoring bad blocks, we skip the | ||
1223 | * scan and define a dummy block_bad() which always returns 0. | ||
1224 | */ | ||
1225 | if (ignore_badblocks) { | ||
1226 | nand->options |= NAND_SKIP_BBTSCAN; | ||
1227 | nand->block_bad = docg4_block_neverbad; | ||
1228 | } | ||
1229 | |||
1230 | } | ||
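
The comment at the top of this function refers to the split inside nand_scan(): nand_scan_ident() identifies the chip and fills in the geometry, nand_scan_tail() allocates buffers and finishes the ecc setup. A rough sketch of the two probe flows, assuming only what this driver itself does (the function names typical_nand_probe and docg4_style_probe are illustrative):

static int typical_nand_probe(struct mtd_info *mtd)
{
	int ret;

	/* first half: read the chip id and fill in mtd/nand geometry */
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;
	/* second half: allocate buffers, finish ecc setup */
	return nand_scan_tail(mtd);
}

static int docg4_style_probe(struct mtd_info *mtd)
{
	/* the G4 id is not readable the standard way, so fill in by hand */
	init_mtd_structs(mtd);
	return nand_scan_tail(mtd);
}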
1231 | |||
1232 | static int __init read_id_reg(struct mtd_info *mtd) | ||
1233 | { | ||
1234 | struct nand_chip *nand = mtd->priv; | ||
1235 | struct docg4_priv *doc = nand->priv; | ||
1236 | void __iomem *docptr = doc->virtadr; | ||
1237 | uint16_t id1, id2; | ||
1238 | |||
1239 | /* check for presence of g4 chip by reading id registers */ | ||
1240 | id1 = readw(docptr + DOC_CHIPID); | ||
1241 | id1 = readw(docptr + DOCG4_MYSTERY_REG); | ||
1242 | id2 = readw(docptr + DOC_CHIPID_INV); | ||
1243 | id2 = readw(docptr + DOCG4_MYSTERY_REG); | ||
1244 | |||
1245 | if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) { | ||
1246 | dev_info(doc->dev, | ||
1247 | "NAND device: 128MiB Diskonchip G4 detected\n"); | ||
1248 | return 0; | ||
1249 | } | ||
1250 | |||
1251 | return -ENODEV; | ||
1252 | } | ||
1253 | |||
1254 | static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL }; | ||
1255 | |||
1256 | static int __init probe_docg4(struct platform_device *pdev) | ||
1257 | { | ||
1258 | struct mtd_info *mtd; | ||
1259 | struct nand_chip *nand; | ||
1260 | void __iomem *virtadr; | ||
1261 | struct docg4_priv *doc; | ||
1262 | int len, retval; | ||
1263 | struct resource *r; | ||
1264 | struct device *dev = &pdev->dev; | ||
1265 | |||
1266 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1267 | if (r == NULL) { | ||
1268 | dev_err(dev, "no io memory resource defined!\n"); | ||
1269 | return -ENODEV; | ||
1270 | } | ||
1271 | |||
1272 | virtadr = ioremap(r->start, resource_size(r)); | ||
1273 | if (!virtadr) { | ||
1274 | dev_err(dev, "Diskonchip ioremap failed: %pR\n", r); | ||
1275 | return -EIO; | ||
1276 | } | ||
1277 | |||
1278 | len = sizeof(struct mtd_info) + sizeof(struct nand_chip) + | ||
1279 | sizeof(struct docg4_priv); | ||
1280 | mtd = kzalloc(len, GFP_KERNEL); | ||
1281 | if (mtd == NULL) { | ||
1282 | retval = -ENOMEM; | ||
1283 | goto fail; | ||
1284 | } | ||
1285 | nand = (struct nand_chip *) (mtd + 1); | ||
1286 | doc = (struct docg4_priv *) (nand + 1); | ||
1287 | mtd->priv = nand; | ||
1288 | nand->priv = doc; | ||
1289 | mtd->owner = THIS_MODULE; | ||
1290 | doc->virtadr = virtadr; | ||
1291 | doc->dev = dev; | ||
1292 | |||
1293 | init_mtd_structs(mtd); | ||
1294 | |||
1295 | /* initialize kernel bch algorithm */ | ||
1296 | doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY); | ||
1297 | if (doc->bch == NULL) { | ||
1298 | retval = -EINVAL; | ||
1299 | goto fail; | ||
1300 | } | ||
1301 | |||
1302 | platform_set_drvdata(pdev, doc); | ||
1303 | |||
1304 | reset(mtd); | ||
1305 | retval = read_id_reg(mtd); | ||
1306 | if (retval == -ENODEV) { | ||
1307 | dev_warn(dev, "No diskonchip G4 device found.\n"); | ||
1308 | goto fail; | ||
1309 | } | ||
1310 | |||
1311 | retval = nand_scan_tail(mtd); | ||
1312 | if (retval) | ||
1313 | goto fail; | ||
1314 | |||
1315 | retval = read_factory_bbt(mtd); | ||
1316 | if (retval) | ||
1317 | goto fail; | ||
1318 | |||
1319 | retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0); | ||
1320 | if (retval) | ||
1321 | goto fail; | ||
1322 | |||
1323 | doc->mtd = mtd; | ||
1324 | return 0; | ||
1325 | |||
1326 | fail: | ||
1327 | iounmap(virtadr); | ||
1328 | if (mtd) { | ||
1329 | /* re-declarations avoid compiler warning */ | ||
1330 | struct nand_chip *nand = mtd->priv; | ||
1331 | struct docg4_priv *doc = nand->priv; | ||
1332 | nand_release(mtd); /* deletes partitions and mtd devices */ | ||
1333 | platform_set_drvdata(pdev, NULL); | ||
1334 | free_bch(doc->bch); | ||
1335 | kfree(mtd); | ||
1336 | } | ||
1337 | |||
1338 | return retval; | ||
1339 | } | ||
1340 | |||
1341 | static int __exit cleanup_docg4(struct platform_device *pdev) | ||
1342 | { | ||
1343 | struct docg4_priv *doc = platform_get_drvdata(pdev); | ||
1344 | nand_release(doc->mtd); | ||
1345 | platform_set_drvdata(pdev, NULL); | ||
1346 | free_bch(doc->bch); | ||
1347 | kfree(doc->mtd); | ||
1348 | iounmap(doc->virtadr); | ||
1349 | return 0; | ||
1350 | } | ||
1351 | |||
1352 | static struct platform_driver docg4_driver = { | ||
1353 | .driver = { | ||
1354 | .name = "docg4", | ||
1355 | .owner = THIS_MODULE, | ||
1356 | }, | ||
1357 | .suspend = docg4_suspend, | ||
1358 | .resume = docg4_resume, | ||
1359 | .remove = __exit_p(cleanup_docg4), | ||
1360 | }; | ||
1361 | |||
1362 | static int __init docg4_init(void) | ||
1363 | { | ||
1364 | return platform_driver_probe(&docg4_driver, probe_docg4); | ||
1365 | } | ||
1366 | |||
1367 | static void __exit docg4_exit(void) | ||
1368 | { | ||
1369 | platform_driver_unregister(&docg4_driver); | ||
1370 | } | ||
1371 | |||
1372 | module_init(docg4_init); | ||
1373 | module_exit(docg4_exit); | ||
1374 | |||
1375 | MODULE_LICENSE("GPL"); | ||
1376 | MODULE_AUTHOR("Mike Dunn"); | ||
1377 | MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver"); | ||
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 7195ee6efe12..80b5264f0a32 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c | |||
@@ -813,6 +813,12 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) | |||
813 | &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; | 813 | &fsl_elbc_oob_sp_eccm1 : &fsl_elbc_oob_sp_eccm0; |
814 | chip->ecc.size = 512; | 814 | chip->ecc.size = 512; |
815 | chip->ecc.bytes = 3; | 815 | chip->ecc.bytes = 3; |
816 | chip->ecc.strength = 1; | ||
817 | /* | ||
818 | * FIXME: can hardware ecc correct 4 bitflips if page size is | ||
819 | * 2k? Then does hardware report number of corrections for this | ||
820 | * case? If so, ecc_stats reporting needs to be fixed as well. | ||
821 | */ | ||
816 | } else { | 822 | } else { |
817 | /* otherwise fall back to default software ECC */ | 823 | /* otherwise fall back to default software ECC */ |
818 | chip->ecc.mode = NAND_ECC_SOFT; | 824 | chip->ecc.mode = NAND_ECC_SOFT; |
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index e53b76064133..1b8330e1155a 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c | |||
@@ -17,6 +17,10 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
20 | #include <linux/completion.h> | ||
21 | #include <linux/dmaengine.h> | ||
22 | #include <linux/dma-direction.h> | ||
23 | #include <linux/dma-mapping.h> | ||
20 | #include <linux/err.h> | 24 | #include <linux/err.h> |
21 | #include <linux/init.h> | 25 | #include <linux/init.h> |
22 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -27,6 +31,7 @@ | |||
27 | #include <linux/mtd/nand.h> | 31 | #include <linux/mtd/nand.h> |
28 | #include <linux/mtd/nand_ecc.h> | 32 | #include <linux/mtd/nand_ecc.h> |
29 | #include <linux/platform_device.h> | 33 | #include <linux/platform_device.h> |
34 | #include <linux/of.h> | ||
30 | #include <linux/mtd/partitions.h> | 35 | #include <linux/mtd/partitions.h> |
31 | #include <linux/io.h> | 36 | #include <linux/io.h> |
32 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
@@ -34,7 +39,7 @@ | |||
34 | #include <linux/amba/bus.h> | 39 | #include <linux/amba/bus.h> |
35 | #include <mtd/mtd-abi.h> | 40 | #include <mtd/mtd-abi.h> |
36 | 41 | ||
37 | static struct nand_ecclayout fsmc_ecc1_layout = { | 42 | static struct nand_ecclayout fsmc_ecc1_128_layout = { |
38 | .eccbytes = 24, | 43 | .eccbytes = 24, |
39 | .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, | 44 | .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52, |
40 | 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, | 45 | 66, 67, 68, 82, 83, 84, 98, 99, 100, 114, 115, 116}, |
@@ -50,7 +55,127 @@ static struct nand_ecclayout fsmc_ecc1_layout = { | |||
50 | } | 55 | } |
51 | }; | 56 | }; |
52 | 57 | ||
53 | static struct nand_ecclayout fsmc_ecc4_lp_layout = { | 58 | static struct nand_ecclayout fsmc_ecc1_64_layout = { |
59 | .eccbytes = 12, | ||
60 | .eccpos = {2, 3, 4, 18, 19, 20, 34, 35, 36, 50, 51, 52}, | ||
61 | .oobfree = { | ||
62 | {.offset = 8, .length = 8}, | ||
63 | {.offset = 24, .length = 8}, | ||
64 | {.offset = 40, .length = 8}, | ||
65 | {.offset = 56, .length = 8}, | ||
66 | } | ||
67 | }; | ||
68 | |||
69 | static struct nand_ecclayout fsmc_ecc1_16_layout = { | ||
70 | .eccbytes = 3, | ||
71 | .eccpos = {2, 3, 4}, | ||
72 | .oobfree = { | ||
73 | {.offset = 8, .length = 8}, | ||
74 | } | ||
75 | }; | ||
76 | |||
77 | /* | ||
78 | * ECC4 layout for NAND of pagesize 8192 bytes & OOBsize 256 bytes. 13*16 bytes | ||
79 | * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 46 | ||
80 | * bytes are free for use. | ||
81 | */ | ||
82 | static struct nand_ecclayout fsmc_ecc4_256_layout = { | ||
83 | .eccbytes = 208, | ||
84 | .eccpos = { 2, 3, 4, 5, 6, 7, 8, | ||
85 | 9, 10, 11, 12, 13, 14, | ||
86 | 18, 19, 20, 21, 22, 23, 24, | ||
87 | 25, 26, 27, 28, 29, 30, | ||
88 | 34, 35, 36, 37, 38, 39, 40, | ||
89 | 41, 42, 43, 44, 45, 46, | ||
90 | 50, 51, 52, 53, 54, 55, 56, | ||
91 | 57, 58, 59, 60, 61, 62, | ||
92 | 66, 67, 68, 69, 70, 71, 72, | ||
93 | 73, 74, 75, 76, 77, 78, | ||
94 | 82, 83, 84, 85, 86, 87, 88, | ||
95 | 89, 90, 91, 92, 93, 94, | ||
96 | 98, 99, 100, 101, 102, 103, 104, | ||
97 | 105, 106, 107, 108, 109, 110, | ||
98 | 114, 115, 116, 117, 118, 119, 120, | ||
99 | 121, 122, 123, 124, 125, 126, | ||
100 | 130, 131, 132, 133, 134, 135, 136, | ||
101 | 137, 138, 139, 140, 141, 142, | ||
102 | 146, 147, 148, 149, 150, 151, 152, | ||
103 | 153, 154, 155, 156, 157, 158, | ||
104 | 162, 163, 164, 165, 166, 167, 168, | ||
105 | 169, 170, 171, 172, 173, 174, | ||
106 | 178, 179, 180, 181, 182, 183, 184, | ||
107 | 185, 186, 187, 188, 189, 190, | ||
108 | 194, 195, 196, 197, 198, 199, 200, | ||
109 | 201, 202, 203, 204, 205, 206, | ||
110 | 210, 211, 212, 213, 214, 215, 216, | ||
111 | 217, 218, 219, 220, 221, 222, | ||
112 | 226, 227, 228, 229, 230, 231, 232, | ||
113 | 233, 234, 235, 236, 237, 238, | ||
114 | 242, 243, 244, 245, 246, 247, 248, | ||
115 | 249, 250, 251, 252, 253, 254 | ||
116 | }, | ||
117 | .oobfree = { | ||
118 | {.offset = 15, .length = 3}, | ||
119 | {.offset = 31, .length = 3}, | ||
120 | {.offset = 47, .length = 3}, | ||
121 | {.offset = 63, .length = 3}, | ||
122 | {.offset = 79, .length = 3}, | ||
123 | {.offset = 95, .length = 3}, | ||
124 | {.offset = 111, .length = 3}, | ||
125 | {.offset = 127, .length = 3}, | ||
126 | {.offset = 143, .length = 3}, | ||
127 | {.offset = 159, .length = 3}, | ||
128 | {.offset = 175, .length = 3}, | ||
129 | {.offset = 191, .length = 3}, | ||
130 | {.offset = 207, .length = 3}, | ||
131 | {.offset = 223, .length = 3}, | ||
132 | {.offset = 239, .length = 3}, | ||
133 | {.offset = 255, .length = 1} | ||
134 | } | ||
135 | }; | ||
136 | |||
137 | /* | ||
138 | * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 224 bytes. 13*8 bytes | ||
139 | * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 118 | ||
140 | * bytes are free for use. | ||
141 | */ | ||
142 | static struct nand_ecclayout fsmc_ecc4_224_layout = { | ||
143 | .eccbytes = 104, | ||
144 | .eccpos = { 2, 3, 4, 5, 6, 7, 8, | ||
145 | 9, 10, 11, 12, 13, 14, | ||
146 | 18, 19, 20, 21, 22, 23, 24, | ||
147 | 25, 26, 27, 28, 29, 30, | ||
148 | 34, 35, 36, 37, 38, 39, 40, | ||
149 | 41, 42, 43, 44, 45, 46, | ||
150 | 50, 51, 52, 53, 54, 55, 56, | ||
151 | 57, 58, 59, 60, 61, 62, | ||
152 | 66, 67, 68, 69, 70, 71, 72, | ||
153 | 73, 74, 75, 76, 77, 78, | ||
154 | 82, 83, 84, 85, 86, 87, 88, | ||
155 | 89, 90, 91, 92, 93, 94, | ||
156 | 98, 99, 100, 101, 102, 103, 104, | ||
157 | 105, 106, 107, 108, 109, 110, | ||
158 | 114, 115, 116, 117, 118, 119, 120, | ||
159 | 121, 122, 123, 124, 125, 126 | ||
160 | }, | ||
161 | .oobfree = { | ||
162 | {.offset = 15, .length = 3}, | ||
163 | {.offset = 31, .length = 3}, | ||
164 | {.offset = 47, .length = 3}, | ||
165 | {.offset = 63, .length = 3}, | ||
166 | {.offset = 79, .length = 3}, | ||
167 | {.offset = 95, .length = 3}, | ||
168 | {.offset = 111, .length = 3}, | ||
169 | {.offset = 127, .length = 97} | ||
170 | } | ||
171 | }; | ||
172 | |||
173 | /* | ||
174 | * ECC4 layout for NAND of pagesize 4096 bytes & OOBsize 128 bytes. 13*8 bytes | ||
175 | * of OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block & 22 | ||
176 | * bytes are free for use. | ||
177 | */ | ||
178 | static struct nand_ecclayout fsmc_ecc4_128_layout = { | ||
54 | .eccbytes = 104, | 179 | .eccbytes = 104, |
55 | .eccpos = { 2, 3, 4, 5, 6, 7, 8, | 180 | .eccpos = { 2, 3, 4, 5, 6, 7, 8, |
56 | 9, 10, 11, 12, 13, 14, | 181 | 9, 10, 11, 12, 13, 14, |
@@ -82,6 +207,45 @@ static struct nand_ecclayout fsmc_ecc4_lp_layout = { | |||
82 | }; | 207 | }; |
83 | 208 | ||
84 | /* | 209 | /* |
210 | * ECC4 layout for NAND of pagesize 2048 bytes & OOBsize 64 bytes. 13*4 bytes of | ||
211 | * OOB size is reserved for ECC, Byte no. 0 & 1 reserved for bad block and 10 | ||
212 | * bytes are free for use. | ||
213 | */ | ||
214 | static struct nand_ecclayout fsmc_ecc4_64_layout = { | ||
215 | .eccbytes = 52, | ||
216 | .eccpos = { 2, 3, 4, 5, 6, 7, 8, | ||
217 | 9, 10, 11, 12, 13, 14, | ||
218 | 18, 19, 20, 21, 22, 23, 24, | ||
219 | 25, 26, 27, 28, 29, 30, | ||
220 | 34, 35, 36, 37, 38, 39, 40, | ||
221 | 41, 42, 43, 44, 45, 46, | ||
222 | 50, 51, 52, 53, 54, 55, 56, | ||
223 | 57, 58, 59, 60, 61, 62, | ||
224 | }, | ||
225 | .oobfree = { | ||
226 | {.offset = 15, .length = 3}, | ||
227 | {.offset = 31, .length = 3}, | ||
228 | {.offset = 47, .length = 3}, | ||
229 | {.offset = 63, .length = 1}, | ||
230 | } | ||
231 | }; | ||
232 | |||
233 | /* | ||
234 | * ECC4 layout for NAND of pagesize 512 bytes & OOBsize 16 bytes. 13 bytes of | ||
235 | * OOB size is reserved for ECC, Byte no. 4 & 5 reserved for bad block and one | ||
236 | * byte is free for use. | ||
237 | */ | ||
238 | static struct nand_ecclayout fsmc_ecc4_16_layout = { | ||
239 | .eccbytes = 13, | ||
240 | .eccpos = { 0, 1, 2, 3, 6, 7, 8, | ||
241 | 9, 10, 11, 12, 13, 14 | ||
242 | }, | ||
243 | .oobfree = { | ||
244 | {.offset = 15, .length = 1}, | ||
245 | } | ||
246 | }; | ||
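
All of the ECC4 layouts above follow from 13 bytes of BCH8 ecc per 512-byte sector, plus two bytes kept for the bad block marker. A quick sanity check of the numbers quoted in the comments (the helpers are illustrative, not part of the driver):

static unsigned int fsmc_ecc4_bytes(unsigned int pagesize)
{
	/* 208 for 8k pages, 104 for 4k, 52 for 2k, 13 for 512 */
	return 13 * (pagesize / 512);
}

static unsigned int fsmc_ecc4_free(unsigned int pagesize, unsigned int oobsize)
{
	/* two bytes are reserved for the bad block marker, e.g. 256 - 208 - 2 = 46 */
	return oobsize - fsmc_ecc4_bytes(pagesize) - 2;
}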
247 | |||
248 | /* | ||
85 | * ECC placement definitions in oobfree type format. | 249 | * ECC placement definitions in oobfree type format. |
86 | * There are 13 bytes of ecc for every 512 byte block and it has to be read | 250 | * There are 13 bytes of ecc for every 512 byte block and it has to be read |
87 | * consecutively and immediately after the 512 byte data block for hardware to | 251 | * consecutively and immediately after the 512 byte data block for hardware to |
@@ -103,16 +267,6 @@ static struct fsmc_eccplace fsmc_ecc4_lp_place = { | |||
103 | } | 267 | } |
104 | }; | 268 | }; |
105 | 269 | ||
106 | static struct nand_ecclayout fsmc_ecc4_sp_layout = { | ||
107 | .eccbytes = 13, | ||
108 | .eccpos = { 0, 1, 2, 3, 6, 7, 8, | ||
109 | 9, 10, 11, 12, 13, 14 | ||
110 | }, | ||
111 | .oobfree = { | ||
112 | {.offset = 15, .length = 1}, | ||
113 | } | ||
114 | }; | ||
115 | |||
116 | static struct fsmc_eccplace fsmc_ecc4_sp_place = { | 270 | static struct fsmc_eccplace fsmc_ecc4_sp_place = { |
117 | .eccplace = { | 271 | .eccplace = { |
118 | {.offset = 0, .length = 4}, | 272 | {.offset = 0, .length = 4}, |
@@ -120,75 +274,24 @@ static struct fsmc_eccplace fsmc_ecc4_sp_place = { | |||
120 | } | 274 | } |
121 | }; | 275 | }; |
122 | 276 | ||
123 | /* | ||
124 | * Default partition tables to be used if the partition information not | ||
125 | * provided through platform data. | ||
126 | * | ||
127 | * Default partition layout for small page(= 512 bytes) devices | ||
128 | * Size for "Root file system" is updated in driver based on actual device size | ||
129 | */ | ||
130 | static struct mtd_partition partition_info_16KB_blk[] = { | ||
131 | { | ||
132 | .name = "X-loader", | ||
133 | .offset = 0, | ||
134 | .size = 4*0x4000, | ||
135 | }, | ||
136 | { | ||
137 | .name = "U-Boot", | ||
138 | .offset = 0x10000, | ||
139 | .size = 20*0x4000, | ||
140 | }, | ||
141 | { | ||
142 | .name = "Kernel", | ||
143 | .offset = 0x60000, | ||
144 | .size = 256*0x4000, | ||
145 | }, | ||
146 | { | ||
147 | .name = "Root File System", | ||
148 | .offset = 0x460000, | ||
149 | .size = MTDPART_SIZ_FULL, | ||
150 | }, | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * Default partition layout for large page(> 512 bytes) devices | ||
155 | * Size for "Root file system" is updated in driver based on actual device size | ||
156 | */ | ||
157 | static struct mtd_partition partition_info_128KB_blk[] = { | ||
158 | { | ||
159 | .name = "X-loader", | ||
160 | .offset = 0, | ||
161 | .size = 4*0x20000, | ||
162 | }, | ||
163 | { | ||
164 | .name = "U-Boot", | ||
165 | .offset = 0x80000, | ||
166 | .size = 12*0x20000, | ||
167 | }, | ||
168 | { | ||
169 | .name = "Kernel", | ||
170 | .offset = 0x200000, | ||
171 | .size = 48*0x20000, | ||
172 | }, | ||
173 | { | ||
174 | .name = "Root File System", | ||
175 | .offset = 0x800000, | ||
176 | .size = MTDPART_SIZ_FULL, | ||
177 | }, | ||
178 | }; | ||
179 | |||
180 | |||
181 | /** | 277 | /** |
182 | * struct fsmc_nand_data - structure for FSMC NAND device state | 278 | * struct fsmc_nand_data - structure for FSMC NAND device state |
183 | * | 279 | * |
184 | * @pid: Part ID on the AMBA PrimeCell format | 280 | * @pid: Part ID on the AMBA PrimeCell format |
185 | * @mtd: MTD info for a NAND flash. | 281 | * @mtd: MTD info for a NAND flash. |
186 | * @nand: Chip related info for a NAND flash. | 282 | * @nand: Chip related info for a NAND flash. |
283 | * @partitions: Partition info for a NAND Flash. | ||
284 | * @nr_partitions: Total number of partition of a NAND flash. | ||
187 | * | 285 | * |
188 | * @ecc_place: ECC placing locations in oobfree type format. | 286 | * @ecc_place: ECC placing locations in oobfree type format. |
189 | * @bank: Bank number for probed device. | 287 | * @bank: Bank number for probed device. |
190 | * @clk: Clock structure for FSMC. | 288 | * @clk: Clock structure for FSMC. |
191 | * | 289 | * |
290 | * @read_dma_chan: DMA channel for read access | ||
291 | * @write_dma_chan: DMA channel for write access to NAND | ||
292 | * @dma_access_complete: Completion structure | ||
293 | * | ||
294 | * @data_pa: NAND Physical port for Data. | ||
192 | * @data_va: NAND port for Data. | 295 | * @data_va: NAND port for Data. |
193 | * @cmd_va: NAND port for Command. | 296 | * @cmd_va: NAND port for Command. |
194 | * @addr_va: NAND port for Address. | 297 | * @addr_va: NAND port for Address. |
@@ -198,16 +301,23 @@ struct fsmc_nand_data { | |||
198 | u32 pid; | 301 | u32 pid; |
199 | struct mtd_info mtd; | 302 | struct mtd_info mtd; |
200 | struct nand_chip nand; | 303 | struct nand_chip nand; |
304 | struct mtd_partition *partitions; | ||
305 | unsigned int nr_partitions; | ||
201 | 306 | ||
202 | struct fsmc_eccplace *ecc_place; | 307 | struct fsmc_eccplace *ecc_place; |
203 | unsigned int bank; | 308 | unsigned int bank; |
309 | struct device *dev; | ||
310 | enum access_mode mode; | ||
204 | struct clk *clk; | 311 | struct clk *clk; |
205 | 312 | ||
206 | struct resource *resregs; | 313 | /* DMA related objects */ |
207 | struct resource *rescmd; | 314 | struct dma_chan *read_dma_chan; |
208 | struct resource *resaddr; | 315 | struct dma_chan *write_dma_chan; |
209 | struct resource *resdata; | 316 | struct completion dma_access_complete; |
317 | |||
318 | struct fsmc_nand_timings *dev_timings; | ||
210 | 319 | ||
320 | dma_addr_t data_pa; | ||
211 | void __iomem *data_va; | 321 | void __iomem *data_va; |
212 | void __iomem *cmd_va; | 322 | void __iomem *cmd_va; |
213 | void __iomem *addr_va; | 323 | void __iomem *addr_va; |
@@ -251,28 +361,29 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | |||
251 | struct nand_chip *this = mtd->priv; | 361 | struct nand_chip *this = mtd->priv; |
252 | struct fsmc_nand_data *host = container_of(mtd, | 362 | struct fsmc_nand_data *host = container_of(mtd, |
253 | struct fsmc_nand_data, mtd); | 363 | struct fsmc_nand_data, mtd); |
254 | struct fsmc_regs *regs = host->regs_va; | 364 | void __iomem *regs = host->regs_va;
255 | unsigned int bank = host->bank; | 365 | unsigned int bank = host->bank; |
256 | 366 | ||
257 | if (ctrl & NAND_CTRL_CHANGE) { | 367 | if (ctrl & NAND_CTRL_CHANGE) { |
368 | u32 pc; | ||
369 | |||
258 | if (ctrl & NAND_CLE) { | 370 | if (ctrl & NAND_CLE) { |
259 | this->IO_ADDR_R = (void __iomem *)host->cmd_va; | 371 | this->IO_ADDR_R = host->cmd_va; |
260 | this->IO_ADDR_W = (void __iomem *)host->cmd_va; | 372 | this->IO_ADDR_W = host->cmd_va; |
261 | } else if (ctrl & NAND_ALE) { | 373 | } else if (ctrl & NAND_ALE) { |
262 | this->IO_ADDR_R = (void __iomem *)host->addr_va; | 374 | this->IO_ADDR_R = host->addr_va; |
263 | this->IO_ADDR_W = (void __iomem *)host->addr_va; | 375 | this->IO_ADDR_W = host->addr_va; |
264 | } else { | 376 | } else { |
265 | this->IO_ADDR_R = (void __iomem *)host->data_va; | 377 | this->IO_ADDR_R = host->data_va; |
266 | this->IO_ADDR_W = (void __iomem *)host->data_va; | 378 | this->IO_ADDR_W = host->data_va; |
267 | } | 379 | } |
268 | 380 | ||
269 | if (ctrl & NAND_NCE) { | 381 | pc = readl(FSMC_NAND_REG(regs, bank, PC)); |
270 | writel(readl(&regs->bank_regs[bank].pc) | FSMC_ENABLE, | 382 | if (ctrl & NAND_NCE)
271 | &regs->bank_regs[bank].pc); | 383 | pc |= FSMC_ENABLE;
272 | } else { | 384 | else |
273 | writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ENABLE, | 385 | pc &= ~FSMC_ENABLE;
274 | &regs->bank_regs[bank].pc); | 386 | writel(pc, FSMC_NAND_REG(regs, bank, PC));
275 | } | ||
276 | } | 387 | } |
277 | 388 | ||
278 | mb(); | 389 | mb(); |
@@ -287,22 +398,42 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | |||
287 | * This routine initializes timing parameters related to NAND memory access in | 398 | * This routine initializes timing parameters related to NAND memory access in |
288 | * FSMC registers | 399 | * FSMC registers |
289 | */ | 400 | */ |
290 | static void __init fsmc_nand_setup(struct fsmc_regs *regs, uint32_t bank, | 401 | static void fsmc_nand_setup(void __iomem *regs, uint32_t bank, |
291 | uint32_t busw) | 402 | uint32_t busw, struct fsmc_nand_timings *timings) |
292 | { | 403 | { |
293 | uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON; | 404 | uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON; |
405 | uint32_t tclr, tar, thiz, thold, twait, tset; | ||
406 | struct fsmc_nand_timings *tims; | ||
407 | struct fsmc_nand_timings default_timings = { | ||
408 | .tclr = FSMC_TCLR_1, | ||
409 | .tar = FSMC_TAR_1, | ||
410 | .thiz = FSMC_THIZ_1, | ||
411 | .thold = FSMC_THOLD_4, | ||
412 | .twait = FSMC_TWAIT_6, | ||
413 | .tset = FSMC_TSET_0, | ||
414 | }; | ||
415 | |||
416 | if (timings) | ||
417 | tims = timings; | ||
418 | else | ||
419 | tims = &default_timings; | ||
420 | |||
421 | tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT; | ||
422 | tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT; | ||
423 | thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT; | ||
424 | thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT; | ||
425 | twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT; | ||
426 | tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT; | ||
294 | 427 | ||
295 | if (busw) | 428 | if (busw) |
296 | writel(value | FSMC_DEVWID_16, &regs->bank_regs[bank].pc); | 429 | writel(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC));
297 | else | 430 | else |
298 | writel(value | FSMC_DEVWID_8, &regs->bank_regs[bank].pc); | 431 | writel(value | FSMC_DEVWID_8, FSMC_NAND_REG(regs, bank, PC));
299 | 432 | ||
300 | writel(readl(&regs->bank_regs[bank].pc) | FSMC_TCLR_1 | FSMC_TAR_1, | 433 | writel(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
301 | &regs->bank_regs[bank].pc); | 434 | FSMC_NAND_REG(regs, bank, PC));
302 | writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0, | 435 | writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, COMM)); |
303 | &regs->bank_regs[bank].comm); | 436 | writel(thiz | thold | twait | tset, FSMC_NAND_REG(regs, bank, ATTRIB));
304 | writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0, | ||
305 | &regs->bank_regs[bank].attrib); | ||
306 | } | 437 | } |
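
When no timings are passed in, the defaults above are used; a board can instead supply its own values through platform data. A sketch of what that looks like (the numbers are placeholders; only the struct fields and the nand_timings hookup come from this driver):

static struct fsmc_nand_timings board_nand_timings = {
	.tclr	= 4,
	.tar	= 4,
	.thiz	= 1,
	.thold	= 2,
	.twait	= 6,
	.tset	= 0,
};

static struct fsmc_nand_platform_data board_nand_pdata = {
	/* other fields (bank, width, mode, partitions, ...) omitted */
	.nand_timings	= &board_nand_timings,
};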
307 | 438 | ||
308 | /* | 439 | /* |
@@ -312,15 +443,15 @@ static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode) | |||
312 | { | 443 | { |
313 | struct fsmc_nand_data *host = container_of(mtd, | 444 | struct fsmc_nand_data *host = container_of(mtd, |
314 | struct fsmc_nand_data, mtd); | 445 | struct fsmc_nand_data, mtd); |
315 | struct fsmc_regs *regs = host->regs_va; | 446 | void __iomem *regs = host->regs_va; |
316 | uint32_t bank = host->bank; | 447 | uint32_t bank = host->bank; |
317 | 448 | ||
318 | writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCPLEN_256, | 449 | writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
319 | &regs->bank_regs[bank].pc); | 450 | FSMC_NAND_REG(regs, bank, PC));
320 | writel(readl(&regs->bank_regs[bank].pc) & ~FSMC_ECCEN, | 451 | writel(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
321 | &regs->bank_regs[bank].pc); | 452 | FSMC_NAND_REG(regs, bank, PC));
322 | writel(readl(&regs->bank_regs[bank].pc) | FSMC_ECCEN, | 453 | writel(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
323 | &regs->bank_regs[bank].pc); | 454 | FSMC_NAND_REG(regs, bank, PC));
324 | } | 455 | } |
325 | 456 | ||
326 | /* | 457 | /* |
@@ -333,37 +464,42 @@ static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data, | |||
333 | { | 464 | { |
334 | struct fsmc_nand_data *host = container_of(mtd, | 465 | struct fsmc_nand_data *host = container_of(mtd, |
335 | struct fsmc_nand_data, mtd); | 466 | struct fsmc_nand_data, mtd); |
336 | struct fsmc_regs *regs = host->regs_va; | 467 | void __iomem *regs = host->regs_va; |
337 | uint32_t bank = host->bank; | 468 | uint32_t bank = host->bank; |
338 | uint32_t ecc_tmp; | 469 | uint32_t ecc_tmp; |
339 | unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; | 470 | unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT; |
340 | 471 | ||
341 | do { | 472 | do { |
342 | if (readl(&regs->bank_regs[bank].sts) & FSMC_CODE_RDY) | 473 | if (readl(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
343 | break; | 474 | break; |
344 | else | 475 | else |
345 | cond_resched(); | 476 | cond_resched(); |
346 | } while (!time_after_eq(jiffies, deadline)); | 477 | } while (!time_after_eq(jiffies, deadline)); |
347 | 478 | ||
348 | ecc_tmp = readl(&regs->bank_regs[bank].ecc1); | 479 | if (time_after_eq(jiffies, deadline)) {
480 | dev_err(host->dev, "calculate ecc timed out\n"); | ||
481 | return -ETIMEDOUT; | ||
482 | } | ||
483 | |||
484 | ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1)); | ||
349 | ecc[0] = (uint8_t) (ecc_tmp >> 0); | 485 | ecc[0] = (uint8_t) (ecc_tmp >> 0); |
350 | ecc[1] = (uint8_t) (ecc_tmp >> 8); | 486 | ecc[1] = (uint8_t) (ecc_tmp >> 8); |
351 | ecc[2] = (uint8_t) (ecc_tmp >> 16); | 487 | ecc[2] = (uint8_t) (ecc_tmp >> 16); |
352 | ecc[3] = (uint8_t) (ecc_tmp >> 24); | 488 | ecc[3] = (uint8_t) (ecc_tmp >> 24); |
353 | 489 | ||
354 | ecc_tmp = readl(&regs->bank_regs[bank].ecc2); | 490 | ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC2));
355 | ecc[4] = (uint8_t) (ecc_tmp >> 0); | 491 | ecc[4] = (uint8_t) (ecc_tmp >> 0); |
356 | ecc[5] = (uint8_t) (ecc_tmp >> 8); | 492 | ecc[5] = (uint8_t) (ecc_tmp >> 8); |
357 | ecc[6] = (uint8_t) (ecc_tmp >> 16); | 493 | ecc[6] = (uint8_t) (ecc_tmp >> 16); |
358 | ecc[7] = (uint8_t) (ecc_tmp >> 24); | 494 | ecc[7] = (uint8_t) (ecc_tmp >> 24); |
359 | 495 | ||
360 | ecc_tmp = readl(&regs->bank_regs[bank].ecc3); | 496 | ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC3));
361 | ecc[8] = (uint8_t) (ecc_tmp >> 0); | 497 | ecc[8] = (uint8_t) (ecc_tmp >> 0); |
362 | ecc[9] = (uint8_t) (ecc_tmp >> 8); | 498 | ecc[9] = (uint8_t) (ecc_tmp >> 8); |
363 | ecc[10] = (uint8_t) (ecc_tmp >> 16); | 499 | ecc[10] = (uint8_t) (ecc_tmp >> 16); |
364 | ecc[11] = (uint8_t) (ecc_tmp >> 24); | 500 | ecc[11] = (uint8_t) (ecc_tmp >> 24); |
365 | 501 | ||
366 | ecc_tmp = readl(&regs->bank_regs[bank].sts); | 502 | ecc_tmp = readl(FSMC_NAND_REG(regs, bank, STS));
367 | ecc[12] = (uint8_t) (ecc_tmp >> 16); | 503 | ecc[12] = (uint8_t) (ecc_tmp >> 16); |
368 | 504 | ||
369 | return 0; | 505 | return 0; |
@@ -379,11 +515,11 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data, | |||
379 | { | 515 | { |
380 | struct fsmc_nand_data *host = container_of(mtd, | 516 | struct fsmc_nand_data *host = container_of(mtd, |
381 | struct fsmc_nand_data, mtd); | 517 | struct fsmc_nand_data, mtd); |
382 | struct fsmc_regs *regs = host->regs_va; | 518 | void __iomem *regs = host->regs_va; |
383 | uint32_t bank = host->bank; | 519 | uint32_t bank = host->bank; |
384 | uint32_t ecc_tmp; | 520 | uint32_t ecc_tmp; |
385 | 521 | ||
386 | ecc_tmp = readl(&regs->bank_regs[bank].ecc1); | 522 | ecc_tmp = readl(FSMC_NAND_REG(regs, bank, ECC1));
387 | ecc[0] = (uint8_t) (ecc_tmp >> 0); | 523 | ecc[0] = (uint8_t) (ecc_tmp >> 0); |
388 | ecc[1] = (uint8_t) (ecc_tmp >> 8); | 524 | ecc[1] = (uint8_t) (ecc_tmp >> 8); |
389 | ecc[2] = (uint8_t) (ecc_tmp >> 16); | 525 | ecc[2] = (uint8_t) (ecc_tmp >> 16); |
@@ -391,6 +527,166 @@ static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data, | |||
391 | return 0; | 527 | return 0; |
392 | } | 528 | } |
393 | 529 | ||
530 | /* Count the number of 0's in buff up to a max of max_bits */ | ||
531 | static int count_written_bits(uint8_t *buff, int size, int max_bits) | ||
532 | { | ||
533 | int k, written_bits = 0; | ||
534 | |||
535 | for (k = 0; k < size; k++) { | ||
536 | written_bits += hweight8(~buff[k]); | ||
537 | if (written_bits > max_bits) | ||
538 | break; | ||
539 | } | ||
540 | |||
541 | return written_bits; | ||
542 | } | ||
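
This helper exists so the correction path can tell a freshly erased page (almost all bits still 1) from real data; counting stops early once max_bits is exceeded, since only the comparison against the correction limit matters. Roughly how it is used later (see fsmc_bch8_correct_data below; the wrapper name is illustrative):

static bool sector_looks_erased(uint8_t *data, uint8_t *ecc, int ecc_bytes)
{
	/* at most 8 zero bits across data + ecc: treat the sector as erased */
	return (count_written_bits(data, 512, 8) +
		count_written_bits(ecc, ecc_bytes, 8)) <= 8;
}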
543 | |||
544 | static void dma_complete(void *param) | ||
545 | { | ||
546 | struct fsmc_nand_data *host = param; | ||
547 | |||
548 | complete(&host->dma_access_complete); | ||
549 | } | ||
550 | |||
551 | static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len, | ||
552 | enum dma_data_direction direction) | ||
553 | { | ||
554 | struct dma_chan *chan; | ||
555 | struct dma_device *dma_dev; | ||
556 | struct dma_async_tx_descriptor *tx; | ||
557 | dma_addr_t dma_dst, dma_src, dma_addr; | ||
558 | dma_cookie_t cookie; | ||
559 | unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; | ||
560 | int ret; | ||
561 | |||
562 | if (direction == DMA_TO_DEVICE) | ||
563 | chan = host->write_dma_chan; | ||
564 | else if (direction == DMA_FROM_DEVICE) | ||
565 | chan = host->read_dma_chan; | ||
566 | else | ||
567 | return -EINVAL; | ||
568 | |||
569 | dma_dev = chan->device; | ||
570 | dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction); | ||
571 | |||
572 | if (direction == DMA_TO_DEVICE) { | ||
573 | dma_src = dma_addr; | ||
574 | dma_dst = host->data_pa; | ||
575 | flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP; | ||
576 | } else { | ||
577 | dma_src = host->data_pa; | ||
578 | dma_dst = dma_addr; | ||
579 | flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP; | ||
580 | } | ||
581 | |||
582 | tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, | ||
583 | len, flags); | ||
584 | |||
585 | if (!tx) { | ||
586 | dev_err(host->dev, "device_prep_dma_memcpy error\n"); | ||
587 | dma_unmap_single(dma_dev->dev, dma_addr, len, direction); | ||
588 | return -EIO; | ||
589 | } | ||
590 | |||
591 | tx->callback = dma_complete; | ||
592 | tx->callback_param = host; | ||
593 | cookie = tx->tx_submit(tx); | ||
594 | |||
595 | ret = dma_submit_error(cookie); | ||
596 | if (ret) { | ||
597 | dev_err(host->dev, "dma_submit_error %d\n", cookie); | ||
598 | return ret; | ||
599 | } | ||
600 | |||
601 | dma_async_issue_pending(chan); | ||
602 | |||
603 | ret = | ||
604 | wait_for_completion_interruptible_timeout(&host->dma_access_complete, | ||
605 | msecs_to_jiffies(3000)); | ||
606 | if (ret <= 0) { | ||
607 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); | ||
608 | dev_err(host->dev, "wait_for_completion_timeout\n"); | ||
609 | return ret ? ret : -ETIMEDOUT; | ||
610 | } | ||
611 | |||
612 | return 0; | ||
613 | } | ||
614 | |||
615 | /* | ||
616 | * fsmc_write_buf - write buffer to chip | ||
617 | * @mtd: MTD device structure | ||
618 | * @buf: data buffer | ||
619 | * @len: number of bytes to write | ||
620 | */ | ||
621 | static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) | ||
622 | { | ||
623 | int i; | ||
624 | struct nand_chip *chip = mtd->priv; | ||
625 | |||
626 | if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) && | ||
627 | IS_ALIGNED(len, sizeof(uint32_t))) { | ||
628 | uint32_t *p = (uint32_t *)buf; | ||
629 | len = len >> 2; | ||
630 | for (i = 0; i < len; i++) | ||
631 | writel(p[i], chip->IO_ADDR_W); | ||
632 | } else { | ||
633 | for (i = 0; i < len; i++) | ||
634 | writeb(buf[i], chip->IO_ADDR_W); | ||
635 | } | ||
636 | } | ||
637 | |||
638 | /* | ||
639 | * fsmc_read_buf - read chip data into buffer | ||
640 | * @mtd: MTD device structure | ||
641 | * @buf: buffer to store data | ||
642 | * @len: number of bytes to read | ||
643 | */ | ||
644 | static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) | ||
645 | { | ||
646 | int i; | ||
647 | struct nand_chip *chip = mtd->priv; | ||
648 | |||
649 | if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) && | ||
650 | IS_ALIGNED(len, sizeof(uint32_t))) { | ||
651 | uint32_t *p = (uint32_t *)buf; | ||
652 | len = len >> 2; | ||
653 | for (i = 0; i < len; i++) | ||
654 | p[i] = readl(chip->IO_ADDR_R); | ||
655 | } else { | ||
656 | for (i = 0; i < len; i++) | ||
657 | buf[i] = readb(chip->IO_ADDR_R); | ||
658 | } | ||
659 | } | ||
660 | |||
661 | /* | ||
662 | * fsmc_read_buf_dma - read chip data into buffer | ||
663 | * @mtd: MTD device structure | ||
664 | * @buf: buffer to store data | ||
665 | * @len: number of bytes to read | ||
666 | */ | ||
667 | static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len) | ||
668 | { | ||
669 | struct fsmc_nand_data *host; | ||
670 | |||
671 | host = container_of(mtd, struct fsmc_nand_data, mtd); | ||
672 | dma_xfer(host, buf, len, DMA_FROM_DEVICE); | ||
673 | } | ||
674 | |||
675 | /* | ||
676 | * fsmc_write_buf_dma - write buffer to chip | ||
677 | * @mtd: MTD device structure | ||
678 | * @buf: data buffer | ||
679 | * @len: number of bytes to write | ||
680 | */ | ||
681 | static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf, | ||
682 | int len) | ||
683 | { | ||
684 | struct fsmc_nand_data *host; | ||
685 | |||
686 | host = container_of(mtd, struct fsmc_nand_data, mtd); | ||
687 | dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE); | ||
688 | } | ||
689 | |||
394 | /* | 690 | /* |
395 | * fsmc_read_page_hwecc | 691 | * fsmc_read_page_hwecc |
396 | * @mtd: mtd info structure | 692 | * @mtd: mtd info structure |
@@ -426,7 +722,6 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
426 | uint8_t *oob = (uint8_t *)&ecc_oob[0]; | 722 | uint8_t *oob = (uint8_t *)&ecc_oob[0]; |
427 | 723 | ||
428 | for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { | 724 | for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { |
429 | |||
430 | chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); | 725 | chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); |
431 | chip->ecc.hwctl(mtd, NAND_ECC_READ); | 726 | chip->ecc.hwctl(mtd, NAND_ECC_READ); |
432 | chip->read_buf(mtd, p, eccsize); | 727 | chip->read_buf(mtd, p, eccsize); |
@@ -437,17 +732,19 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
437 | group++; | 732 | group++; |
438 | 733 | ||
439 | /* | 734 | /* |
440 | * length is intentionally kept a higher multiple of 2 | 735 | * length is intentionally kept a higher multiple of 2 |
441 | * to read at least 13 bytes even in case of 16 bit NAND | 736 | * to read at least 13 bytes even in case of 16 bit NAND |
442 | * devices | 737 | * devices |
443 | */ | 738 | */ |
444 | len = roundup(len, 2); | 739 | if (chip->options & NAND_BUSWIDTH_16) |
740 | len = roundup(len, 2); | ||
741 | |||
445 | chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); | 742 | chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page); |
446 | chip->read_buf(mtd, oob + j, len); | 743 | chip->read_buf(mtd, oob + j, len); |
447 | j += len; | 744 | j += len; |
448 | } | 745 | } |
449 | 746 | ||
450 | memcpy(&ecc_code[i], oob, 13); | 747 | memcpy(&ecc_code[i], oob, chip->ecc.bytes); |
451 | chip->ecc.calculate(mtd, p, &ecc_calc[i]); | 748 | chip->ecc.calculate(mtd, p, &ecc_calc[i]); |
452 | 749 | ||
453 | stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); | 750 | stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); |
@@ -461,7 +758,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
461 | } | 758 | } |
462 | 759 | ||
463 | /* | 760 | /* |
464 | * fsmc_correct_data | 761 | * fsmc_bch8_correct_data |
465 | * @mtd: mtd info structure | 762 | * @mtd: mtd info structure |
466 | * @dat: buffer of read data | 763 | * @dat: buffer of read data |
467 | * @read_ecc: ecc read from device spare area | 764 | * @read_ecc: ecc read from device spare area |
@@ -470,19 +767,51 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, | |||
470 | * calc_ecc is 104 bits of information containing at most 8 error | 767 | * calc_ecc is 104 bits of information containing at most 8 error
471 | * offsets of 13 bits each within 512 bytes of read data. | 768 | * offsets of 13 bits each within 512 bytes of read data.
472 | */ | 769 | */ |
473 | static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat, | 770 | static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat, |
474 | uint8_t *read_ecc, uint8_t *calc_ecc) | 771 | uint8_t *read_ecc, uint8_t *calc_ecc) |
475 | { | 772 | { |
476 | struct fsmc_nand_data *host = container_of(mtd, | 773 | struct fsmc_nand_data *host = container_of(mtd, |
477 | struct fsmc_nand_data, mtd); | 774 | struct fsmc_nand_data, mtd); |
478 | struct fsmc_regs *regs = host->regs_va; | 775 | struct nand_chip *chip = mtd->priv; |
776 | void __iomem *regs = host->regs_va; | ||
479 | unsigned int bank = host->bank; | 777 | unsigned int bank = host->bank; |
480 | uint16_t err_idx[8]; | 778 | uint32_t err_idx[8]; |
481 | uint64_t ecc_data[2]; | ||
482 | uint32_t num_err, i; | 779 | uint32_t num_err, i; |
780 | uint32_t ecc1, ecc2, ecc3, ecc4; | ||
781 | |||
782 | num_err = (readl(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF; | ||
783 | |||
784 | /* no bit flipping */ | ||
785 | if (likely(num_err == 0)) | ||
786 | return 0; | ||
787 | |||
788 | /* too many errors */ | ||
789 | if (unlikely(num_err > 8)) { | ||
790 | /* | ||
791 | * This is a temporary erase check. A newly erased page read | ||
792 | * would result in an ecc error because the oob data is also | ||
793 | * erased to FF and the calculated ecc for an FF data is not | ||
794 | * FF..FF. | ||
795 | * This is a workaround to skip performing correction in case | ||
796 | * data is FF..FF | ||
797 | * | ||
798 | * Logic: | ||
799 | * For every page, each bit written as 0 is counted until these | ||
800 | * number of bits are greater than 8 (the maximum correction | ||
801 | * capability of FSMC for each 512 + 13 bytes) | ||
802 | */ | ||
803 | |||
804 | int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8); | ||
805 | int bits_data = count_written_bits(dat, chip->ecc.size, 8); | ||
806 | |||
807 | if ((bits_ecc + bits_data) <= 8) { | ||
808 | if (bits_data) | ||
809 | memset(dat, 0xff, chip->ecc.size); | ||
810 | return bits_data; | ||
811 | } | ||
483 | 812 | ||
484 | /* The calculated ecc is actually the correction index in data */ | 813 | return -EBADMSG; |
485 | memcpy(ecc_data, calc_ecc, 13); | 814 | } |
486 | 815 | ||
487 | /* | 816 | /* |
488 | * ------------------- calc_ecc[] bit wise -----------|--13 bits--| | 817 | * ------------------- calc_ecc[] bit wise -----------|--13 bits--| |
@@ -493,27 +822,26 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat, | |||
493 | * uint64_t array and error offset indexes are populated in err_idx | 822 | * uint64_t array and error offset indexes are populated in err_idx |
494 | * array | 823 | * array |
495 | */ | 824 | */ |
496 | for (i = 0; i < 8; i++) { | 825 | ecc1 = readl(FSMC_NAND_REG(regs, bank, ECC1)); |
497 | if (i == 4) { | 826 | ecc2 = readl(FSMC_NAND_REG(regs, bank, ECC2)); |
498 | err_idx[4] = ((ecc_data[1] & 0x1) << 12) | ecc_data[0]; | 827 | ecc3 = readl(FSMC_NAND_REG(regs, bank, ECC3)); |
499 | ecc_data[1] >>= 1; | 828 | ecc4 = readl(FSMC_NAND_REG(regs, bank, STS)); |
500 | continue; | 829 | |
501 | } | 830 | err_idx[0] = (ecc1 >> 0) & 0x1FFF; |
502 | err_idx[i] = (ecc_data[i/4] & 0x1FFF); | 831 | err_idx[1] = (ecc1 >> 13) & 0x1FFF; |
503 | ecc_data[i/4] >>= 13; | 832 | err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F); |
504 | } | 833 | err_idx[3] = (ecc2 >> 7) & 0x1FFF; |
505 | 834 | err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF); | |
506 | num_err = (readl(&regs->bank_regs[bank].sts) >> 10) & 0xF; | 835 | err_idx[5] = (ecc3 >> 1) & 0x1FFF;
507 | 836 | err_idx[6] = (ecc3 >> 14) & 0x1FFF; | |
508 | if (num_err == 0xF) | 837 | err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F); |
509 | return -EBADMSG; | ||
510 | 838 | ||
511 | i = 0; | 839 | i = 0; |
512 | while (num_err--) { | 840 | while (num_err--) { |
513 | change_bit(0, (unsigned long *)&err_idx[i]); | 841 | change_bit(0, (unsigned long *)&err_idx[i]); |
514 | change_bit(1, (unsigned long *)&err_idx[i]); | 842 | change_bit(1, (unsigned long *)&err_idx[i]); |
515 | 843 | ||
516 | if (err_idx[i] <= 512 * 8) { | 844 | if (err_idx[i] < chip->ecc.size * 8) { |
517 | change_bit(err_idx[i], (unsigned long *)dat); | 845 | change_bit(err_idx[i], (unsigned long *)dat); |
518 | i++; | 846 | i++; |
519 | } | 847 | } |
@@ -521,6 +849,44 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat, | |||
521 | return i; | 849 | return i; |
522 | } | 850 | } |
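
Each err_idx entry extracted above is a 13-bit bit offset; offsets at or beyond 512 * 8 fall outside the data buffer and are skipped. On a little-endian CPU the change_bit() call on the data amounts to the following byte/bit arithmetic (sketch only, not driver code):

static void fix_one_bitflip(uint8_t *sector, uint32_t bit_offset)
{
	if (bit_offset < 512 * 8)	/* inside the 512-byte data area */
		sector[bit_offset / 8] ^= 1 << (bit_offset % 8);
}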
523 | 851 | ||
852 | static bool filter(struct dma_chan *chan, void *slave) | ||
853 | { | ||
854 | chan->private = slave; | ||
855 | return true; | ||
856 | } | ||
857 | |||
858 | #ifdef CONFIG_OF | ||
859 | static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev, | ||
860 | struct device_node *np) | ||
861 | { | ||
862 | struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); | ||
863 | u32 val; | ||
864 | |||
865 | /* Set default NAND width to 8 bits */ | ||
866 | pdata->width = 8; | ||
867 | if (!of_property_read_u32(np, "bank-width", &val)) { | ||
868 | if (val == 2) { | ||
869 | pdata->width = 16; | ||
870 | } else if (val != 1) { | ||
871 | dev_err(&pdev->dev, "invalid bank-width %u\n", val); | ||
872 | return -EINVAL; | ||
873 | } | ||
874 | } | ||
875 | of_property_read_u32(np, "st,ale-off", &pdata->ale_off); | ||
876 | of_property_read_u32(np, "st,cle-off", &pdata->cle_off); | ||
877 | if (of_get_property(np, "nand-skip-bbtscan", NULL)) | ||
878 | pdata->options = NAND_SKIP_BBTSCAN; | ||
879 | |||
880 | return 0; | ||
881 | } | ||
882 | #else | ||
883 | static int __devinit fsmc_nand_probe_config_dt(struct platform_device *pdev, | ||
884 | struct device_node *np) | ||
885 | { | ||
886 | return -ENOSYS; | ||
887 | } | ||
888 | #endif | ||
889 | |||
524 | /* | 890 | /* |
525 | * fsmc_nand_probe - Probe function | 891 | * fsmc_nand_probe - Probe function |
526 | * @pdev: platform device structure | 892 | * @pdev: platform device structure |
@@ -528,102 +894,109 @@ static int fsmc_correct_data(struct mtd_info *mtd, uint8_t *dat, | |||
528 | static int __init fsmc_nand_probe(struct platform_device *pdev) | 894 | static int __init fsmc_nand_probe(struct platform_device *pdev) |
529 | { | 895 | { |
530 | struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); | 896 | struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); |
897 | struct device_node __maybe_unused *np = pdev->dev.of_node; | ||
898 | struct mtd_part_parser_data ppdata = {}; | ||
531 | struct fsmc_nand_data *host; | 899 | struct fsmc_nand_data *host; |
532 | struct mtd_info *mtd; | 900 | struct mtd_info *mtd; |
533 | struct nand_chip *nand; | 901 | struct nand_chip *nand; |
534 | struct fsmc_regs *regs; | ||
535 | struct resource *res; | 902 | struct resource *res; |
903 | dma_cap_mask_t mask; | ||
536 | int ret = 0; | 904 | int ret = 0; |
537 | u32 pid; | 905 | u32 pid; |
538 | int i; | 906 | int i; |
539 | 907 | ||
908 | if (np) { | ||
909 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
910 | pdev->dev.platform_data = pdata; | ||
911 | ret = fsmc_nand_probe_config_dt(pdev, np); | ||
912 | if (ret) { | ||
913 | dev_err(&pdev->dev, "no platform data\n"); | ||
914 | return -ENODEV; | ||
915 | } | ||
916 | } | ||
917 | |||
540 | if (!pdata) { | 918 | if (!pdata) { |
541 | dev_err(&pdev->dev, "platform data is NULL\n"); | 919 | dev_err(&pdev->dev, "platform data is NULL\n"); |
542 | return -EINVAL; | 920 | return -EINVAL; |
543 | } | 921 | } |
544 | 922 | ||
545 | /* Allocate memory for the device structure (and zero it) */ | 923 | /* Allocate memory for the device structure (and zero it) */ |
546 | host = kzalloc(sizeof(*host), GFP_KERNEL); | 924 | host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); |
547 | if (!host) { | 925 | if (!host) { |
548 | dev_err(&pdev->dev, "failed to allocate device structure\n"); | 926 | dev_err(&pdev->dev, "failed to allocate device structure\n"); |
549 | return -ENOMEM; | 927 | return -ENOMEM; |
550 | } | 928 | } |
551 | 929 | ||
552 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); | 930 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); |
553 | if (!res) { | 931 | if (!res) |
554 | ret = -EIO; | 932 | return -EINVAL; |
555 | goto err_probe1; | ||
556 | } | ||
557 | 933 | ||
558 | host->resdata = request_mem_region(res->start, resource_size(res), | 934 | if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), |
559 | pdev->name); | 935 | pdev->name)) { |
560 | if (!host->resdata) { | 936 | dev_err(&pdev->dev, "Failed to get memory data resourse\n"); |
561 | ret = -EIO; | 937 | return -ENOENT; |
562 | goto err_probe1; | ||
563 | } | 938 | } |
564 | 939 | ||
565 | host->data_va = ioremap(res->start, resource_size(res)); | 940 | host->data_pa = (dma_addr_t)res->start; |
941 | host->data_va = devm_ioremap(&pdev->dev, res->start, | ||
942 | resource_size(res)); | ||
566 | if (!host->data_va) { | 943 | if (!host->data_va) { |
567 | ret = -EIO; | 944 | dev_err(&pdev->dev, "data ioremap failed\n"); |
568 | goto err_probe1; | 945 | return -ENOMEM; |
569 | } | 946 | } |
570 | 947 | ||
571 | host->resaddr = request_mem_region(res->start + PLAT_NAND_ALE, | 948 | if (!devm_request_mem_region(&pdev->dev, res->start + pdata->ale_off, |
572 | resource_size(res), pdev->name); | 949 | resource_size(res), pdev->name)) { |
573 | if (!host->resaddr) { | 950 | dev_err(&pdev->dev, "Failed to get memory ale resource\n");
574 | ret = -EIO; | 951 | return -ENOENT; |
575 | goto err_probe1; | ||
576 | } | 952 | } |
577 | 953 | ||
578 | host->addr_va = ioremap(res->start + PLAT_NAND_ALE, resource_size(res)); | 954 | host->addr_va = devm_ioremap(&pdev->dev, res->start + pdata->ale_off, |
955 | resource_size(res)); | ||
579 | if (!host->addr_va) { | 956 | if (!host->addr_va) { |
580 | ret = -EIO; | 957 | dev_err(&pdev->dev, "ale ioremap failed\n"); |
581 | goto err_probe1; | 958 | return -ENOMEM; |
582 | } | 959 | } |
583 | 960 | ||
584 | host->rescmd = request_mem_region(res->start + PLAT_NAND_CLE, | 961 | if (!devm_request_mem_region(&pdev->dev, res->start + pdata->cle_off, |
585 | resource_size(res), pdev->name); | 962 | resource_size(res), pdev->name)) { |
586 | if (!host->rescmd) { | 963 | dev_err(&pdev->dev, "Failed to get memory cle resource\n");
587 | ret = -EIO; | 964 | return -ENOENT; |
588 | goto err_probe1; | ||
589 | } | 965 | } |
590 | 966 | ||
591 | host->cmd_va = ioremap(res->start + PLAT_NAND_CLE, resource_size(res)); | 967 | host->cmd_va = devm_ioremap(&pdev->dev, res->start + pdata->cle_off, |
968 | resource_size(res)); | ||
592 | if (!host->cmd_va) { | 969 | if (!host->cmd_va) { |
593 | ret = -EIO; | 970 | dev_err(&pdev->dev, "cle ioremap failed\n");
594 | goto err_probe1; | 971 | return -ENOMEM; |
595 | } | 972 | } |
596 | 973 | ||
597 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs"); | 974 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs"); |
598 | if (!res) { | 975 | if (!res) |
599 | ret = -EIO; | 976 | return -EINVAL; |
600 | goto err_probe1; | ||
601 | } | ||
602 | 977 | ||
603 | host->resregs = request_mem_region(res->start, resource_size(res), | 978 | if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), |
604 | pdev->name); | 979 | pdev->name)) { |
605 | if (!host->resregs) { | 980 | dev_err(&pdev->dev, "Failed to get memory regs resource\n");
606 | ret = -EIO; | 981 | return -ENOENT; |
607 | goto err_probe1; | ||
608 | } | 982 | } |
609 | 983 | ||
610 | host->regs_va = ioremap(res->start, resource_size(res)); | 984 | host->regs_va = devm_ioremap(&pdev->dev, res->start, |
985 | resource_size(res)); | ||
611 | if (!host->regs_va) { | 986 | if (!host->regs_va) { |
612 | ret = -EIO; | 987 | dev_err(&pdev->dev, "regs ioremap failed\n"); |
613 | goto err_probe1; | 988 | return -ENOMEM; |
614 | } | 989 | } |
615 | 990 | ||
616 | host->clk = clk_get(&pdev->dev, NULL); | 991 | host->clk = clk_get(&pdev->dev, NULL); |
617 | if (IS_ERR(host->clk)) { | 992 | if (IS_ERR(host->clk)) { |
618 | dev_err(&pdev->dev, "failed to fetch block clock\n"); | 993 | dev_err(&pdev->dev, "failed to fetch block clock\n"); |
619 | ret = PTR_ERR(host->clk); | 994 | return PTR_ERR(host->clk); |
620 | host->clk = NULL; | ||
621 | goto err_probe1; | ||
622 | } | 995 | } |
623 | 996 | ||
624 | ret = clk_enable(host->clk); | 997 | ret = clk_enable(host->clk); |
625 | if (ret) | 998 | if (ret) |
626 | goto err_probe1; | 999 | goto err_clk_enable; |
627 | 1000 | ||
628 | /* | 1001 | /* |
629 | * This device ID is actually a common AMBA ID as used on the | 1002 | * This device ID is actually a common AMBA ID as used on the |
@@ -639,7 +1012,14 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) | |||
639 | 1012 | ||
640 | host->bank = pdata->bank; | 1013 | host->bank = pdata->bank; |
641 | host->select_chip = pdata->select_bank; | 1014 | host->select_chip = pdata->select_bank; |
642 | regs = host->regs_va; | 1015 | host->partitions = pdata->partitions; |
1016 | host->nr_partitions = pdata->nr_partitions; | ||
1017 | host->dev = &pdev->dev; | ||
1018 | host->dev_timings = pdata->nand_timings; | ||
1019 | host->mode = pdata->mode; | ||
1020 | |||
1021 | if (host->mode == USE_DMA_ACCESS) | ||
1022 | init_completion(&host->dma_access_complete); | ||
643 | 1023 | ||
644 | /* Link all private pointers */ | 1024 | /* Link all private pointers */ |
645 | mtd = &host->mtd; | 1025 | mtd = &host->mtd; |
@@ -658,21 +1038,53 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) | |||
658 | nand->ecc.size = 512; | 1038 | nand->ecc.size = 512; |
659 | nand->options = pdata->options; | 1039 | nand->options = pdata->options; |
660 | nand->select_chip = fsmc_select_chip; | 1040 | nand->select_chip = fsmc_select_chip; |
1041 | nand->badblockbits = 7; | ||
661 | 1042 | ||
662 | if (pdata->width == FSMC_NAND_BW16) | 1043 | if (pdata->width == FSMC_NAND_BW16) |
663 | nand->options |= NAND_BUSWIDTH_16; | 1044 | nand->options |= NAND_BUSWIDTH_16; |
664 | 1045 | ||
665 | fsmc_nand_setup(regs, host->bank, nand->options & NAND_BUSWIDTH_16); | 1046 | switch (host->mode) { |
1047 | case USE_DMA_ACCESS: | ||
1048 | dma_cap_zero(mask); | ||
1049 | dma_cap_set(DMA_MEMCPY, mask); | ||
1050 | host->read_dma_chan = dma_request_channel(mask, filter, | ||
1051 | pdata->read_dma_priv); | ||
1052 | if (!host->read_dma_chan) { | ||
1053 | dev_err(&pdev->dev, "Unable to get read dma channel\n"); | ||
1054 | goto err_req_read_chnl; | ||
1055 | } | ||
1056 | host->write_dma_chan = dma_request_channel(mask, filter, | ||
1057 | pdata->write_dma_priv); | ||
1058 | if (!host->write_dma_chan) { | ||
1059 | dev_err(&pdev->dev, "Unable to get write dma channel\n"); | ||
1060 | goto err_req_write_chnl; | ||
1061 | } | ||
1062 | nand->read_buf = fsmc_read_buf_dma; | ||
1063 | nand->write_buf = fsmc_write_buf_dma; | ||
1064 | break; | ||
1065 | |||
1066 | default: | ||
1067 | case USE_WORD_ACCESS: | ||
1068 | nand->read_buf = fsmc_read_buf; | ||
1069 | nand->write_buf = fsmc_write_buf; | ||
1070 | break; | ||
1071 | } | ||
1072 | |||
1073 | fsmc_nand_setup(host->regs_va, host->bank, | ||
1074 | nand->options & NAND_BUSWIDTH_16, | ||
1075 | host->dev_timings); | ||
666 | 1076 | ||
667 | if (AMBA_REV_BITS(host->pid) >= 8) { | 1077 | if (AMBA_REV_BITS(host->pid) >= 8) { |
668 | nand->ecc.read_page = fsmc_read_page_hwecc; | 1078 | nand->ecc.read_page = fsmc_read_page_hwecc; |
669 | nand->ecc.calculate = fsmc_read_hwecc_ecc4; | 1079 | nand->ecc.calculate = fsmc_read_hwecc_ecc4; |
670 | nand->ecc.correct = fsmc_correct_data; | 1080 | nand->ecc.correct = fsmc_bch8_correct_data; |
671 | nand->ecc.bytes = 13; | 1081 | nand->ecc.bytes = 13; |
1082 | nand->ecc.strength = 8; | ||
672 | } else { | 1083 | } else { |
673 | nand->ecc.calculate = fsmc_read_hwecc_ecc1; | 1084 | nand->ecc.calculate = fsmc_read_hwecc_ecc1; |
674 | nand->ecc.correct = nand_correct_data; | 1085 | nand->ecc.correct = nand_correct_data; |
675 | nand->ecc.bytes = 3; | 1086 | nand->ecc.bytes = 3; |
1087 | nand->ecc.strength = 1; | ||
676 | } | 1088 | } |
677 | 1089 | ||
678 | /* | 1090 | /* |
@@ -681,19 +1093,52 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) | |||
681 | if (nand_scan_ident(&host->mtd, 1, NULL)) { | 1093 | if (nand_scan_ident(&host->mtd, 1, NULL)) { |
682 | ret = -ENXIO; | 1094 | ret = -ENXIO; |
683 | dev_err(&pdev->dev, "No NAND Device found!\n"); | 1095 | dev_err(&pdev->dev, "No NAND Device found!\n"); |
684 | goto err_probe; | 1096 | goto err_scan_ident; |
685 | } | 1097 | } |
686 | 1098 | ||
687 | if (AMBA_REV_BITS(host->pid) >= 8) { | 1099 | if (AMBA_REV_BITS(host->pid) >= 8) { |
688 | if (host->mtd.writesize == 512) { | 1100 | switch (host->mtd.oobsize) { |
689 | nand->ecc.layout = &fsmc_ecc4_sp_layout; | 1101 | case 16: |
1102 | nand->ecc.layout = &fsmc_ecc4_16_layout; | ||
690 | host->ecc_place = &fsmc_ecc4_sp_place; | 1103 | host->ecc_place = &fsmc_ecc4_sp_place; |
691 | } else { | 1104 | break; |
692 | nand->ecc.layout = &fsmc_ecc4_lp_layout; | 1105 | case 64: |
1106 | nand->ecc.layout = &fsmc_ecc4_64_layout; | ||
1107 | host->ecc_place = &fsmc_ecc4_lp_place; | ||
1108 | break; | ||
1109 | case 128: | ||
1110 | nand->ecc.layout = &fsmc_ecc4_128_layout; | ||
1111 | host->ecc_place = &fsmc_ecc4_lp_place; | ||
1112 | break; | ||
1113 | case 224: | ||
1114 | nand->ecc.layout = &fsmc_ecc4_224_layout; | ||
693 | host->ecc_place = &fsmc_ecc4_lp_place; | 1115 | host->ecc_place = &fsmc_ecc4_lp_place; |
1116 | break; | ||
1117 | case 256: | ||
1118 | nand->ecc.layout = &fsmc_ecc4_256_layout; | ||
1119 | host->ecc_place = &fsmc_ecc4_lp_place; | ||
1120 | break; | ||
1121 | default: | ||
1122 | printk(KERN_WARNING "No oob scheme defined for " | ||
1123 | "oobsize %d\n", mtd->oobsize); | ||
1124 | BUG(); | ||
694 | } | 1125 | } |
695 | } else { | 1126 | } else { |
696 | nand->ecc.layout = &fsmc_ecc1_layout; | 1127 | switch (host->mtd.oobsize) { |
1128 | case 16: | ||
1129 | nand->ecc.layout = &fsmc_ecc1_16_layout; | ||
1130 | break; | ||
1131 | case 64: | ||
1132 | nand->ecc.layout = &fsmc_ecc1_64_layout; | ||
1133 | break; | ||
1134 | case 128: | ||
1135 | nand->ecc.layout = &fsmc_ecc1_128_layout; | ||
1136 | break; | ||
1137 | default: | ||
1138 | printk(KERN_WARNING "No oob scheme defined for " | ||
1139 | "oobsize %d\n", mtd->oobsize); | ||
1140 | BUG(); | ||
1141 | } | ||
697 | } | 1142 | } |
698 | 1143 | ||
699 | /* Second stage of scan to fill MTD data-structures */ | 1144 | /* Second stage of scan to fill MTD data-structures */ |
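Note: the switch above only picks a pre-built layout table per OOB size; the tables themselves (fsmc_ecc4_16_layout and friends) are defined earlier in the patch and are not shown here. As a rough illustration of what such a table contains, a hypothetical layout for a 2048+64 page with 13 ECC bytes per 512-byte step (4 x 13 = 52 ECC bytes) might look like the sketch below; the byte positions are invented for the example, not taken from the driver.

	#include <linux/mtd/nand.h>

	/* Hypothetical example only, not the driver's actual table. */
	static struct nand_ecclayout example_ecc4_64_layout = {
		.eccbytes = 52,
		.eccpos = {
			 2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14,
			18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
			34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
			50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
		},
		.oobfree = {
			{ .offset = 15, .length = 3 },
			{ .offset = 31, .length = 3 },
			{ .offset = 47, .length = 3 },
			{ .offset = 63, .length = 1 },
		},
	};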
@@ -713,13 +1158,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) | |||
713 | * Check for partition info passed | 1158 | * Check for partition info passed |
714 | */ | 1159 | */ |
715 | host->mtd.name = "nand"; | 1160 | host->mtd.name = "nand"; |
716 | ret = mtd_device_parse_register(&host->mtd, NULL, 0, | 1161 | ppdata.of_node = np; |
717 | host->mtd.size <= 0x04000000 ? | 1162 | ret = mtd_device_parse_register(&host->mtd, NULL, &ppdata, |
718 | partition_info_16KB_blk : | 1163 | host->partitions, host->nr_partitions); |
719 | partition_info_128KB_blk, | ||
720 | host->mtd.size <= 0x04000000 ? | ||
721 | ARRAY_SIZE(partition_info_16KB_blk) : | ||
722 | ARRAY_SIZE(partition_info_128KB_blk)); | ||
723 | if (ret) | 1164 | if (ret) |
724 | goto err_probe; | 1165 | goto err_probe; |
725 | 1166 | ||
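Note: the registration now routes a struct mtd_part_parser_data through mtd_device_parse_register() so the "ofpart" parser can pick partitions up from the device-tree node, with the platform-data partitions as fallback. A minimal sketch of that call pattern, assuming np, parts and nr_parts come from the probe path:

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>
	#include <linux/of.h>

	static int register_nand_partitions(struct mtd_info *mtd,
					    struct device_node *np,
					    const struct mtd_partition *parts,
					    int nr_parts)
	{
		struct mtd_part_parser_data ppdata = {
			.of_node = np,	/* lets the "ofpart" parser find the DT node */
		};

		/* NULL probe list means "use the default set of parsers" */
		return mtd_device_parse_register(mtd, NULL, &ppdata, parts, nr_parts);
	}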
@@ -728,32 +1169,16 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) | |||
728 | return 0; | 1169 | return 0; |
729 | 1170 | ||
730 | err_probe: | 1171 | err_probe: |
1172 | err_scan_ident: | ||
1173 | if (host->mode == USE_DMA_ACCESS) | ||
1174 | dma_release_channel(host->write_dma_chan); | ||
1175 | err_req_write_chnl: | ||
1176 | if (host->mode == USE_DMA_ACCESS) | ||
1177 | dma_release_channel(host->read_dma_chan); | ||
1178 | err_req_read_chnl: | ||
731 | clk_disable(host->clk); | 1179 | clk_disable(host->clk); |
732 | err_probe1: | 1180 | err_clk_enable: |
733 | if (host->clk) | 1181 | clk_put(host->clk); |
734 | clk_put(host->clk); | ||
735 | if (host->regs_va) | ||
736 | iounmap(host->regs_va); | ||
737 | if (host->resregs) | ||
738 | release_mem_region(host->resregs->start, | ||
739 | resource_size(host->resregs)); | ||
740 | if (host->cmd_va) | ||
741 | iounmap(host->cmd_va); | ||
742 | if (host->rescmd) | ||
743 | release_mem_region(host->rescmd->start, | ||
744 | resource_size(host->rescmd)); | ||
745 | if (host->addr_va) | ||
746 | iounmap(host->addr_va); | ||
747 | if (host->resaddr) | ||
748 | release_mem_region(host->resaddr->start, | ||
749 | resource_size(host->resaddr)); | ||
750 | if (host->data_va) | ||
751 | iounmap(host->data_va); | ||
752 | if (host->resdata) | ||
753 | release_mem_region(host->resdata->start, | ||
754 | resource_size(host->resdata)); | ||
755 | |||
756 | kfree(host); | ||
757 | return ret; | 1182 | return ret; |
758 | } | 1183 | } |
759 | 1184 | ||
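Note: the probe path above requests two DMA_MEMCPY channels through the dmaengine API and unwinds them in reverse order on failure. A minimal sketch of that request/unwind pattern; the filter function and its matching rule are hypothetical, since the real filter is supplied by the platform:

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	/* Hypothetical filter: accept any channel unless a platform hint is given. */
	static bool nand_dma_filter(struct dma_chan *chan, void *param)
	{
		return !param || chan->private == param;
	}

	static int request_memcpy_channels(struct dma_chan **rx, struct dma_chan **tx,
					   void *rx_param, void *tx_param)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		*rx = dma_request_channel(mask, nand_dma_filter, rx_param);
		if (!*rx)
			return -ENODEV;

		*tx = dma_request_channel(mask, nand_dma_filter, tx_param);
		if (!*tx) {
			dma_release_channel(*rx);	/* unwind in reverse order */
			return -ENODEV;
		}
		return 0;
	}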
@@ -768,24 +1193,15 @@ static int fsmc_nand_remove(struct platform_device *pdev) | |||
768 | 1193 | ||
769 | if (host) { | 1194 | if (host) { |
770 | nand_release(&host->mtd); | 1195 | nand_release(&host->mtd); |
1196 | |||
1197 | if (host->mode == USE_DMA_ACCESS) { | ||
1198 | dma_release_channel(host->write_dma_chan); | ||
1199 | dma_release_channel(host->read_dma_chan); | ||
1200 | } | ||
771 | clk_disable(host->clk); | 1201 | clk_disable(host->clk); |
772 | clk_put(host->clk); | 1202 | clk_put(host->clk); |
773 | |||
774 | iounmap(host->regs_va); | ||
775 | release_mem_region(host->resregs->start, | ||
776 | resource_size(host->resregs)); | ||
777 | iounmap(host->cmd_va); | ||
778 | release_mem_region(host->rescmd->start, | ||
779 | resource_size(host->rescmd)); | ||
780 | iounmap(host->addr_va); | ||
781 | release_mem_region(host->resaddr->start, | ||
782 | resource_size(host->resaddr)); | ||
783 | iounmap(host->data_va); | ||
784 | release_mem_region(host->resdata->start, | ||
785 | resource_size(host->resdata)); | ||
786 | |||
787 | kfree(host); | ||
788 | } | 1203 | } |
1204 | |||
789 | return 0; | 1205 | return 0; |
790 | } | 1206 | } |
791 | 1207 | ||
@@ -801,15 +1217,24 @@ static int fsmc_nand_suspend(struct device *dev) | |||
801 | static int fsmc_nand_resume(struct device *dev) | 1217 | static int fsmc_nand_resume(struct device *dev) |
802 | { | 1218 | { |
803 | struct fsmc_nand_data *host = dev_get_drvdata(dev); | 1219 | struct fsmc_nand_data *host = dev_get_drvdata(dev); |
804 | if (host) | 1220 | if (host) { |
805 | clk_enable(host->clk); | 1221 | clk_enable(host->clk); |
1222 | fsmc_nand_setup(host->regs_va, host->bank, | ||
1223 | host->nand.options & NAND_BUSWIDTH_16, | ||
1224 | host->dev_timings); | ||
1225 | } | ||
806 | return 0; | 1226 | return 0; |
807 | } | 1227 | } |
808 | 1228 | ||
809 | static const struct dev_pm_ops fsmc_nand_pm_ops = { | 1229 | static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume); |
810 | .suspend = fsmc_nand_suspend, | 1230 | #endif |
811 | .resume = fsmc_nand_resume, | 1231 | |
1232 | #ifdef CONFIG_OF | ||
1233 | static const struct of_device_id fsmc_nand_id_table[] = { | ||
1234 | { .compatible = "st,spear600-fsmc-nand" }, | ||
1235 | {} | ||
812 | }; | 1236 | }; |
1237 | MODULE_DEVICE_TABLE(of, fsmc_nand_id_table); | ||
813 | #endif | 1238 | #endif |
814 | 1239 | ||
815 | static struct platform_driver fsmc_nand_driver = { | 1240 | static struct platform_driver fsmc_nand_driver = { |
@@ -817,6 +1242,7 @@ static struct platform_driver fsmc_nand_driver = { | |||
817 | .driver = { | 1242 | .driver = { |
818 | .owner = THIS_MODULE, | 1243 | .owner = THIS_MODULE, |
819 | .name = "fsmc-nand", | 1244 | .name = "fsmc-nand", |
1245 | .of_match_table = of_match_ptr(fsmc_nand_id_table), | ||
820 | #ifdef CONFIG_PM | 1246 | #ifdef CONFIG_PM |
821 | .pm = &fsmc_nand_pm_ops, | 1247 | .pm = &fsmc_nand_pm_ops, |
822 | #endif | 1248 | #endif |
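Note: the driver now declares its PM callbacks with SIMPLE_DEV_PM_OPS and its OF match table behind of_match_ptr(), so the same source builds with or without CONFIG_PM_SLEEP and CONFIG_OF. A generic skeleton of that pattern; the foo_* names and the compatible string are placeholders:

	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev) { return 0; }
	static int foo_resume(struct device *dev)  { return 0; }

	/* Expands to an empty dev_pm_ops unless CONFIG_PM_SLEEP is set. */
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	#ifdef CONFIG_OF
	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo" },
		{ }
	};
	MODULE_DEVICE_TABLE(of, foo_of_match);
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.name		= "foo",
			.owner		= THIS_MODULE,
			.pm		= &foo_pm_ops,
			/* evaluates to NULL when CONFIG_OF is not set */
			.of_match_table	= of_match_ptr(foo_of_match),
		},
	};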
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 590dd5cceed6..e8ea7107932e 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c | |||
@@ -848,7 +848,10 @@ int gpmi_send_command(struct gpmi_nand_data *this) | |||
848 | 848 | ||
849 | sg_init_one(sgl, this->cmd_buffer, this->command_length); | 849 | sg_init_one(sgl, this->cmd_buffer, this->command_length); |
850 | dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); | 850 | dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE); |
851 | desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_MEM_TO_DEV, 1); | 851 | desc = dmaengine_prep_slave_sg(channel, |
852 | sgl, 1, DMA_MEM_TO_DEV, | ||
853 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
854 | |||
852 | if (!desc) { | 855 | if (!desc) { |
853 | pr_err("step 2 error\n"); | 856 | pr_err("step 2 error\n"); |
854 | return -1; | 857 | return -1; |
@@ -889,7 +892,8 @@ int gpmi_send_data(struct gpmi_nand_data *this) | |||
889 | /* [2] send DMA request */ | 892 | /* [2] send DMA request */ |
890 | prepare_data_dma(this, DMA_TO_DEVICE); | 893 | prepare_data_dma(this, DMA_TO_DEVICE); |
891 | desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, | 894 | desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, |
892 | 1, DMA_MEM_TO_DEV, 1); | 895 | 1, DMA_MEM_TO_DEV, |
896 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
893 | if (!desc) { | 897 | if (!desc) { |
894 | pr_err("step 2 error\n"); | 898 | pr_err("step 2 error\n"); |
895 | return -1; | 899 | return -1; |
@@ -925,7 +929,8 @@ int gpmi_read_data(struct gpmi_nand_data *this) | |||
925 | /* [2] : send DMA request */ | 929 | /* [2] : send DMA request */ |
926 | prepare_data_dma(this, DMA_FROM_DEVICE); | 930 | prepare_data_dma(this, DMA_FROM_DEVICE); |
927 | desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, | 931 | desc = dmaengine_prep_slave_sg(channel, &this->data_sgl, |
928 | 1, DMA_DEV_TO_MEM, 1); | 932 | 1, DMA_DEV_TO_MEM, |
933 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
929 | if (!desc) { | 934 | if (!desc) { |
930 | pr_err("step 2 error\n"); | 935 | pr_err("step 2 error\n"); |
931 | return -1; | 936 | return -1; |
@@ -970,8 +975,10 @@ int gpmi_send_page(struct gpmi_nand_data *this, | |||
970 | pio[4] = payload; | 975 | pio[4] = payload; |
971 | pio[5] = auxiliary; | 976 | pio[5] = auxiliary; |
972 | 977 | ||
973 | desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, | 978 | desc = dmaengine_prep_slave_sg(channel, |
974 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 0); | 979 | (struct scatterlist *)pio, |
980 | ARRAY_SIZE(pio), DMA_TRANS_NONE, | ||
981 | DMA_CTRL_ACK); | ||
975 | if (!desc) { | 982 | if (!desc) { |
976 | pr_err("step 2 error\n"); | 983 | pr_err("step 2 error\n"); |
977 | return -1; | 984 | return -1; |
@@ -1035,7 +1042,8 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
1035 | pio[5] = auxiliary; | 1042 | pio[5] = auxiliary; |
1036 | desc = dmaengine_prep_slave_sg(channel, | 1043 | desc = dmaengine_prep_slave_sg(channel, |
1037 | (struct scatterlist *)pio, | 1044 | (struct scatterlist *)pio, |
1038 | ARRAY_SIZE(pio), DMA_TRANS_NONE, 1); | 1045 | ARRAY_SIZE(pio), DMA_TRANS_NONE, |
1046 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1039 | if (!desc) { | 1047 | if (!desc) { |
1040 | pr_err("step 2 error\n"); | 1048 | pr_err("step 2 error\n"); |
1041 | return -1; | 1049 | return -1; |
@@ -1052,9 +1060,11 @@ int gpmi_read_page(struct gpmi_nand_data *this, | |||
1052 | | BF_GPMI_CTRL0_ADDRESS(address) | 1060 | | BF_GPMI_CTRL0_ADDRESS(address) |
1053 | | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); | 1061 | | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size); |
1054 | pio[1] = 0; | 1062 | pio[1] = 0; |
1063 | pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */ | ||
1055 | desc = dmaengine_prep_slave_sg(channel, | 1064 | desc = dmaengine_prep_slave_sg(channel, |
1056 | (struct scatterlist *)pio, 2, | 1065 | (struct scatterlist *)pio, 3, |
1057 | DMA_TRANS_NONE, 1); | 1066 | DMA_TRANS_NONE, |
1067 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1058 | if (!desc) { | 1068 | if (!desc) { |
1059 | pr_err("step 3 error\n"); | 1069 | pr_err("step 3 error\n"); |
1060 | return -1; | 1070 | return -1; |
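Note: the gpmi-lib changes above replace a bare "1" with explicit dma_ctrl_flags: DMA_PREP_INTERRUPT asks for a completion interrupt and DMA_CTRL_ACK lets the engine reclaim the descriptor. A hedged sketch of the usual slave-DMA sequence around dmaengine_prep_slave_sg(); the "done" callback and its argument are placeholders:

	#include <linux/dmaengine.h>
	#include <linux/scatterlist.h>

	static int issue_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
				  unsigned int nents, enum dma_transfer_direction dir,
				  dma_async_tx_callback done, void *done_arg)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir,
					       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -EINVAL;

		desc->callback = done;		/* runs from the completion interrupt */
		desc->callback_param = done_arg;

		dmaengine_submit(desc);		/* queue the descriptor */
		dma_async_issue_pending(chan);	/* start (or keep) the engine running */
		return 0;
	}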
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 493ec2fcf97f..75b1dde16358 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c | |||
@@ -1124,7 +1124,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
1124 | chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); | 1124 | chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); |
1125 | 1125 | ||
1126 | /* Do we have a flash based bad block table ? */ | 1126 | /* Do we have a flash based bad block table ? */ |
1127 | if (chip->options & NAND_BBT_USE_FLASH) | 1127 | if (chip->bbt_options & NAND_BBT_USE_FLASH) |
1128 | ret = nand_update_bbt(mtd, ofs); | 1128 | ret = nand_update_bbt(mtd, ofs); |
1129 | else { | 1129 | else { |
1130 | chipnr = (int)(ofs >> chip->chip_shift); | 1130 | chipnr = (int)(ofs >> chip->chip_shift); |
@@ -1155,7 +1155,7 @@ static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs) | |||
1155 | return ret; | 1155 | return ret; |
1156 | } | 1156 | } |
1157 | 1157 | ||
1158 | static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this) | 1158 | static int nand_boot_set_geometry(struct gpmi_nand_data *this) |
1159 | { | 1159 | { |
1160 | struct boot_rom_geometry *geometry = &this->rom_geometry; | 1160 | struct boot_rom_geometry *geometry = &this->rom_geometry; |
1161 | 1161 | ||
@@ -1182,7 +1182,7 @@ static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this) | |||
1182 | } | 1182 | } |
1183 | 1183 | ||
1184 | static const char *fingerprint = "STMP"; | 1184 | static const char *fingerprint = "STMP"; |
1185 | static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this) | 1185 | static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) |
1186 | { | 1186 | { |
1187 | struct boot_rom_geometry *rom_geo = &this->rom_geometry; | 1187 | struct boot_rom_geometry *rom_geo = &this->rom_geometry; |
1188 | struct device *dev = this->dev; | 1188 | struct device *dev = this->dev; |
@@ -1239,7 +1239,7 @@ static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this) | |||
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | /* Writes a transcription stamp. */ | 1241 | /* Writes a transcription stamp. */ |
1242 | static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this) | 1242 | static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) |
1243 | { | 1243 | { |
1244 | struct device *dev = this->dev; | 1244 | struct device *dev = this->dev; |
1245 | struct boot_rom_geometry *rom_geo = &this->rom_geometry; | 1245 | struct boot_rom_geometry *rom_geo = &this->rom_geometry; |
@@ -1322,7 +1322,7 @@ static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this) | |||
1322 | return 0; | 1322 | return 0; |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | static int __devinit mx23_boot_init(struct gpmi_nand_data *this) | 1325 | static int mx23_boot_init(struct gpmi_nand_data *this) |
1326 | { | 1326 | { |
1327 | struct device *dev = this->dev; | 1327 | struct device *dev = this->dev; |
1328 | struct nand_chip *chip = &this->nand; | 1328 | struct nand_chip *chip = &this->nand; |
@@ -1391,7 +1391,7 @@ static int __devinit mx23_boot_init(struct gpmi_nand_data *this) | |||
1391 | return 0; | 1391 | return 0; |
1392 | } | 1392 | } |
1393 | 1393 | ||
1394 | static int __devinit nand_boot_init(struct gpmi_nand_data *this) | 1394 | static int nand_boot_init(struct gpmi_nand_data *this) |
1395 | { | 1395 | { |
1396 | nand_boot_set_geometry(this); | 1396 | nand_boot_set_geometry(this); |
1397 | 1397 | ||
@@ -1401,7 +1401,7 @@ static int __devinit nand_boot_init(struct gpmi_nand_data *this) | |||
1401 | return 0; | 1401 | return 0; |
1402 | } | 1402 | } |
1403 | 1403 | ||
1404 | static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this) | 1404 | static int gpmi_set_geometry(struct gpmi_nand_data *this) |
1405 | { | 1405 | { |
1406 | int ret; | 1406 | int ret; |
1407 | 1407 | ||
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h index e023bccb7781..ec6180d4ff8f 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/mtd/nand.h> | 20 | #include <linux/mtd/nand.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/dma-mapping.h> | 22 | #include <linux/dma-mapping.h> |
23 | #include <mach/dma.h> | 23 | #include <linux/fsl/mxs-dma.h> |
24 | 24 | ||
25 | struct resources { | 25 | struct resources { |
26 | void *gpmi_regs; | 26 | void *gpmi_regs; |
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c index 5dc6f0d92f1a..11e487813428 100644 --- a/drivers/mtd/nand/h1910.c +++ b/drivers/mtd/nand/h1910.c | |||
@@ -135,8 +135,8 @@ static int __init h1910_init(void) | |||
135 | } | 135 | } |
136 | 136 | ||
137 | /* Register the partitions */ | 137 | /* Register the partitions */ |
138 | mtd_device_parse_register(h1910_nand_mtd, NULL, 0, | 138 | mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info, |
139 | partition_info, NUM_PARTITIONS); | 139 | NUM_PARTITIONS); |
140 | 140 | ||
141 | /* Return happy */ | 141 | /* Return happy */ |
142 | return 0; | 142 | return 0; |
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index ac3b9f255e00..e4147e8acb7c 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c | |||
@@ -332,6 +332,11 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) | |||
332 | chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; | 332 | chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; |
333 | chip->ecc.size = 512; | 333 | chip->ecc.size = 512; |
334 | chip->ecc.bytes = 9; | 334 | chip->ecc.bytes = 9; |
335 | chip->ecc.strength = 2; | ||
336 | /* | ||
337 | * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a | ||
338 | * conservative guess, given 9 ecc bytes and reed-solomon alg. | ||
339 | */ | ||
335 | 340 | ||
336 | if (pdata) | 341 | if (pdata) |
337 | chip->ecc.layout = pdata->ecc_layout; | 342 | chip->ecc.layout = pdata->ecc_layout; |
@@ -367,9 +372,9 @@ static int __devinit jz_nand_probe(struct platform_device *pdev) | |||
367 | goto err_gpio_free; | 372 | goto err_gpio_free; |
368 | } | 373 | } |
369 | 374 | ||
370 | ret = mtd_device_parse_register(mtd, NULL, 0, | 375 | ret = mtd_device_parse_register(mtd, NULL, NULL, |
371 | pdata ? pdata->partitions : NULL, | 376 | pdata ? pdata->partitions : NULL, |
372 | pdata ? pdata->num_partitions : 0); | 377 | pdata ? pdata->num_partitions : 0); |
373 | 378 | ||
374 | if (ret) { | 379 | if (ret) { |
375 | dev_err(&pdev->dev, "Failed to add mtd device\n"); | 380 | dev_err(&pdev->dev, "Failed to add mtd device\n"); |
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 74a43b818d0e..cc0678a967c1 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c | |||
@@ -1225,9 +1225,16 @@ static int __init mxcnd_probe(struct platform_device *pdev) | |||
1225 | goto escan; | 1225 | goto escan; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | if (this->ecc.mode == NAND_ECC_HW) { | ||
1229 | if (nfc_is_v1()) | ||
1230 | this->ecc.strength = 1; | ||
1231 | else | ||
1232 | this->ecc.strength = (host->eccsize == 4) ? 4 : 8; | ||
1233 | } | ||
1234 | |||
1228 | /* Register the partitions */ | 1235 | /* Register the partitions */ |
1229 | mtd_device_parse_register(mtd, part_probes, 0, | 1236 | mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts, |
1230 | pdata->parts, pdata->nr_parts); | 1237 | pdata->nr_parts); |
1231 | 1238 | ||
1232 | platform_set_drvdata(pdev, host); | 1239 | platform_set_drvdata(pdev, host); |
1233 | 1240 | ||
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 8a393f9e6027..47b19c0bb070 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -123,12 +123,6 @@ static int check_offs_len(struct mtd_info *mtd, | |||
123 | ret = -EINVAL; | 123 | ret = -EINVAL; |
124 | } | 124 | } |
125 | 125 | ||
126 | /* Do not allow past end of device */ | ||
127 | if (ofs + len > mtd->size) { | ||
128 | pr_debug("%s: past end of device\n", __func__); | ||
129 | ret = -EINVAL; | ||
130 | } | ||
131 | |||
132 | return ret; | 126 | return ret; |
133 | } | 127 | } |
134 | 128 | ||
@@ -338,7 +332,7 @@ static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) | |||
338 | */ | 332 | */ |
339 | static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) | 333 | static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) |
340 | { | 334 | { |
341 | int page, chipnr, res = 0; | 335 | int page, chipnr, res = 0, i = 0; |
342 | struct nand_chip *chip = mtd->priv; | 336 | struct nand_chip *chip = mtd->priv; |
343 | u16 bad; | 337 | u16 bad; |
344 | 338 | ||
@@ -356,23 +350,29 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) | |||
356 | chip->select_chip(mtd, chipnr); | 350 | chip->select_chip(mtd, chipnr); |
357 | } | 351 | } |
358 | 352 | ||
359 | if (chip->options & NAND_BUSWIDTH_16) { | 353 | do { |
360 | chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos & 0xFE, | 354 | if (chip->options & NAND_BUSWIDTH_16) { |
361 | page); | 355 | chip->cmdfunc(mtd, NAND_CMD_READOOB, |
362 | bad = cpu_to_le16(chip->read_word(mtd)); | 356 | chip->badblockpos & 0xFE, page); |
363 | if (chip->badblockpos & 0x1) | 357 | bad = cpu_to_le16(chip->read_word(mtd)); |
364 | bad >>= 8; | 358 | if (chip->badblockpos & 0x1) |
365 | else | 359 | bad >>= 8; |
366 | bad &= 0xFF; | 360 | else |
367 | } else { | 361 | bad &= 0xFF; |
368 | chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, page); | 362 | } else { |
369 | bad = chip->read_byte(mtd); | 363 | chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos, |
370 | } | 364 | page); |
365 | bad = chip->read_byte(mtd); | ||
366 | } | ||
371 | 367 | ||
372 | if (likely(chip->badblockbits == 8)) | 368 | if (likely(chip->badblockbits == 8)) |
373 | res = bad != 0xFF; | 369 | res = bad != 0xFF; |
374 | else | 370 | else |
375 | res = hweight8(bad) < chip->badblockbits; | 371 | res = hweight8(bad) < chip->badblockbits; |
372 | ofs += mtd->writesize; | ||
373 | page = (int)(ofs >> chip->page_shift) & chip->pagemask; | ||
374 | i++; | ||
375 | } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE)); | ||
376 | 376 | ||
377 | if (getchip) | 377 | if (getchip) |
378 | nand_release_device(mtd); | 378 | nand_release_device(mtd); |
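Note: with NAND_BBT_SCAN2NDPAGE the factory marker is now checked on the first two pages of the block, and the block is considered bad as soon as any checked marker fails. A simplified restatement for the common 8-bit-bus, badblockbits == 8 case; read_bbm() stands in for the READOOB plus read_byte sequence and is not a real helper:

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/nand.h>

	static uint8_t read_bbm(struct mtd_info *mtd, loff_t ofs);	/* hypothetical */

	static bool block_has_factory_bbm(struct mtd_info *mtd,
					  struct nand_chip *chip, loff_t ofs)
	{
		int i, npages = (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) ? 2 : 1;

		for (i = 0; i < npages; i++, ofs += mtd->writesize) {
			if (read_bbm(mtd, ofs) != 0xFF)
				return true;	/* any marked page condemns the block */
		}
		return false;
	}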
@@ -386,51 +386,79 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) | |||
386 | * @ofs: offset from device start | 386 | * @ofs: offset from device start |
387 | * | 387 | * |
388 | * This is the default implementation, which can be overridden by a hardware | 388 | * This is the default implementation, which can be overridden by a hardware |
389 | * specific driver. | 389 | * specific driver. We try operations in the following order, according to our |
390 | * bbt_options (NAND_BBT_NO_OOB_BBM and NAND_BBT_USE_FLASH): | ||
391 | * (1) erase the affected block, to allow OOB marker to be written cleanly | ||
392 | * (2) update in-memory BBT | ||
393 | * (3) write bad block marker to OOB area of affected block | ||
394 | * (4) update flash-based BBT | ||
395 | * Note that we retain the first error encountered in (3) or (4), finish the | ||
396 | * procedures, and dump the error in the end. | ||
390 | */ | 397 | */ |
391 | static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) | 398 | static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) |
392 | { | 399 | { |
393 | struct nand_chip *chip = mtd->priv; | 400 | struct nand_chip *chip = mtd->priv; |
394 | uint8_t buf[2] = { 0, 0 }; | 401 | uint8_t buf[2] = { 0, 0 }; |
395 | int block, ret, i = 0; | 402 | int block, res, ret = 0, i = 0; |
403 | int write_oob = !(chip->bbt_options & NAND_BBT_NO_OOB_BBM); | ||
396 | 404 | ||
397 | if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) | 405 | if (write_oob) { |
398 | ofs += mtd->erasesize - mtd->writesize; | 406 | struct erase_info einfo; |
407 | |||
408 | /* Attempt erase before marking OOB */ | ||
409 | memset(&einfo, 0, sizeof(einfo)); | ||
410 | einfo.mtd = mtd; | ||
411 | einfo.addr = ofs; | ||
412 | einfo.len = 1 << chip->phys_erase_shift; | ||
413 | nand_erase_nand(mtd, &einfo, 0); | ||
414 | } | ||
399 | 415 | ||
400 | /* Get block number */ | 416 | /* Get block number */ |
401 | block = (int)(ofs >> chip->bbt_erase_shift); | 417 | block = (int)(ofs >> chip->bbt_erase_shift); |
418 | /* Mark block bad in memory-based BBT */ | ||
402 | if (chip->bbt) | 419 | if (chip->bbt) |
403 | chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); | 420 | chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); |
404 | 421 | ||
405 | /* Do we have a flash based bad block table? */ | 422 | /* Write bad block marker to OOB */ |
406 | if (chip->bbt_options & NAND_BBT_USE_FLASH) | 423 | if (write_oob) { |
407 | ret = nand_update_bbt(mtd, ofs); | ||
408 | else { | ||
409 | struct mtd_oob_ops ops; | 424 | struct mtd_oob_ops ops; |
425 | loff_t wr_ofs = ofs; | ||
410 | 426 | ||
411 | nand_get_device(chip, mtd, FL_WRITING); | 427 | nand_get_device(chip, mtd, FL_WRITING); |
412 | 428 | ||
413 | /* | ||
414 | * Write to first two pages if necessary. If we write to more | ||
415 | * than one location, the first error encountered quits the | ||
416 | * procedure. We write two bytes per location, so we dont have | ||
417 | * to mess with 16 bit access. | ||
418 | */ | ||
419 | ops.len = ops.ooblen = 2; | ||
420 | ops.datbuf = NULL; | 429 | ops.datbuf = NULL; |
421 | ops.oobbuf = buf; | 430 | ops.oobbuf = buf; |
422 | ops.ooboffs = chip->badblockpos & ~0x01; | 431 | ops.ooboffs = chip->badblockpos; |
432 | if (chip->options & NAND_BUSWIDTH_16) { | ||
433 | ops.ooboffs &= ~0x01; | ||
434 | ops.len = ops.ooblen = 2; | ||
435 | } else { | ||
436 | ops.len = ops.ooblen = 1; | ||
437 | } | ||
423 | ops.mode = MTD_OPS_PLACE_OOB; | 438 | ops.mode = MTD_OPS_PLACE_OOB; |
439 | |||
440 | /* Write to first/last page(s) if necessary */ | ||
441 | if (chip->bbt_options & NAND_BBT_SCANLASTPAGE) | ||
442 | wr_ofs += mtd->erasesize - mtd->writesize; | ||
424 | do { | 443 | do { |
425 | ret = nand_do_write_oob(mtd, ofs, &ops); | 444 | res = nand_do_write_oob(mtd, wr_ofs, &ops); |
445 | if (!ret) | ||
446 | ret = res; | ||
426 | 447 | ||
427 | i++; | 448 | i++; |
428 | ofs += mtd->writesize; | 449 | wr_ofs += mtd->writesize; |
429 | } while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && | 450 | } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2); |
430 | i < 2); | ||
431 | 451 | ||
432 | nand_release_device(mtd); | 452 | nand_release_device(mtd); |
433 | } | 453 | } |
454 | |||
455 | /* Update flash-based bad block table */ | ||
456 | if (chip->bbt_options & NAND_BBT_USE_FLASH) { | ||
457 | res = nand_update_bbt(mtd, ofs); | ||
458 | if (!ret) | ||
459 | ret = res; | ||
460 | } | ||
461 | |||
434 | if (!ret) | 462 | if (!ret) |
435 | mtd->ecc_stats.badblocks++; | 463 | mtd->ecc_stats.badblocks++; |
436 | 464 | ||
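Note: the rewritten nand_default_block_markbad() runs every step even after a failure and reports only the first error it saw. The retention idiom, reduced to its core; the two step functions are placeholders for steps (3) and (4) in the comment above:

	static int write_oob_marker(void);	/* placeholder for step (3) */
	static int update_flash_bbt(void);	/* placeholder for step (4) */

	static int mark_bad_keep_first_error(void)
	{
		int res, ret = 0;

		res = write_oob_marker();
		if (!ret)
			ret = res;

		res = update_flash_bbt();
		if (!ret)
			ret = res;

		return ret;	/* first failure wins, but both steps always ran */
	}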
@@ -1586,25 +1614,14 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
1586 | struct mtd_oob_ops ops; | 1614 | struct mtd_oob_ops ops; |
1587 | int ret; | 1615 | int ret; |
1588 | 1616 | ||
1589 | /* Do not allow reads past end of device */ | ||
1590 | if ((from + len) > mtd->size) | ||
1591 | return -EINVAL; | ||
1592 | if (!len) | ||
1593 | return 0; | ||
1594 | |||
1595 | nand_get_device(chip, mtd, FL_READING); | 1617 | nand_get_device(chip, mtd, FL_READING); |
1596 | |||
1597 | ops.len = len; | 1618 | ops.len = len; |
1598 | ops.datbuf = buf; | 1619 | ops.datbuf = buf; |
1599 | ops.oobbuf = NULL; | 1620 | ops.oobbuf = NULL; |
1600 | ops.mode = 0; | 1621 | ops.mode = 0; |
1601 | |||
1602 | ret = nand_do_read_ops(mtd, from, &ops); | 1622 | ret = nand_do_read_ops(mtd, from, &ops); |
1603 | |||
1604 | *retlen = ops.retlen; | 1623 | *retlen = ops.retlen; |
1605 | |||
1606 | nand_release_device(mtd); | 1624 | nand_release_device(mtd); |
1607 | |||
1608 | return ret; | 1625 | return ret; |
1609 | } | 1626 | } |
1610 | 1627 | ||
@@ -2293,12 +2310,6 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
2293 | struct mtd_oob_ops ops; | 2310 | struct mtd_oob_ops ops; |
2294 | int ret; | 2311 | int ret; |
2295 | 2312 | ||
2296 | /* Do not allow reads past end of device */ | ||
2297 | if ((to + len) > mtd->size) | ||
2298 | return -EINVAL; | ||
2299 | if (!len) | ||
2300 | return 0; | ||
2301 | |||
2302 | /* Wait for the device to get ready */ | 2313 | /* Wait for the device to get ready */ |
2303 | panic_nand_wait(mtd, chip, 400); | 2314 | panic_nand_wait(mtd, chip, 400); |
2304 | 2315 | ||
@@ -2333,25 +2344,14 @@ static int nand_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
2333 | struct mtd_oob_ops ops; | 2344 | struct mtd_oob_ops ops; |
2334 | int ret; | 2345 | int ret; |
2335 | 2346 | ||
2336 | /* Do not allow reads past end of device */ | ||
2337 | if ((to + len) > mtd->size) | ||
2338 | return -EINVAL; | ||
2339 | if (!len) | ||
2340 | return 0; | ||
2341 | |||
2342 | nand_get_device(chip, mtd, FL_WRITING); | 2347 | nand_get_device(chip, mtd, FL_WRITING); |
2343 | |||
2344 | ops.len = len; | 2348 | ops.len = len; |
2345 | ops.datbuf = (uint8_t *)buf; | 2349 | ops.datbuf = (uint8_t *)buf; |
2346 | ops.oobbuf = NULL; | 2350 | ops.oobbuf = NULL; |
2347 | ops.mode = 0; | 2351 | ops.mode = 0; |
2348 | |||
2349 | ret = nand_do_write_ops(mtd, to, &ops); | 2352 | ret = nand_do_write_ops(mtd, to, &ops); |
2350 | |||
2351 | *retlen = ops.retlen; | 2353 | *retlen = ops.retlen; |
2352 | |||
2353 | nand_release_device(mtd); | 2354 | nand_release_device(mtd); |
2354 | |||
2355 | return ret; | 2355 | return ret; |
2356 | } | 2356 | } |
2357 | 2357 | ||
@@ -2550,8 +2550,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
2550 | if (check_offs_len(mtd, instr->addr, instr->len)) | 2550 | if (check_offs_len(mtd, instr->addr, instr->len)) |
2551 | return -EINVAL; | 2551 | return -EINVAL; |
2552 | 2552 | ||
2553 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; | ||
2554 | |||
2555 | /* Grab the lock and see if the device is available */ | 2553 | /* Grab the lock and see if the device is available */ |
2556 | nand_get_device(chip, mtd, FL_ERASING); | 2554 | nand_get_device(chip, mtd, FL_ERASING); |
2557 | 2555 | ||
@@ -2715,10 +2713,6 @@ static void nand_sync(struct mtd_info *mtd) | |||
2715 | */ | 2713 | */ |
2716 | static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) | 2714 | static int nand_block_isbad(struct mtd_info *mtd, loff_t offs) |
2717 | { | 2715 | { |
2718 | /* Check for invalid offset */ | ||
2719 | if (offs > mtd->size) | ||
2720 | return -EINVAL; | ||
2721 | |||
2722 | return nand_block_checkbad(mtd, offs, 1, 0); | 2716 | return nand_block_checkbad(mtd, offs, 1, 0); |
2723 | } | 2717 | } |
2724 | 2718 | ||
@@ -2857,7 +2851,6 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
2857 | chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') | 2851 | chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I') |
2858 | return 0; | 2852 | return 0; |
2859 | 2853 | ||
2860 | pr_info("ONFI flash detected\n"); | ||
2861 | chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); | 2854 | chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1); |
2862 | for (i = 0; i < 3; i++) { | 2855 | for (i = 0; i < 3; i++) { |
2863 | chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); | 2856 | chip->read_buf(mtd, (uint8_t *)p, sizeof(*p)); |
@@ -2898,7 +2891,8 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
2898 | mtd->writesize = le32_to_cpu(p->byte_per_page); | 2891 | mtd->writesize = le32_to_cpu(p->byte_per_page); |
2899 | mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; | 2892 | mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize; |
2900 | mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); | 2893 | mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page); |
2901 | chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize; | 2894 | chip->chipsize = le32_to_cpu(p->blocks_per_lun); |
2895 | chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count; | ||
2902 | *busw = 0; | 2896 | *busw = 0; |
2903 | if (le16_to_cpu(p->features) & 1) | 2897 | if (le16_to_cpu(p->features) & 1) |
2904 | *busw = NAND_BUSWIDTH_16; | 2898 | *busw = NAND_BUSWIDTH_16; |
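Note: the old computation multiplied blocks_per_lun by the erase size but never by the LUN count, so multi-LUN parts were reported at a fraction of their real size. A worked example with made-up ONFI parameter values:

	/*
	 *   byte_per_page   = 2048     ->  writesize = 2048
	 *   pages_per_block = 64       ->  erasesize = 64 * 2048 = 128 KiB
	 *   blocks_per_lun  = 2048
	 *   lun_count       = 2
	 *
	 *   old code: chipsize = 2048 * 128 KiB     = 256 MiB (second LUN ignored)
	 *   new code: chipsize = 2048 * 128 KiB * 2 = 512 MiB
	 */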
@@ -2907,6 +2901,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, | |||
2907 | chip->options |= (NAND_NO_READRDY | | 2901 | chip->options |= (NAND_NO_READRDY | |
2908 | NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK; | 2902 | NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK; |
2909 | 2903 | ||
2904 | pr_info("ONFI flash detected\n"); | ||
2910 | return 1; | 2905 | return 1; |
2911 | } | 2906 | } |
2912 | 2907 | ||
@@ -3238,6 +3233,10 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3238 | int i; | 3233 | int i; |
3239 | struct nand_chip *chip = mtd->priv; | 3234 | struct nand_chip *chip = mtd->priv; |
3240 | 3235 | ||
3236 | /* New bad blocks should be marked in OOB, flash-based BBT, or both */ | ||
3237 | BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && | ||
3238 | !(chip->bbt_options & NAND_BBT_USE_FLASH)); | ||
3239 | |||
3241 | if (!(chip->options & NAND_OWN_BUFFERS)) | 3240 | if (!(chip->options & NAND_OWN_BUFFERS)) |
3242 | chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL); | 3241 | chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL); |
3243 | if (!chip->buffers) | 3242 | if (!chip->buffers) |
@@ -3350,6 +3349,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3350 | if (!chip->ecc.size) | 3349 | if (!chip->ecc.size) |
3351 | chip->ecc.size = 256; | 3350 | chip->ecc.size = 256; |
3352 | chip->ecc.bytes = 3; | 3351 | chip->ecc.bytes = 3; |
3352 | chip->ecc.strength = 1; | ||
3353 | break; | 3353 | break; |
3354 | 3354 | ||
3355 | case NAND_ECC_SOFT_BCH: | 3355 | case NAND_ECC_SOFT_BCH: |
@@ -3384,6 +3384,8 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3384 | pr_warn("BCH ECC initialization failed!\n"); | 3384 | pr_warn("BCH ECC initialization failed!\n"); |
3385 | BUG(); | 3385 | BUG(); |
3386 | } | 3386 | } |
3387 | chip->ecc.strength = | ||
3388 | chip->ecc.bytes*8 / fls(8*chip->ecc.size); | ||
3387 | break; | 3389 | break; |
3388 | 3390 | ||
3389 | case NAND_ECC_NONE: | 3391 | case NAND_ECC_NONE: |
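Note: for soft BCH each correctable bit costs roughly fls(8 * ecc.size) parity bits, so the number of parity bytes fixes the strength. A worked example with common, here only illustrative, parameters:

	/*
	 *   ecc.size  = 512 bytes   ->  8 * 512 = 4096,  fls(4096) = 13
	 *   ecc.bytes = 7           ->  7 * 8   = 56 parity bits
	 *
	 *   ecc.strength = 56 / 13  =  4 bits correctable per 512-byte step
	 */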
@@ -3397,6 +3399,7 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3397 | chip->ecc.write_oob = nand_write_oob_std; | 3399 | chip->ecc.write_oob = nand_write_oob_std; |
3398 | chip->ecc.size = mtd->writesize; | 3400 | chip->ecc.size = mtd->writesize; |
3399 | chip->ecc.bytes = 0; | 3401 | chip->ecc.bytes = 0; |
3402 | chip->ecc.strength = 0; | ||
3400 | break; | 3403 | break; |
3401 | 3404 | ||
3402 | default: | 3405 | default: |
@@ -3461,25 +3464,26 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
3461 | mtd->type = MTD_NANDFLASH; | 3464 | mtd->type = MTD_NANDFLASH; |
3462 | mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : | 3465 | mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM : |
3463 | MTD_CAP_NANDFLASH; | 3466 | MTD_CAP_NANDFLASH; |
3464 | mtd->erase = nand_erase; | 3467 | mtd->_erase = nand_erase; |
3465 | mtd->point = NULL; | 3468 | mtd->_point = NULL; |
3466 | mtd->unpoint = NULL; | 3469 | mtd->_unpoint = NULL; |
3467 | mtd->read = nand_read; | 3470 | mtd->_read = nand_read; |
3468 | mtd->write = nand_write; | 3471 | mtd->_write = nand_write; |
3469 | mtd->panic_write = panic_nand_write; | 3472 | mtd->_panic_write = panic_nand_write; |
3470 | mtd->read_oob = nand_read_oob; | 3473 | mtd->_read_oob = nand_read_oob; |
3471 | mtd->write_oob = nand_write_oob; | 3474 | mtd->_write_oob = nand_write_oob; |
3472 | mtd->sync = nand_sync; | 3475 | mtd->_sync = nand_sync; |
3473 | mtd->lock = NULL; | 3476 | mtd->_lock = NULL; |
3474 | mtd->unlock = NULL; | 3477 | mtd->_unlock = NULL; |
3475 | mtd->suspend = nand_suspend; | 3478 | mtd->_suspend = nand_suspend; |
3476 | mtd->resume = nand_resume; | 3479 | mtd->_resume = nand_resume; |
3477 | mtd->block_isbad = nand_block_isbad; | 3480 | mtd->_block_isbad = nand_block_isbad; |
3478 | mtd->block_markbad = nand_block_markbad; | 3481 | mtd->_block_markbad = nand_block_markbad; |
3479 | mtd->writebufsize = mtd->writesize; | 3482 | mtd->writebufsize = mtd->writesize; |
3480 | 3483 | ||
3481 | /* propagate ecc.layout to mtd_info */ | 3484 | /* propagate ecc info to mtd_info */ |
3482 | mtd->ecclayout = chip->ecc.layout; | 3485 | mtd->ecclayout = chip->ecc.layout; |
3486 | mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps; | ||
3483 | 3487 | ||
3484 | /* Check, if we should skip the bad block table scan */ | 3488 | /* Check, if we should skip the bad block table scan */ |
3485 | if (chip->options & NAND_SKIP_BBTSCAN) | 3489 | if (chip->options & NAND_SKIP_BBTSCAN) |
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c index ec688548c880..2b6f632cf274 100644 --- a/drivers/mtd/nand/ndfc.c +++ b/drivers/mtd/nand/ndfc.c | |||
@@ -179,6 +179,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc, | |||
179 | chip->ecc.mode = NAND_ECC_HW; | 179 | chip->ecc.mode = NAND_ECC_HW; |
180 | chip->ecc.size = 256; | 180 | chip->ecc.size = 256; |
181 | chip->ecc.bytes = 3; | 181 | chip->ecc.bytes = 3; |
182 | chip->ecc.strength = 1; | ||
182 | chip->priv = ndfc; | 183 | chip->priv = ndfc; |
183 | 184 | ||
184 | ndfc->mtd.priv = chip; | 185 | ndfc->mtd.priv = chip; |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index b3a883e2a22f..c2b0bba9d8b3 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -1058,6 +1058,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
1058 | (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { | 1058 | (pdata->ecc_opt == OMAP_ECC_HAMMING_CODE_HW_ROMCODE)) { |
1059 | info->nand.ecc.bytes = 3; | 1059 | info->nand.ecc.bytes = 3; |
1060 | info->nand.ecc.size = 512; | 1060 | info->nand.ecc.size = 512; |
1061 | info->nand.ecc.strength = 1; | ||
1061 | info->nand.ecc.calculate = omap_calculate_ecc; | 1062 | info->nand.ecc.calculate = omap_calculate_ecc; |
1062 | info->nand.ecc.hwctl = omap_enable_hwecc; | 1063 | info->nand.ecc.hwctl = omap_enable_hwecc; |
1063 | info->nand.ecc.correct = omap_correct_data; | 1064 | info->nand.ecc.correct = omap_correct_data; |
@@ -1101,8 +1102,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) | |||
1101 | goto out_release_mem_region; | 1102 | goto out_release_mem_region; |
1102 | } | 1103 | } |
1103 | 1104 | ||
1104 | mtd_device_parse_register(&info->mtd, NULL, 0, | 1105 | mtd_device_parse_register(&info->mtd, NULL, NULL, pdata->parts, |
1105 | pdata->parts, pdata->nr_parts); | 1106 | pdata->nr_parts); |
1106 | 1107 | ||
1107 | platform_set_drvdata(pdev, &info->mtd); | 1108 | platform_set_drvdata(pdev, &info->mtd); |
1108 | 1109 | ||
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index 29f505adaf84..1d3bfb26080c 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c | |||
@@ -129,8 +129,8 @@ static int __init orion_nand_probe(struct platform_device *pdev) | |||
129 | } | 129 | } |
130 | 130 | ||
131 | mtd->name = "orion_nand"; | 131 | mtd->name = "orion_nand"; |
132 | ret = mtd_device_parse_register(mtd, NULL, 0, | 132 | ret = mtd_device_parse_register(mtd, NULL, NULL, board->parts, |
133 | board->parts, board->nr_parts); | 133 | board->nr_parts); |
134 | if (ret) { | 134 | if (ret) { |
135 | nand_release(mtd); | 135 | nand_release(mtd); |
136 | goto no_dev; | 136 | goto no_dev; |
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c index 7f2da6953357..6404e6e81b10 100644 --- a/drivers/mtd/nand/plat_nand.c +++ b/drivers/mtd/nand/plat_nand.c | |||
@@ -99,8 +99,9 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | err = mtd_device_parse_register(&data->mtd, | 101 | err = mtd_device_parse_register(&data->mtd, |
102 | pdata->chip.part_probe_types, 0, | 102 | pdata->chip.part_probe_types, NULL, |
103 | pdata->chip.partitions, pdata->chip.nr_partitions); | 103 | pdata->chip.partitions, |
104 | pdata->chip.nr_partitions); | ||
104 | 105 | ||
105 | if (!err) | 106 | if (!err) |
106 | return err; | 107 | return err; |
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c index 7e52af51a198..0ddd90e5788f 100644 --- a/drivers/mtd/nand/ppchameleonevb.c +++ b/drivers/mtd/nand/ppchameleonevb.c | |||
@@ -275,11 +275,10 @@ static int __init ppchameleonevb_init(void) | |||
275 | ppchameleon_mtd->name = "ppchameleon-nand"; | 275 | ppchameleon_mtd->name = "ppchameleon-nand"; |
276 | 276 | ||
277 | /* Register the partitions */ | 277 | /* Register the partitions */ |
278 | mtd_device_parse_register(ppchameleon_mtd, NULL, 0, | 278 | mtd_device_parse_register(ppchameleon_mtd, NULL, NULL, |
279 | ppchameleon_mtd->size == NAND_SMALL_SIZE ? | 279 | ppchameleon_mtd->size == NAND_SMALL_SIZE ? |
280 | partition_info_me : | 280 | partition_info_me : partition_info_hi, |
281 | partition_info_hi, | 281 | NUM_PARTITIONS); |
282 | NUM_PARTITIONS); | ||
283 | 282 | ||
284 | nand_evb_init: | 283 | nand_evb_init: |
285 | /**************************** | 284 | /**************************** |
@@ -365,11 +364,10 @@ static int __init ppchameleonevb_init(void) | |||
365 | ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; | 364 | ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; |
366 | 365 | ||
367 | /* Register the partitions */ | 366 | /* Register the partitions */ |
368 | mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0, | 367 | mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL, |
369 | ppchameleon_mtd->size == NAND_SMALL_SIZE ? | 368 | ppchameleon_mtd->size == NAND_SMALL_SIZE ? |
370 | partition_info_me : | 369 | partition_info_me : partition_info_hi, |
371 | partition_info_hi, | 370 | NUM_PARTITIONS); |
372 | NUM_PARTITIONS); | ||
373 | 371 | ||
374 | /* Return happy */ | 372 | /* Return happy */ |
375 | return 0; | 373 | return 0; |
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 5c3d719c37e6..def50caa6f84 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c | |||
@@ -1002,6 +1002,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) | |||
1002 | KEEP_CONFIG: | 1002 | KEEP_CONFIG: |
1003 | chip->ecc.mode = NAND_ECC_HW; | 1003 | chip->ecc.mode = NAND_ECC_HW; |
1004 | chip->ecc.size = host->page_size; | 1004 | chip->ecc.size = host->page_size; |
1005 | chip->ecc.strength = 1; | ||
1005 | 1006 | ||
1006 | chip->options = NAND_NO_AUTOINCR; | 1007 | chip->options = NAND_NO_AUTOINCR; |
1007 | chip->options |= NAND_NO_READRDY; | 1008 | chip->options |= NAND_NO_READRDY; |
@@ -1228,8 +1229,9 @@ static int pxa3xx_nand_probe(struct platform_device *pdev) | |||
1228 | continue; | 1229 | continue; |
1229 | } | 1230 | } |
1230 | 1231 | ||
1231 | ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0, | 1232 | ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, |
1232 | pdata->parts[cs], pdata->nr_parts[cs]); | 1233 | NULL, pdata->parts[cs], |
1234 | pdata->nr_parts[cs]); | ||
1233 | if (!ret) | 1235 | if (!ret) |
1234 | probe_success = 1; | 1236 | probe_success = 1; |
1235 | } | 1237 | } |
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index 769a4e096b3c..c2040187c813 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c | |||
@@ -891,6 +891,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |||
891 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; | 891 | chip->ecc.mode = NAND_ECC_HW_SYNDROME; |
892 | chip->ecc.size = R852_DMA_LEN; | 892 | chip->ecc.size = R852_DMA_LEN; |
893 | chip->ecc.bytes = SM_OOB_SIZE; | 893 | chip->ecc.bytes = SM_OOB_SIZE; |
894 | chip->ecc.strength = 2; | ||
894 | chip->ecc.hwctl = r852_ecc_hwctl; | 895 | chip->ecc.hwctl = r852_ecc_hwctl; |
895 | chip->ecc.calculate = r852_ecc_calculate; | 896 | chip->ecc.calculate = r852_ecc_calculate; |
896 | chip->ecc.correct = r852_ecc_correct; | 897 | chip->ecc.correct = r852_ecc_correct; |
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c index f309addc2fa0..e55b5cfbe145 100644 --- a/drivers/mtd/nand/rtc_from4.c +++ b/drivers/mtd/nand/rtc_from4.c | |||
@@ -527,6 +527,7 @@ static int __init rtc_from4_init(void) | |||
527 | this->ecc.mode = NAND_ECC_HW_SYNDROME; | 527 | this->ecc.mode = NAND_ECC_HW_SYNDROME; |
528 | this->ecc.size = 512; | 528 | this->ecc.size = 512; |
529 | this->ecc.bytes = 8; | 529 | this->ecc.bytes = 8; |
530 | this->ecc.strength = 3; | ||
530 | /* return the status of extra status and ECC checks */ | 531 | /* return the status of extra status and ECC checks */ |
531 | this->errstat = rtc_from4_errstat; | 532 | this->errstat = rtc_from4_errstat; |
532 | /* set the nand_oobinfo to support FPGA H/W error detection */ | 533 | /* set the nand_oobinfo to support FPGA H/W error detection */ |
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index 868685db6712..91121f33f743 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c | |||
@@ -751,8 +751,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, | |||
751 | if (set) | 751 | if (set) |
752 | mtd->mtd.name = set->name; | 752 | mtd->mtd.name = set->name; |
753 | 753 | ||
754 | return mtd_device_parse_register(&mtd->mtd, NULL, 0, | 754 | return mtd_device_parse_register(&mtd->mtd, NULL, NULL, |
755 | set->partitions, set->nr_partitions); | 755 | set->partitions, set->nr_partitions); |
756 | } | 756 | } |
757 | 757 | ||
758 | /** | 758 | /** |
@@ -823,6 +823,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, | |||
823 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; | 823 | chip->ecc.calculate = s3c2410_nand_calculate_ecc; |
824 | chip->ecc.correct = s3c2410_nand_correct_data; | 824 | chip->ecc.correct = s3c2410_nand_correct_data; |
825 | chip->ecc.mode = NAND_ECC_HW; | 825 | chip->ecc.mode = NAND_ECC_HW; |
826 | chip->ecc.strength = 1; | ||
826 | 827 | ||
827 | switch (info->cpu_type) { | 828 | switch (info->cpu_type) { |
828 | case TYPE_S3C2410: | 829 | case TYPE_S3C2410: |
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index 93b1f74321c2..e9b2b260de3a 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/delay.h> | 26 | #include <linux/delay.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/pm_runtime.h> | ||
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
30 | 31 | ||
31 | #include <linux/mtd/mtd.h> | 32 | #include <linux/mtd/mtd.h> |
@@ -283,7 +284,7 @@ static void write_fiforeg(struct sh_flctl *flctl, int rlen, int offset) | |||
283 | static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) | 284 | static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val) |
284 | { | 285 | { |
285 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 286 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
286 | uint32_t flcmncr_val = readl(FLCMNCR(flctl)) & ~SEL_16BIT; | 287 | uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT; |
287 | uint32_t flcmdcr_val, addr_len_bytes = 0; | 288 | uint32_t flcmdcr_val, addr_len_bytes = 0; |
288 | 289 | ||
289 | /* Set SNAND bit if page size is 2048byte */ | 290 | /* Set SNAND bit if page size is 2048byte */ |
@@ -303,6 +304,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va | |||
303 | break; | 304 | break; |
304 | case NAND_CMD_READ0: | 305 | case NAND_CMD_READ0: |
305 | case NAND_CMD_READOOB: | 306 | case NAND_CMD_READOOB: |
307 | case NAND_CMD_RNDOUT: | ||
306 | addr_len_bytes = flctl->rw_ADRCNT; | 308 | addr_len_bytes = flctl->rw_ADRCNT; |
307 | flcmdcr_val |= CDSRC_E; | 309 | flcmdcr_val |= CDSRC_E; |
308 | if (flctl->chip.options & NAND_BUSWIDTH_16) | 310 | if (flctl->chip.options & NAND_BUSWIDTH_16) |
@@ -320,6 +322,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va | |||
320 | break; | 322 | break; |
321 | case NAND_CMD_READID: | 323 | case NAND_CMD_READID: |
322 | flcmncr_val &= ~SNAND_E; | 324 | flcmncr_val &= ~SNAND_E; |
325 | flcmdcr_val |= CDSRC_E; | ||
323 | addr_len_bytes = ADRCNT_1; | 326 | addr_len_bytes = ADRCNT_1; |
324 | break; | 327 | break; |
325 | case NAND_CMD_STATUS: | 328 | case NAND_CMD_STATUS: |
@@ -513,6 +516,8 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
513 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 516 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
514 | uint32_t read_cmd = 0; | 517 | uint32_t read_cmd = 0; |
515 | 518 | ||
519 | pm_runtime_get_sync(&flctl->pdev->dev); | ||
520 | |||
516 | flctl->read_bytes = 0; | 521 | flctl->read_bytes = 0; |
517 | if (command != NAND_CMD_PAGEPROG) | 522 | if (command != NAND_CMD_PAGEPROG) |
518 | flctl->index = 0; | 523 | flctl->index = 0; |
@@ -525,7 +530,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
525 | execmd_read_page_sector(mtd, page_addr); | 530 | execmd_read_page_sector(mtd, page_addr); |
526 | break; | 531 | break; |
527 | } | 532 | } |
528 | empty_fifo(flctl); | ||
529 | if (flctl->page_size) | 533 | if (flctl->page_size) |
530 | set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) | 534 | set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) |
531 | | command); | 535 | | command); |
@@ -547,7 +551,6 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
547 | break; | 551 | break; |
548 | } | 552 | } |
549 | 553 | ||
550 | empty_fifo(flctl); | ||
551 | if (flctl->page_size) { | 554 | if (flctl->page_size) { |
552 | set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) | 555 | set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8) |
553 | | NAND_CMD_READ0); | 556 | | NAND_CMD_READ0); |
@@ -559,15 +562,35 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
559 | flctl->read_bytes = mtd->oobsize; | 562 | flctl->read_bytes = mtd->oobsize; |
560 | goto read_normal_exit; | 563 | goto read_normal_exit; |
561 | 564 | ||
565 | case NAND_CMD_RNDOUT: | ||
566 | if (flctl->hwecc) | ||
567 | break; | ||
568 | |||
569 | if (flctl->page_size) | ||
570 | set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8) | ||
571 | | command); | ||
572 | else | ||
573 | set_cmd_regs(mtd, command, command); | ||
574 | |||
575 | set_addr(mtd, column, 0); | ||
576 | |||
577 | flctl->read_bytes = mtd->writesize + mtd->oobsize - column; | ||
578 | goto read_normal_exit; | ||
579 | |||
562 | case NAND_CMD_READID: | 580 | case NAND_CMD_READID: |
563 | empty_fifo(flctl); | ||
564 | set_cmd_regs(mtd, command, command); | 581 | set_cmd_regs(mtd, command, command); |
565 | set_addr(mtd, 0, 0); | ||
566 | 582 | ||
567 | flctl->read_bytes = 4; | 583 | /* READID is always performed using an 8-bit bus */ |
584 | if (flctl->chip.options & NAND_BUSWIDTH_16) | ||
585 | column <<= 1; | ||
586 | set_addr(mtd, column, 0); | ||
587 | |||
588 | flctl->read_bytes = 8; | ||
568 | writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ | 589 | writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ |
590 | empty_fifo(flctl); | ||
569 | start_translation(flctl); | 591 | start_translation(flctl); |
570 | read_datareg(flctl, 0); /* read and end */ | 592 | read_fiforeg(flctl, flctl->read_bytes, 0); |
593 | wait_completion(flctl); | ||
571 | break; | 594 | break; |
572 | 595 | ||
573 | case NAND_CMD_ERASE1: | 596 | case NAND_CMD_ERASE1: |
@@ -650,29 +673,55 @@ static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command, | |||
650 | default: | 673 | default: |
651 | break; | 674 | break; |
652 | } | 675 | } |
653 | return; | 676 | goto runtime_exit; |
654 | 677 | ||
655 | read_normal_exit: | 678 | read_normal_exit: |
656 | writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ | 679 | writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */ |
680 | empty_fifo(flctl); | ||
657 | start_translation(flctl); | 681 | start_translation(flctl); |
658 | read_fiforeg(flctl, flctl->read_bytes, 0); | 682 | read_fiforeg(flctl, flctl->read_bytes, 0); |
659 | wait_completion(flctl); | 683 | wait_completion(flctl); |
684 | runtime_exit: | ||
685 | pm_runtime_put_sync(&flctl->pdev->dev); | ||
660 | return; | 686 | return; |
661 | } | 687 | } |
662 | 688 | ||
663 | static void flctl_select_chip(struct mtd_info *mtd, int chipnr) | 689 | static void flctl_select_chip(struct mtd_info *mtd, int chipnr) |
664 | { | 690 | { |
665 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 691 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
666 | uint32_t flcmncr_val = readl(FLCMNCR(flctl)); | 692 | int ret; |
667 | 693 | ||
668 | switch (chipnr) { | 694 | switch (chipnr) { |
669 | case -1: | 695 | case -1: |
670 | flcmncr_val &= ~CE0_ENABLE; | 696 | flctl->flcmncr_base &= ~CE0_ENABLE; |
671 | writel(flcmncr_val, FLCMNCR(flctl)); | 697 | |
698 | pm_runtime_get_sync(&flctl->pdev->dev); | ||
699 | writel(flctl->flcmncr_base, FLCMNCR(flctl)); | ||
700 | |||
701 | if (flctl->qos_request) { | ||
702 | dev_pm_qos_remove_request(&flctl->pm_qos); | ||
703 | flctl->qos_request = 0; | ||
704 | } | ||
705 | |||
706 | pm_runtime_put_sync(&flctl->pdev->dev); | ||
672 | break; | 707 | break; |
673 | case 0: | 708 | case 0: |
674 | flcmncr_val |= CE0_ENABLE; | 709 | flctl->flcmncr_base |= CE0_ENABLE; |
675 | writel(flcmncr_val, FLCMNCR(flctl)); | 710 | |
711 | if (!flctl->qos_request) { | ||
712 | ret = dev_pm_qos_add_request(&flctl->pdev->dev, | ||
713 | &flctl->pm_qos, 100); | ||
714 | if (ret < 0) | ||
715 | dev_err(&flctl->pdev->dev, | ||
716 | "PM QoS request failed: %d\n", ret); | ||
717 | flctl->qos_request = 1; | ||
718 | } | ||
719 | |||
720 | if (flctl->holden) { | ||
721 | pm_runtime_get_sync(&flctl->pdev->dev); | ||
722 | writel(HOLDEN, FLHOLDCR(flctl)); | ||
723 | pm_runtime_put_sync(&flctl->pdev->dev); | ||
724 | } | ||
676 | break; | 725 | break; |
677 | default: | 726 | default: |
678 | BUG(); | 727 | BUG(); |
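Note: flctl_select_chip() now brackets every register access with pm_runtime_get_sync()/pm_runtime_put_sync() and holds a device PM QoS request only while the chip is selected. A stripped-down sketch of that bracketing pattern; the foo_* names and the container struct are hypothetical, and the QoS value of 100 simply mirrors the one used above:

	#include <linux/device.h>
	#include <linux/pm_qos.h>
	#include <linux/pm_runtime.h>

	struct foo_ctrl {				/* hypothetical container */
		struct device *dev;
		struct dev_pm_qos_request pm_qos;
		bool qos_active;
	};

	static void foo_select_chip(struct foo_ctrl *c, bool select)
	{
		pm_runtime_get_sync(c->dev);		/* power the block up */

		if (select && !c->qos_active) {
			/* keep resume latency low while the chip is in use */
			if (dev_pm_qos_add_request(c->dev, &c->pm_qos, 100) >= 0)
				c->qos_active = true;
		} else if (!select && c->qos_active) {
			dev_pm_qos_remove_request(&c->pm_qos);
			c->qos_active = false;
		}

		/* ... program chip-enable / hold bits in the controller here ... */

		pm_runtime_put_sync(c->dev);		/* let runtime PM idle it again */
	}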
@@ -730,11 +779,6 @@ static int flctl_verify_buf(struct mtd_info *mtd, const u_char *buf, int len) | |||
730 | return 0; | 779 | return 0; |
731 | } | 780 | } |
732 | 781 | ||
733 | static void flctl_register_init(struct sh_flctl *flctl, unsigned long val) | ||
734 | { | ||
735 | writel(val, FLCMNCR(flctl)); | ||
736 | } | ||
737 | |||
738 | static int flctl_chip_init_tail(struct mtd_info *mtd) | 782 | static int flctl_chip_init_tail(struct mtd_info *mtd) |
739 | { | 783 | { |
740 | struct sh_flctl *flctl = mtd_to_flctl(mtd); | 784 | struct sh_flctl *flctl = mtd_to_flctl(mtd); |
@@ -781,13 +825,13 @@ static int flctl_chip_init_tail(struct mtd_info *mtd) | |||
781 | 825 | ||
782 | chip->ecc.size = 512; | 826 | chip->ecc.size = 512; |
783 | chip->ecc.bytes = 10; | 827 | chip->ecc.bytes = 10; |
828 | chip->ecc.strength = 4; | ||
784 | chip->ecc.read_page = flctl_read_page_hwecc; | 829 | chip->ecc.read_page = flctl_read_page_hwecc; |
785 | chip->ecc.write_page = flctl_write_page_hwecc; | 830 | chip->ecc.write_page = flctl_write_page_hwecc; |
786 | chip->ecc.mode = NAND_ECC_HW; | 831 | chip->ecc.mode = NAND_ECC_HW; |
787 | 832 | ||
788 | /* 4 symbols ECC enabled */ | 833 | /* 4 symbols ECC enabled */ |
789 | writel(readl(FLCMNCR(flctl)) | _4ECCEN | ECCPOS2 | ECCPOS_02, | 834 | flctl->flcmncr_base |= _4ECCEN | ECCPOS2 | ECCPOS_02; |
790 | FLCMNCR(flctl)); | ||
791 | } else { | 835 | } else { |
792 | chip->ecc.mode = NAND_ECC_SOFT; | 836 | chip->ecc.mode = NAND_ECC_SOFT; |
793 | } | 837 | } |
@@ -819,13 +863,13 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
819 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 863 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
820 | if (!res) { | 864 | if (!res) { |
821 | dev_err(&pdev->dev, "failed to get I/O memory\n"); | 865 | dev_err(&pdev->dev, "failed to get I/O memory\n"); |
822 | goto err; | 866 | goto err_iomap; |
823 | } | 867 | } |
824 | 868 | ||
825 | flctl->reg = ioremap(res->start, resource_size(res)); | 869 | flctl->reg = ioremap(res->start, resource_size(res)); |
826 | if (flctl->reg == NULL) { | 870 | if (flctl->reg == NULL) { |
827 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); | 871 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); |
828 | goto err; | 872 | goto err_iomap; |
829 | } | 873 | } |
830 | 874 | ||
831 | platform_set_drvdata(pdev, flctl); | 875 | platform_set_drvdata(pdev, flctl); |
@@ -833,9 +877,9 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
833 | nand = &flctl->chip; | 877 | nand = &flctl->chip; |
834 | flctl_mtd->priv = nand; | 878 | flctl_mtd->priv = nand; |
835 | flctl->pdev = pdev; | 879 | flctl->pdev = pdev; |
880 | flctl->flcmncr_base = pdata->flcmncr_val; | ||
836 | flctl->hwecc = pdata->has_hwecc; | 881 | flctl->hwecc = pdata->has_hwecc; |
837 | 882 | flctl->holden = pdata->use_holden; | |
838 | flctl_register_init(flctl, pdata->flcmncr_val); | ||
839 | 883 | ||
840 | nand->options = NAND_NO_AUTOINCR; | 884 | nand->options = NAND_NO_AUTOINCR; |
841 | 885 | ||
@@ -855,23 +899,28 @@ static int __devinit flctl_probe(struct platform_device *pdev) | |||
855 | nand->read_word = flctl_read_word; | 899 | nand->read_word = flctl_read_word; |
856 | } | 900 | } |
857 | 901 | ||
902 | pm_runtime_enable(&pdev->dev); | ||
903 | pm_runtime_resume(&pdev->dev); | ||
904 | |||
858 | ret = nand_scan_ident(flctl_mtd, 1, NULL); | 905 | ret = nand_scan_ident(flctl_mtd, 1, NULL); |
859 | if (ret) | 906 | if (ret) |
860 | goto err; | 907 | goto err_chip; |
861 | 908 | ||
862 | ret = flctl_chip_init_tail(flctl_mtd); | 909 | ret = flctl_chip_init_tail(flctl_mtd); |
863 | if (ret) | 910 | if (ret) |
864 | goto err; | 911 | goto err_chip; |
865 | 912 | ||
866 | ret = nand_scan_tail(flctl_mtd); | 913 | ret = nand_scan_tail(flctl_mtd); |
867 | if (ret) | 914 | if (ret) |
868 | goto err; | 915 | goto err_chip; |
869 | 916 | ||
870 | mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts); | 917 | mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts); |
871 | 918 | ||
872 | return 0; | 919 | return 0; |
873 | 920 | ||
874 | err: | 921 | err_chip: |
922 | pm_runtime_disable(&pdev->dev); | ||
923 | err_iomap: | ||
875 | kfree(flctl); | 924 | kfree(flctl); |
876 | return ret; | 925 | return ret; |
877 | } | 926 | } |
@@ -881,6 +930,7 @@ static int __devexit flctl_remove(struct platform_device *pdev) | |||
881 | struct sh_flctl *flctl = platform_get_drvdata(pdev); | 930 | struct sh_flctl *flctl = platform_get_drvdata(pdev); |
882 | 931 | ||
883 | nand_release(&flctl->mtd); | 932 | nand_release(&flctl->mtd); |
933 | pm_runtime_disable(&pdev->dev); | ||
884 | kfree(flctl); | 934 | kfree(flctl); |
885 | 935 | ||
886 | return 0; | 936 | return 0; |
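Note: the sh_flctl hunks above fold FLCMNCR programming into a cached flctl->flcmncr_base value that per-chip bits (CE0_ENABLE, and the ECC enables in flctl_chip_init_tail) are OR-ed into, and they add power management around chip selection: a device PM QoS request (value 100, as in the hunk) while chip 0 is selected, and a runtime-PM reference held only around the FLHOLDCR access. A minimal sketch of that select/deselect pattern follows; it is an illustration only, with the struct fields and register macros assumed from include/linux/mtd/sh_flctl.h, not the driver source.

	/* Sketch only: field names and FLHOLDCR()/CE0_ENABLE/HOLDEN assumed
	 * from sh_flctl.h; error handling kept minimal. */
	static void flctl_select_sketch(struct sh_flctl *flctl, bool select)
	{
		struct device *dev = &flctl->pdev->dev;

		if (select) {
			flctl->flcmncr_base |= CE0_ENABLE;	/* remember the CE bit */

			/* add a device PM QoS constraint while the chip is in use */
			if (!flctl->qos_request) {
				if (dev_pm_qos_add_request(dev, &flctl->pm_qos, 100) < 0)
					dev_err(dev, "PM QoS request failed\n");
				flctl->qos_request = 1;
			}

			if (flctl->holden) {	/* optional hardware hold signal */
				pm_runtime_get_sync(dev);
				writel(HOLDEN, FLHOLDCR(flctl));
				pm_runtime_put_sync(dev);
			}
		} else {
			flctl->flcmncr_base &= ~CE0_ENABLE;
			if (flctl->qos_request) {
				dev_pm_qos_remove_request(&flctl->pm_qos);
				flctl->qos_request = 0;
			}
		}
	}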
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c index b175c0fd8b93..3421e3762a5a 100644 --- a/drivers/mtd/nand/sharpsl.c +++ b/drivers/mtd/nand/sharpsl.c | |||
@@ -167,6 +167,7 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev) | |||
167 | this->ecc.mode = NAND_ECC_HW; | 167 | this->ecc.mode = NAND_ECC_HW; |
168 | this->ecc.size = 256; | 168 | this->ecc.size = 256; |
169 | this->ecc.bytes = 3; | 169 | this->ecc.bytes = 3; |
170 | this->ecc.strength = 1; | ||
170 | this->badblock_pattern = data->badblock_pattern; | 171 | this->badblock_pattern = data->badblock_pattern; |
171 | this->ecc.layout = data->ecc_layout; | 172 | this->ecc.layout = data->ecc_layout; |
172 | this->ecc.hwctl = sharpsl_nand_enable_hwecc; | 173 | this->ecc.hwctl = sharpsl_nand_enable_hwecc; |
@@ -181,8 +182,8 @@ static int __devinit sharpsl_nand_probe(struct platform_device *pdev) | |||
181 | /* Register the partitions */ | 182 | /* Register the partitions */ |
182 | sharpsl->mtd.name = "sharpsl-nand"; | 183 | sharpsl->mtd.name = "sharpsl-nand"; |
183 | 184 | ||
184 | err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0, | 185 | err = mtd_device_parse_register(&sharpsl->mtd, NULL, NULL, |
185 | data->partitions, data->nr_partitions); | 186 | data->partitions, data->nr_partitions); |
186 | if (err) | 187 | if (err) |
187 | goto err_add; | 188 | goto err_add; |
188 | 189 | ||
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index 6caa0cd9d6a7..5aa518081c51 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c | |||
@@ -430,6 +430,7 @@ static int tmio_probe(struct platform_device *dev) | |||
430 | nand_chip->ecc.mode = NAND_ECC_HW; | 430 | nand_chip->ecc.mode = NAND_ECC_HW; |
431 | nand_chip->ecc.size = 512; | 431 | nand_chip->ecc.size = 512; |
432 | nand_chip->ecc.bytes = 6; | 432 | nand_chip->ecc.bytes = 6; |
433 | nand_chip->ecc.strength = 2; | ||
433 | nand_chip->ecc.hwctl = tmio_nand_enable_hwecc; | 434 | nand_chip->ecc.hwctl = tmio_nand_enable_hwecc; |
434 | nand_chip->ecc.calculate = tmio_nand_calculate_ecc; | 435 | nand_chip->ecc.calculate = tmio_nand_calculate_ecc; |
435 | nand_chip->ecc.correct = tmio_nand_correct_data; | 436 | nand_chip->ecc.correct = tmio_nand_correct_data; |
@@ -456,9 +457,9 @@ static int tmio_probe(struct platform_device *dev) | |||
456 | goto err_scan; | 457 | goto err_scan; |
457 | } | 458 | } |
458 | /* Register the partitions */ | 459 | /* Register the partitions */ |
459 | retval = mtd_device_parse_register(mtd, NULL, 0, | 460 | retval = mtd_device_parse_register(mtd, NULL, NULL, |
460 | data ? data->partition : NULL, | 461 | data ? data->partition : NULL, |
461 | data ? data->num_partitions : 0); | 462 | data ? data->num_partitions : 0); |
462 | if (!retval) | 463 | if (!retval) |
463 | return retval; | 464 | return retval; |
464 | 465 | ||
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c index c7c4f1d11c77..26398dcf21cf 100644 --- a/drivers/mtd/nand/txx9ndfmc.c +++ b/drivers/mtd/nand/txx9ndfmc.c | |||
@@ -356,6 +356,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) | |||
356 | /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */ | 356 | /* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */ |
357 | chip->ecc.size = 256; | 357 | chip->ecc.size = 256; |
358 | chip->ecc.bytes = 3; | 358 | chip->ecc.bytes = 3; |
359 | chip->ecc.strength = 1; | ||
359 | chip->chip_delay = 100; | 360 | chip->chip_delay = 100; |
360 | chip->controller = &drvdata->hw_control; | 361 | chip->controller = &drvdata->hw_control; |
361 | 362 | ||
@@ -386,7 +387,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev) | |||
386 | } | 387 | } |
387 | mtd->name = txx9_priv->mtdname; | 388 | mtd->name = txx9_priv->mtdname; |
388 | 389 | ||
389 | mtd_device_parse_register(mtd, NULL, 0, NULL, 0); | 390 | mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); |
390 | drvdata->mtds[i] = mtd; | 391 | drvdata->mtds[i] = mtd; |
391 | } | 392 | } |
392 | 393 | ||
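Note: several NAND hunks above (sh_flctl, sharpsl, tmio_nand, txx9ndfmc) add a chip->ecc.strength assignment next to the existing ecc.size/ecc.bytes pair. The new field states how many bit errors the ECC scheme can correct per ecc.size-byte step, which the core can then expose as a per-device ecc_strength (compare the onenand_base hunk further down, which sets mtd->ecc_strength directly). Illustrative values for a classic 1-bit Hamming setup, the same numbers the sharpsl and txx9ndfmc hunks use:

	chip->ecc.size     = 256;	/* data bytes covered by one ECC step */
	chip->ecc.bytes    = 3;		/* ECC bytes generated per step       */
	chip->ecc.strength = 1;		/* correctable bit errors per step    */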
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index a75382aff5f6..c5f4ebf4b384 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c | |||
@@ -56,13 +56,6 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |||
56 | if (memcmp(mtd->name, "DiskOnChip", 10)) | 56 | if (memcmp(mtd->name, "DiskOnChip", 10)) |
57 | return; | 57 | return; |
58 | 58 | ||
59 | if (!mtd_can_have_bb(mtd)) { | ||
60 | printk(KERN_ERR | ||
61 | "NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n" | ||
62 | "Please use the new diskonchip driver under the NAND subsystem.\n"); | ||
63 | return; | ||
64 | } | ||
65 | |||
66 | pr_debug("NFTL: add_mtd for %s\n", mtd->name); | 59 | pr_debug("NFTL: add_mtd for %s\n", mtd->name); |
67 | 60 | ||
68 | nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); | 61 | nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL); |
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index 0ccd5bff2544..1c4f97c63e62 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c | |||
@@ -70,9 +70,9 @@ static int __devinit generic_onenand_probe(struct platform_device *pdev) | |||
70 | goto out_iounmap; | 70 | goto out_iounmap; |
71 | } | 71 | } |
72 | 72 | ||
73 | err = mtd_device_parse_register(&info->mtd, NULL, 0, | 73 | err = mtd_device_parse_register(&info->mtd, NULL, NULL, |
74 | pdata ? pdata->parts : NULL, | 74 | pdata ? pdata->parts : NULL, |
75 | pdata ? pdata->nr_parts : 0); | 75 | pdata ? pdata->nr_parts : 0); |
76 | 76 | ||
77 | platform_set_drvdata(pdev, info); | 77 | platform_set_drvdata(pdev, info); |
78 | 78 | ||
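Note: the 0 -> NULL changes in the mtd_device_parse_register() calls (sharpsl, tmio_nand, txx9ndfmc, the OneNAND drivers) track a signature change: the third argument is now a pointer to struct mtd_part_parser_data that partition parsers may consult, so the placeholder must be NULL rather than a bare integer. A hedged example of a caller that actually fills it in; the .of_node field name is assumed from the partitions header of this era, not taken from this patch:

	struct mtd_part_parser_data ppdata = {
		.of_node = pdev->dev.of_node,	/* lets e.g. the "ofpart" parser
						   look up partition sub-nodes */
	};

	err = mtd_device_parse_register(&info->mtd, NULL, &ppdata,
					pdata ? pdata->parts : NULL,
					pdata ? pdata->nr_parts : 0);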
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index 7e9ea6852b67..398a82783848 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -751,9 +751,9 @@ static int __devinit omap2_onenand_probe(struct platform_device *pdev) | |||
751 | if ((r = onenand_scan(&c->mtd, 1)) < 0) | 751 | if ((r = onenand_scan(&c->mtd, 1)) < 0) |
752 | goto err_release_regulator; | 752 | goto err_release_regulator; |
753 | 753 | ||
754 | r = mtd_device_parse_register(&c->mtd, NULL, 0, | 754 | r = mtd_device_parse_register(&c->mtd, NULL, NULL, |
755 | pdata ? pdata->parts : NULL, | 755 | pdata ? pdata->parts : NULL, |
756 | pdata ? pdata->nr_parts : 0); | 756 | pdata ? pdata->nr_parts : 0); |
757 | if (r) | 757 | if (r) |
758 | goto err_release_onenand; | 758 | goto err_release_onenand; |
759 | 759 | ||
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index a061bc163da2..b3ce12ef359e 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -1753,16 +1753,6 @@ static int onenand_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1753 | pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, | 1753 | pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to, |
1754 | (int)len); | 1754 | (int)len); |
1755 | 1755 | ||
1756 | /* Initialize retlen, in case of early exit */ | ||
1757 | *retlen = 0; | ||
1758 | |||
1759 | /* Do not allow writes past end of device */ | ||
1760 | if (unlikely((to + len) > mtd->size)) { | ||
1761 | printk(KERN_ERR "%s: Attempt write to past end of device\n", | ||
1762 | __func__); | ||
1763 | return -EINVAL; | ||
1764 | } | ||
1765 | |||
1766 | /* Reject writes, which are not page aligned */ | 1756 | /* Reject writes, which are not page aligned */ |
1767 | if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { | 1757 | if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { |
1768 | printk(KERN_ERR "%s: Attempt to write not page aligned data\n", | 1758 | printk(KERN_ERR "%s: Attempt to write not page aligned data\n", |
@@ -1890,13 +1880,6 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, | |||
1890 | ops->retlen = 0; | 1880 | ops->retlen = 0; |
1891 | ops->oobretlen = 0; | 1881 | ops->oobretlen = 0; |
1892 | 1882 | ||
1893 | /* Do not allow writes past end of device */ | ||
1894 | if (unlikely((to + len) > mtd->size)) { | ||
1895 | printk(KERN_ERR "%s: Attempt write to past end of device\n", | ||
1896 | __func__); | ||
1897 | return -EINVAL; | ||
1898 | } | ||
1899 | |||
1900 | /* Reject writes, which are not page aligned */ | 1883 | /* Reject writes, which are not page aligned */ |
1901 | if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { | 1884 | if (unlikely(NOTALIGNED(to) || NOTALIGNED(len))) { |
1902 | printk(KERN_ERR "%s: Attempt to write not page aligned data\n", | 1885 | printk(KERN_ERR "%s: Attempt to write not page aligned data\n", |
@@ -2493,12 +2476,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
2493 | (unsigned long long)instr->addr, | 2476 | (unsigned long long)instr->addr, |
2494 | (unsigned long long)instr->len); | 2477 | (unsigned long long)instr->len); |
2495 | 2478 | ||
2496 | /* Do not allow erase past end of device */ | ||
2497 | if (unlikely((len + addr) > mtd->size)) { | ||
2498 | printk(KERN_ERR "%s: Erase past end of device\n", __func__); | ||
2499 | return -EINVAL; | ||
2500 | } | ||
2501 | |||
2502 | if (FLEXONENAND(this)) { | 2479 | if (FLEXONENAND(this)) { |
2503 | /* Find the eraseregion of this address */ | 2480 | /* Find the eraseregion of this address */ |
2504 | int i = flexonenand_region(mtd, addr); | 2481 | int i = flexonenand_region(mtd, addr); |
@@ -2525,8 +2502,6 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
2525 | return -EINVAL; | 2502 | return -EINVAL; |
2526 | } | 2503 | } |
2527 | 2504 | ||
2528 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; | ||
2529 | |||
2530 | /* Grab the lock and see if the device is available */ | 2505 | /* Grab the lock and see if the device is available */ |
2531 | onenand_get_device(mtd, FL_ERASING); | 2506 | onenand_get_device(mtd, FL_ERASING); |
2532 | 2507 | ||
@@ -4103,33 +4078,34 @@ int onenand_scan(struct mtd_info *mtd, int maxchips) | |||
4103 | mtd->oobavail = this->ecclayout->oobavail; | 4078 | mtd->oobavail = this->ecclayout->oobavail; |
4104 | 4079 | ||
4105 | mtd->ecclayout = this->ecclayout; | 4080 | mtd->ecclayout = this->ecclayout; |
4081 | mtd->ecc_strength = 1; | ||
4106 | 4082 | ||
4107 | /* Fill in remaining MTD driver data */ | 4083 | /* Fill in remaining MTD driver data */ |
4108 | mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH; | 4084 | mtd->type = ONENAND_IS_MLC(this) ? MTD_MLCNANDFLASH : MTD_NANDFLASH; |
4109 | mtd->flags = MTD_CAP_NANDFLASH; | 4085 | mtd->flags = MTD_CAP_NANDFLASH; |
4110 | mtd->erase = onenand_erase; | 4086 | mtd->_erase = onenand_erase; |
4111 | mtd->point = NULL; | 4087 | mtd->_point = NULL; |
4112 | mtd->unpoint = NULL; | 4088 | mtd->_unpoint = NULL; |
4113 | mtd->read = onenand_read; | 4089 | mtd->_read = onenand_read; |
4114 | mtd->write = onenand_write; | 4090 | mtd->_write = onenand_write; |
4115 | mtd->read_oob = onenand_read_oob; | 4091 | mtd->_read_oob = onenand_read_oob; |
4116 | mtd->write_oob = onenand_write_oob; | 4092 | mtd->_write_oob = onenand_write_oob; |
4117 | mtd->panic_write = onenand_panic_write; | 4093 | mtd->_panic_write = onenand_panic_write; |
4118 | #ifdef CONFIG_MTD_ONENAND_OTP | 4094 | #ifdef CONFIG_MTD_ONENAND_OTP |
4119 | mtd->get_fact_prot_info = onenand_get_fact_prot_info; | 4095 | mtd->_get_fact_prot_info = onenand_get_fact_prot_info; |
4120 | mtd->read_fact_prot_reg = onenand_read_fact_prot_reg; | 4096 | mtd->_read_fact_prot_reg = onenand_read_fact_prot_reg; |
4121 | mtd->get_user_prot_info = onenand_get_user_prot_info; | 4097 | mtd->_get_user_prot_info = onenand_get_user_prot_info; |
4122 | mtd->read_user_prot_reg = onenand_read_user_prot_reg; | 4098 | mtd->_read_user_prot_reg = onenand_read_user_prot_reg; |
4123 | mtd->write_user_prot_reg = onenand_write_user_prot_reg; | 4099 | mtd->_write_user_prot_reg = onenand_write_user_prot_reg; |
4124 | mtd->lock_user_prot_reg = onenand_lock_user_prot_reg; | 4100 | mtd->_lock_user_prot_reg = onenand_lock_user_prot_reg; |
4125 | #endif | 4101 | #endif |
4126 | mtd->sync = onenand_sync; | 4102 | mtd->_sync = onenand_sync; |
4127 | mtd->lock = onenand_lock; | 4103 | mtd->_lock = onenand_lock; |
4128 | mtd->unlock = onenand_unlock; | 4104 | mtd->_unlock = onenand_unlock; |
4129 | mtd->suspend = onenand_suspend; | 4105 | mtd->_suspend = onenand_suspend; |
4130 | mtd->resume = onenand_resume; | 4106 | mtd->_resume = onenand_resume; |
4131 | mtd->block_isbad = onenand_block_isbad; | 4107 | mtd->_block_isbad = onenand_block_isbad; |
4132 | mtd->block_markbad = onenand_block_markbad; | 4108 | mtd->_block_markbad = onenand_block_markbad; |
4133 | mtd->owner = THIS_MODULE; | 4109 | mtd->owner = THIS_MODULE; |
4134 | mtd->writebufsize = mtd->writesize; | 4110 | mtd->writebufsize = mtd->writesize; |
4135 | 4111 | ||
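Note: the wholesale rename of mtd->read/write/erase/... to the underscored _read/_write/_erase members in onenand_base (and in gluebi and the sfc driver further down) follows the MTD API rework in which callers go through mtd_read()/mtd_write()/mtd_erase() wrappers and the underscored fields are the driver backends those wrappers invoke. That is also why the per-driver "past end of device" and MTD_WRITEABLE checks are deleted in the hunks above: the wrapper performs them once, centrally. Roughly, and only as a sketch (the real code lives in drivers/mtd/mtdcore.c, not in this patch):

	int mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		      size_t *retlen, const u_char *buf)
	{
		*retlen = 0;
		if (to < 0 || to > mtd->size || len > mtd->size - to)
			return -EINVAL;			/* past end of device */
		if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
			return -EROFS;
		return len ? mtd->_write(mtd, to, len, retlen, buf) : 0;
	}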
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c index fa1ee43f735b..8e4b3f2742ba 100644 --- a/drivers/mtd/onenand/samsung.c +++ b/drivers/mtd/onenand/samsung.c | |||
@@ -923,7 +923,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) | |||
923 | r = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 923 | r = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
924 | if (!r) { | 924 | if (!r) { |
925 | dev_err(&pdev->dev, "no buffer memory resource defined\n"); | 925 | dev_err(&pdev->dev, "no buffer memory resource defined\n"); |
926 | return -ENOENT; | 926 | err = -ENOENT; |
927 | goto ahb_resource_failed; | 927 | goto ahb_resource_failed; |
928 | } | 928 | } |
929 | 929 | ||
@@ -964,7 +964,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) | |||
964 | r = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 964 | r = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
965 | if (!r) { | 965 | if (!r) { |
966 | dev_err(&pdev->dev, "no dma memory resource defined\n"); | 966 | dev_err(&pdev->dev, "no dma memory resource defined\n"); |
967 | return -ENOENT; | 967 | err = -ENOENT; |
968 | goto dma_resource_failed; | 968 | goto dma_resource_failed; |
969 | } | 969 | } |
970 | 970 | ||
@@ -1014,7 +1014,7 @@ static int s3c_onenand_probe(struct platform_device *pdev) | |||
1014 | if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) | 1014 | if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ) |
1015 | dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); | 1015 | dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n"); |
1016 | 1016 | ||
1017 | err = mtd_device_parse_register(mtd, NULL, 0, | 1017 | err = mtd_device_parse_register(mtd, NULL, NULL, |
1018 | pdata ? pdata->parts : NULL, | 1018 | pdata ? pdata->parts : NULL, |
1019 | pdata ? pdata->nr_parts : 0); | 1019 | pdata ? pdata->nr_parts : 0); |
1020 | 1020 | ||
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c index 48970c14beff..580035c803d6 100644 --- a/drivers/mtd/redboot.c +++ b/drivers/mtd/redboot.c | |||
@@ -78,8 +78,7 @@ static int parse_redboot_partitions(struct mtd_info *master, | |||
78 | 78 | ||
79 | if ( directory < 0 ) { | 79 | if ( directory < 0 ) { |
80 | offset = master->size + directory * master->erasesize; | 80 | offset = master->size + directory * master->erasesize; |
81 | while (mtd_can_have_bb(master) && | 81 | while (mtd_block_isbad(master, offset)) { |
82 | mtd_block_isbad(master, offset)) { | ||
83 | if (!offset) { | 82 | if (!offset) { |
84 | nogood: | 83 | nogood: |
85 | printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); | 84 | printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); |
@@ -89,8 +88,7 @@ static int parse_redboot_partitions(struct mtd_info *master, | |||
89 | } | 88 | } |
90 | } else { | 89 | } else { |
91 | offset = directory * master->erasesize; | 90 | offset = directory * master->erasesize; |
92 | while (mtd_can_have_bb(master) && | 91 | while (mtd_block_isbad(master, offset)) { |
93 | mtd_block_isbad(master, offset)) { | ||
94 | offset += master->erasesize; | 92 | offset += master->erasesize; |
95 | if (offset == master->size) | 93 | if (offset == master->size) |
96 | goto nogood; | 94 | goto nogood; |
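Note: the mtd_can_have_bb() guard dropped from the RedBoot parser above is redundant for the same reason: mtd_block_isbad() is itself a core wrapper now, and it simply reports "not bad" when the device provides no _block_isbad method. Sketch of that behaviour (again not the mtdcore source, just its shape):

	int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
	{
		if (!mtd->_block_isbad)
			return 0;		/* device cannot have bad blocks */
		if (ofs < 0 || ofs > mtd->size)
			return -EINVAL;
		return mtd->_block_isbad(mtd, ofs);
	}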
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 072ed5970e2f..9e2dfd517aa5 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -1256,7 +1256,7 @@ static void sm_remove_dev(struct mtd_blktrans_dev *dev) | |||
1256 | 1256 | ||
1257 | static struct mtd_blktrans_ops sm_ftl_ops = { | 1257 | static struct mtd_blktrans_ops sm_ftl_ops = { |
1258 | .name = "smblk", | 1258 | .name = "smblk", |
1259 | .major = -1, | 1259 | .major = 0, |
1260 | .part_bits = SM_FTL_PARTN_BITS, | 1260 | .part_bits = SM_FTL_PARTN_BITS, |
1261 | .blksize = SM_SECTOR_SIZE, | 1261 | .blksize = SM_SECTOR_SIZE, |
1262 | .getgeo = sm_getgeo, | 1262 | .getgeo = sm_getgeo, |
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c index 941bc3c05d6e..90b98822d9a4 100644 --- a/drivers/mtd/ubi/gluebi.c +++ b/drivers/mtd/ubi/gluebi.c | |||
@@ -174,11 +174,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
174 | int err = 0, lnum, offs, total_read; | 174 | int err = 0, lnum, offs, total_read; |
175 | struct gluebi_device *gluebi; | 175 | struct gluebi_device *gluebi; |
176 | 176 | ||
177 | if (len < 0 || from < 0 || from + len > mtd->size) | ||
178 | return -EINVAL; | ||
179 | |||
180 | gluebi = container_of(mtd, struct gluebi_device, mtd); | 177 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
181 | |||
182 | lnum = div_u64_rem(from, mtd->erasesize, &offs); | 178 | lnum = div_u64_rem(from, mtd->erasesize, &offs); |
183 | total_read = len; | 179 | total_read = len; |
184 | while (total_read) { | 180 | while (total_read) { |
@@ -218,14 +214,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
218 | int err = 0, lnum, offs, total_written; | 214 | int err = 0, lnum, offs, total_written; |
219 | struct gluebi_device *gluebi; | 215 | struct gluebi_device *gluebi; |
220 | 216 | ||
221 | if (len < 0 || to < 0 || len + to > mtd->size) | ||
222 | return -EINVAL; | ||
223 | |||
224 | gluebi = container_of(mtd, struct gluebi_device, mtd); | 217 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
225 | |||
226 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
227 | return -EROFS; | ||
228 | |||
229 | lnum = div_u64_rem(to, mtd->erasesize, &offs); | 218 | lnum = div_u64_rem(to, mtd->erasesize, &offs); |
230 | 219 | ||
231 | if (len % mtd->writesize || offs % mtd->writesize) | 220 | if (len % mtd->writesize || offs % mtd->writesize) |
@@ -265,21 +254,13 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | |||
265 | int err, i, lnum, count; | 254 | int err, i, lnum, count; |
266 | struct gluebi_device *gluebi; | 255 | struct gluebi_device *gluebi; |
267 | 256 | ||
268 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) | ||
269 | return -EINVAL; | ||
270 | if (instr->len < 0 || instr->addr + instr->len > mtd->size) | ||
271 | return -EINVAL; | ||
272 | if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) | 257 | if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) |
273 | return -EINVAL; | 258 | return -EINVAL; |
274 | 259 | ||
275 | lnum = mtd_div_by_eb(instr->addr, mtd); | 260 | lnum = mtd_div_by_eb(instr->addr, mtd); |
276 | count = mtd_div_by_eb(instr->len, mtd); | 261 | count = mtd_div_by_eb(instr->len, mtd); |
277 | |||
278 | gluebi = container_of(mtd, struct gluebi_device, mtd); | 262 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
279 | 263 | ||
280 | if (!(mtd->flags & MTD_WRITEABLE)) | ||
281 | return -EROFS; | ||
282 | |||
283 | for (i = 0; i < count - 1; i++) { | 264 | for (i = 0; i < count - 1; i++) { |
284 | err = ubi_leb_unmap(gluebi->desc, lnum + i); | 265 | err = ubi_leb_unmap(gluebi->desc, lnum + i); |
285 | if (err) | 266 | if (err) |
@@ -340,11 +321,11 @@ static int gluebi_create(struct ubi_device_info *di, | |||
340 | mtd->owner = THIS_MODULE; | 321 | mtd->owner = THIS_MODULE; |
341 | mtd->writesize = di->min_io_size; | 322 | mtd->writesize = di->min_io_size; |
342 | mtd->erasesize = vi->usable_leb_size; | 323 | mtd->erasesize = vi->usable_leb_size; |
343 | mtd->read = gluebi_read; | 324 | mtd->_read = gluebi_read; |
344 | mtd->write = gluebi_write; | 325 | mtd->_write = gluebi_write; |
345 | mtd->erase = gluebi_erase; | 326 | mtd->_erase = gluebi_erase; |
346 | mtd->get_device = gluebi_get_device; | 327 | mtd->_get_device = gluebi_get_device; |
347 | mtd->put_device = gluebi_put_device; | 328 | mtd->_put_device = gluebi_put_device; |
348 | 329 | ||
349 | /* | 330 | /* |
350 | * In case of dynamic a volume, MTD device size is just volume size. In | 331 | * In case of dynamic a volume, MTD device size is just volume size. In |
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index 26b3c23b0b6f..758148379b0e 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c | |||
@@ -193,7 +193,7 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase) | |||
193 | erase->state = MTD_ERASE_DONE; | 193 | erase->state = MTD_ERASE_DONE; |
194 | } else { | 194 | } else { |
195 | erase->state = MTD_ERASE_FAILED; | 195 | erase->state = MTD_ERASE_FAILED; |
196 | erase->fail_addr = 0xffffffff; | 196 | erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN; |
197 | } | 197 | } |
198 | mtd_erase_callback(erase); | 198 | mtd_erase_callback(erase); |
199 | return rc; | 199 | return rc; |
@@ -263,10 +263,10 @@ static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd) | |||
263 | part->mtd.owner = THIS_MODULE; | 263 | part->mtd.owner = THIS_MODULE; |
264 | part->mtd.priv = efx_mtd; | 264 | part->mtd.priv = efx_mtd; |
265 | part->mtd.name = part->name; | 265 | part->mtd.name = part->name; |
266 | part->mtd.erase = efx_mtd_erase; | 266 | part->mtd._erase = efx_mtd_erase; |
267 | part->mtd.read = efx_mtd->ops->read; | 267 | part->mtd._read = efx_mtd->ops->read; |
268 | part->mtd.write = efx_mtd->ops->write; | 268 | part->mtd._write = efx_mtd->ops->write; |
269 | part->mtd.sync = efx_mtd_sync; | 269 | part->mtd._sync = efx_mtd_sync; |
270 | 270 | ||
271 | if (mtd_device_register(&part->mtd, NULL, 0)) | 271 | if (mtd_device_register(&part->mtd, NULL, 0)) |
272 | goto fail; | 272 | goto fail; |
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 926d02068a14..922f146e4235 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
14 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index 404111b016c9..2b60ce1996aa 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/jffs2.h> | 16 | #include <linux/jffs2.h> |
15 | #include <linux/mtd/mtd.h> | 17 | #include <linux/mtd/mtd.h> |
@@ -42,12 +44,13 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) | |||
42 | 44 | ||
43 | tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); | 45 | tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); |
44 | if (IS_ERR(tsk)) { | 46 | if (IS_ERR(tsk)) { |
45 | printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk)); | 47 | pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", |
48 | -PTR_ERR(tsk)); | ||
46 | complete(&c->gc_thread_exit); | 49 | complete(&c->gc_thread_exit); |
47 | ret = PTR_ERR(tsk); | 50 | ret = PTR_ERR(tsk); |
48 | } else { | 51 | } else { |
49 | /* Wait for it... */ | 52 | /* Wait for it... */ |
50 | D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid)); | 53 | jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid); |
51 | wait_for_completion(&c->gc_thread_start); | 54 | wait_for_completion(&c->gc_thread_start); |
52 | ret = tsk->pid; | 55 | ret = tsk->pid; |
53 | } | 56 | } |
@@ -60,7 +63,7 @@ void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c) | |||
60 | int wait = 0; | 63 | int wait = 0; |
61 | spin_lock(&c->erase_completion_lock); | 64 | spin_lock(&c->erase_completion_lock); |
62 | if (c->gc_task) { | 65 | if (c->gc_task) { |
63 | D1(printk(KERN_DEBUG "jffs2: Killing GC task %d\n", c->gc_task->pid)); | 66 | jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid); |
64 | send_sig(SIGKILL, c->gc_task, 1); | 67 | send_sig(SIGKILL, c->gc_task, 1); |
65 | wait = 1; | 68 | wait = 1; |
66 | } | 69 | } |
@@ -90,7 +93,7 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
90 | if (!jffs2_thread_should_wake(c)) { | 93 | if (!jffs2_thread_should_wake(c)) { |
91 | set_current_state (TASK_INTERRUPTIBLE); | 94 | set_current_state (TASK_INTERRUPTIBLE); |
92 | spin_unlock(&c->erase_completion_lock); | 95 | spin_unlock(&c->erase_completion_lock); |
93 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n")); | 96 | jffs2_dbg(1, "%s(): sleeping...\n", __func__); |
94 | schedule(); | 97 | schedule(); |
95 | } else | 98 | } else |
96 | spin_unlock(&c->erase_completion_lock); | 99 | spin_unlock(&c->erase_completion_lock); |
@@ -109,7 +112,7 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
109 | schedule_timeout_interruptible(msecs_to_jiffies(50)); | 112 | schedule_timeout_interruptible(msecs_to_jiffies(50)); |
110 | 113 | ||
111 | if (kthread_should_stop()) { | 114 | if (kthread_should_stop()) { |
112 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n")); | 115 | jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__); |
113 | goto die; | 116 | goto die; |
114 | } | 117 | } |
115 | 118 | ||
@@ -126,28 +129,32 @@ static int jffs2_garbage_collect_thread(void *_c) | |||
126 | 129 | ||
127 | switch(signr) { | 130 | switch(signr) { |
128 | case SIGSTOP: | 131 | case SIGSTOP: |
129 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGSTOP received.\n")); | 132 | jffs2_dbg(1, "%s(): SIGSTOP received\n", |
133 | __func__); | ||
130 | set_current_state(TASK_STOPPED); | 134 | set_current_state(TASK_STOPPED); |
131 | schedule(); | 135 | schedule(); |
132 | break; | 136 | break; |
133 | 137 | ||
134 | case SIGKILL: | 138 | case SIGKILL: |
135 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGKILL received.\n")); | 139 | jffs2_dbg(1, "%s(): SIGKILL received\n", |
140 | __func__); | ||
136 | goto die; | 141 | goto die; |
137 | 142 | ||
138 | case SIGHUP: | 143 | case SIGHUP: |
139 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): SIGHUP received.\n")); | 144 | jffs2_dbg(1, "%s(): SIGHUP received\n", |
145 | __func__); | ||
140 | break; | 146 | break; |
141 | default: | 147 | default: |
142 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): signal %ld received\n", signr)); | 148 | jffs2_dbg(1, "%s(): signal %ld received\n", |
149 | __func__, signr); | ||
143 | } | 150 | } |
144 | } | 151 | } |
145 | /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ | 152 | /* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */ |
146 | disallow_signal(SIGHUP); | 153 | disallow_signal(SIGHUP); |
147 | 154 | ||
148 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): pass\n")); | 155 | jffs2_dbg(1, "%s(): pass\n", __func__); |
149 | if (jffs2_garbage_collect_pass(c) == -ENOSPC) { | 156 | if (jffs2_garbage_collect_pass(c) == -ENOSPC) { |
150 | printk(KERN_NOTICE "No space for garbage collection. Aborting GC thread\n"); | 157 | pr_notice("No space for garbage collection. Aborting GC thread\n"); |
151 | goto die; | 158 | goto die; |
152 | } | 159 | } |
153 | } | 160 | } |
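Note: the JFFS2 part of the series converges on one message style: each file defines pr_fmt() before its includes and then switches printk(KERN_WARNING "JFFS2: ...")-style calls to pr_warn()/pr_notice()/pr_err(), so the module-name prefix is applied automatically and drops out of the individual format strings. What the macro buys, shown for one of the calls converted above (this is just the standard printk.h expansion):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n", err);
	/*
	 * expands to roughly
	 *   printk(KERN_WARNING KBUILD_MODNAME ": " "fork failed ...", err);
	 * so the line is logged as "jffs2: fork failed for JFFS2 garbage
	 * collect thread: ..." without a hand-written prefix.
	 */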
diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c index 3005ec4520ad..a3750f902adc 100644 --- a/fs/jffs2/build.c +++ b/fs/jffs2/build.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
15 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -307,8 +309,8 @@ static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c) | |||
307 | trying to GC to make more space. It'll be a fruitless task */ | 309 | trying to GC to make more space. It'll be a fruitless task */ |
308 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); | 310 | c->nospc_dirty_size = c->sector_size + (c->flash_size / 100); |
309 | 311 | ||
310 | dbg_fsbuild("JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", | 312 | dbg_fsbuild("trigger levels (size %d KiB, block size %d KiB, %d blocks)\n", |
311 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); | 313 | c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); |
312 | dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", | 314 | dbg_fsbuild("Blocks required to allow deletion: %d (%d KiB)\n", |
313 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); | 315 | c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024); |
314 | dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", | 316 | dbg_fsbuild("Blocks required to allow writes: %d (%d KiB)\n", |
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c index 96ed3c9ec3fc..4849a4c9a0e2 100644 --- a/fs/jffs2/compr.c +++ b/fs/jffs2/compr.c | |||
@@ -12,6 +12,8 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
16 | |||
15 | #include "compr.h" | 17 | #include "compr.h" |
16 | 18 | ||
17 | static DEFINE_SPINLOCK(jffs2_compressor_list_lock); | 19 | static DEFINE_SPINLOCK(jffs2_compressor_list_lock); |
@@ -79,7 +81,7 @@ static int jffs2_selected_compress(u8 compr, unsigned char *data_in, | |||
79 | 81 | ||
80 | output_buf = kmalloc(*cdatalen, GFP_KERNEL); | 82 | output_buf = kmalloc(*cdatalen, GFP_KERNEL); |
81 | if (!output_buf) { | 83 | if (!output_buf) { |
82 | printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n"); | 84 | pr_warn("No memory for compressor allocation. Compression failed.\n"); |
83 | return ret; | 85 | return ret; |
84 | } | 86 | } |
85 | orig_slen = *datalen; | 87 | orig_slen = *datalen; |
@@ -188,7 +190,8 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
188 | tmp_buf = kmalloc(orig_slen, GFP_KERNEL); | 190 | tmp_buf = kmalloc(orig_slen, GFP_KERNEL); |
189 | spin_lock(&jffs2_compressor_list_lock); | 191 | spin_lock(&jffs2_compressor_list_lock); |
190 | if (!tmp_buf) { | 192 | if (!tmp_buf) { |
191 | printk(KERN_WARNING "JFFS2: No memory for compressor allocation. (%d bytes)\n", orig_slen); | 193 | pr_warn("No memory for compressor allocation. (%d bytes)\n", |
194 | orig_slen); | ||
192 | continue; | 195 | continue; |
193 | } | 196 | } |
194 | else { | 197 | else { |
@@ -235,7 +238,7 @@ uint16_t jffs2_compress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
235 | cpage_out, datalen, cdatalen); | 238 | cpage_out, datalen, cdatalen); |
236 | break; | 239 | break; |
237 | default: | 240 | default: |
238 | printk(KERN_ERR "JFFS2: unknown compression mode.\n"); | 241 | pr_err("unknown compression mode\n"); |
239 | } | 242 | } |
240 | 243 | ||
241 | if (ret == JFFS2_COMPR_NONE) { | 244 | if (ret == JFFS2_COMPR_NONE) { |
@@ -277,7 +280,8 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
277 | ret = this->decompress(cdata_in, data_out, cdatalen, datalen); | 280 | ret = this->decompress(cdata_in, data_out, cdatalen, datalen); |
278 | spin_lock(&jffs2_compressor_list_lock); | 281 | spin_lock(&jffs2_compressor_list_lock); |
279 | if (ret) { | 282 | if (ret) { |
280 | printk(KERN_WARNING "Decompressor \"%s\" returned %d\n", this->name, ret); | 283 | pr_warn("Decompressor \"%s\" returned %d\n", |
284 | this->name, ret); | ||
281 | } | 285 | } |
282 | else { | 286 | else { |
283 | this->stat_decompr_blocks++; | 287 | this->stat_decompr_blocks++; |
@@ -287,7 +291,7 @@ int jffs2_decompress(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
287 | return ret; | 291 | return ret; |
288 | } | 292 | } |
289 | } | 293 | } |
290 | printk(KERN_WARNING "JFFS2 compression type 0x%02x not available.\n", comprtype); | 294 | pr_warn("compression type 0x%02x not available\n", comprtype); |
291 | spin_unlock(&jffs2_compressor_list_lock); | 295 | spin_unlock(&jffs2_compressor_list_lock); |
292 | return -EIO; | 296 | return -EIO; |
293 | } | 297 | } |
@@ -299,7 +303,7 @@ int jffs2_register_compressor(struct jffs2_compressor *comp) | |||
299 | struct jffs2_compressor *this; | 303 | struct jffs2_compressor *this; |
300 | 304 | ||
301 | if (!comp->name) { | 305 | if (!comp->name) { |
302 | printk(KERN_WARNING "NULL compressor name at registering JFFS2 compressor. Failed.\n"); | 306 | pr_warn("NULL compressor name at registering JFFS2 compressor. Failed.\n"); |
303 | return -1; | 307 | return -1; |
304 | } | 308 | } |
305 | comp->compr_buf_size=0; | 309 | comp->compr_buf_size=0; |
@@ -309,7 +313,7 @@ int jffs2_register_compressor(struct jffs2_compressor *comp) | |||
309 | comp->stat_compr_new_size=0; | 313 | comp->stat_compr_new_size=0; |
310 | comp->stat_compr_blocks=0; | 314 | comp->stat_compr_blocks=0; |
311 | comp->stat_decompr_blocks=0; | 315 | comp->stat_decompr_blocks=0; |
312 | D1(printk(KERN_DEBUG "Registering JFFS2 compressor \"%s\"\n", comp->name)); | 316 | jffs2_dbg(1, "Registering JFFS2 compressor \"%s\"\n", comp->name); |
313 | 317 | ||
314 | spin_lock(&jffs2_compressor_list_lock); | 318 | spin_lock(&jffs2_compressor_list_lock); |
315 | 319 | ||
@@ -332,15 +336,15 @@ out: | |||
332 | 336 | ||
333 | int jffs2_unregister_compressor(struct jffs2_compressor *comp) | 337 | int jffs2_unregister_compressor(struct jffs2_compressor *comp) |
334 | { | 338 | { |
335 | D2(struct jffs2_compressor *this;) | 339 | D2(struct jffs2_compressor *this); |
336 | 340 | ||
337 | D1(printk(KERN_DEBUG "Unregistering JFFS2 compressor \"%s\"\n", comp->name)); | 341 | jffs2_dbg(1, "Unregistering JFFS2 compressor \"%s\"\n", comp->name); |
338 | 342 | ||
339 | spin_lock(&jffs2_compressor_list_lock); | 343 | spin_lock(&jffs2_compressor_list_lock); |
340 | 344 | ||
341 | if (comp->usecount) { | 345 | if (comp->usecount) { |
342 | spin_unlock(&jffs2_compressor_list_lock); | 346 | spin_unlock(&jffs2_compressor_list_lock); |
343 | printk(KERN_WARNING "JFFS2: Compressor module is in use. Unregister failed.\n"); | 347 | pr_warn("Compressor module is in use. Unregister failed.\n"); |
344 | return -1; | 348 | return -1; |
345 | } | 349 | } |
346 | list_del(&comp->list); | 350 | list_del(&comp->list); |
@@ -377,17 +381,17 @@ int __init jffs2_compressors_init(void) | |||
377 | /* Setting default compression mode */ | 381 | /* Setting default compression mode */ |
378 | #ifdef CONFIG_JFFS2_CMODE_NONE | 382 | #ifdef CONFIG_JFFS2_CMODE_NONE |
379 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; | 383 | jffs2_compression_mode = JFFS2_COMPR_MODE_NONE; |
380 | D1(printk(KERN_INFO "JFFS2: default compression mode: none\n");) | 384 | jffs2_dbg(1, "default compression mode: none\n"); |
381 | #else | 385 | #else |
382 | #ifdef CONFIG_JFFS2_CMODE_SIZE | 386 | #ifdef CONFIG_JFFS2_CMODE_SIZE |
383 | jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; | 387 | jffs2_compression_mode = JFFS2_COMPR_MODE_SIZE; |
384 | D1(printk(KERN_INFO "JFFS2: default compression mode: size\n");) | 388 | jffs2_dbg(1, "default compression mode: size\n"); |
385 | #else | 389 | #else |
386 | #ifdef CONFIG_JFFS2_CMODE_FAVOURLZO | 390 | #ifdef CONFIG_JFFS2_CMODE_FAVOURLZO |
387 | jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO; | 391 | jffs2_compression_mode = JFFS2_COMPR_MODE_FAVOURLZO; |
388 | D1(printk(KERN_INFO "JFFS2: default compression mode: favourlzo\n");) | 392 | jffs2_dbg(1, "default compression mode: favourlzo\n"); |
389 | #else | 393 | #else |
390 | D1(printk(KERN_INFO "JFFS2: default compression mode: priority\n");) | 394 | jffs2_dbg(1, "default compression mode: priority\n"); |
391 | #endif | 395 | #endif |
392 | #endif | 396 | #endif |
393 | #endif | 397 | #endif |
diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c index af186ee674d8..c553bd6506da 100644 --- a/fs/jffs2/compr_lzo.c +++ b/fs/jffs2/compr_lzo.c | |||
@@ -33,7 +33,6 @@ static int __init alloc_workspace(void) | |||
33 | lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); | 33 | lzo_compress_buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); |
34 | 34 | ||
35 | if (!lzo_mem || !lzo_compress_buf) { | 35 | if (!lzo_mem || !lzo_compress_buf) { |
36 | printk(KERN_WARNING "Failed to allocate lzo deflate workspace\n"); | ||
37 | free_workspace(); | 36 | free_workspace(); |
38 | return -ENOMEM; | 37 | return -ENOMEM; |
39 | } | 38 | } |
diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c index 9e7cec808c4c..92e0644bf867 100644 --- a/fs/jffs2/compr_rubin.c +++ b/fs/jffs2/compr_rubin.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/string.h> | 15 | #include <linux/string.h> |
14 | #include <linux/types.h> | 16 | #include <linux/types.h> |
15 | #include <linux/jffs2.h> | 17 | #include <linux/jffs2.h> |
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c index 5a001020c542..0b9a1e44e833 100644 --- a/fs/jffs2/compr_zlib.c +++ b/fs/jffs2/compr_zlib.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #error "The userspace support got too messy and was removed. Update your mkfs.jffs2" | 14 | #error "The userspace support got too messy and was removed. Update your mkfs.jffs2" |
15 | #endif | 15 | #endif |
16 | 16 | ||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
17 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
18 | #include <linux/zlib.h> | 20 | #include <linux/zlib.h> |
19 | #include <linux/zutil.h> | 21 | #include <linux/zutil.h> |
@@ -42,18 +44,18 @@ static int __init alloc_workspaces(void) | |||
42 | { | 44 | { |
43 | def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS, | 45 | def_strm.workspace = vmalloc(zlib_deflate_workspacesize(MAX_WBITS, |
44 | MAX_MEM_LEVEL)); | 46 | MAX_MEM_LEVEL)); |
45 | if (!def_strm.workspace) { | 47 | if (!def_strm.workspace) |
46 | printk(KERN_WARNING "Failed to allocate %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)); | ||
47 | return -ENOMEM; | 48 | return -ENOMEM; |
48 | } | 49 | |
49 | D1(printk(KERN_DEBUG "Allocated %d bytes for deflate workspace\n", zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL))); | 50 | jffs2_dbg(1, "Allocated %d bytes for deflate workspace\n", |
51 | zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL)); | ||
50 | inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); | 52 | inf_strm.workspace = vmalloc(zlib_inflate_workspacesize()); |
51 | if (!inf_strm.workspace) { | 53 | if (!inf_strm.workspace) { |
52 | printk(KERN_WARNING "Failed to allocate %d bytes for inflate workspace\n", zlib_inflate_workspacesize()); | ||
53 | vfree(def_strm.workspace); | 54 | vfree(def_strm.workspace); |
54 | return -ENOMEM; | 55 | return -ENOMEM; |
55 | } | 56 | } |
56 | D1(printk(KERN_DEBUG "Allocated %d bytes for inflate workspace\n", zlib_inflate_workspacesize())); | 57 | jffs2_dbg(1, "Allocated %d bytes for inflate workspace\n", |
58 | zlib_inflate_workspacesize()); | ||
57 | return 0; | 59 | return 0; |
58 | } | 60 | } |
59 | 61 | ||
@@ -79,7 +81,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
79 | mutex_lock(&deflate_mutex); | 81 | mutex_lock(&deflate_mutex); |
80 | 82 | ||
81 | if (Z_OK != zlib_deflateInit(&def_strm, 3)) { | 83 | if (Z_OK != zlib_deflateInit(&def_strm, 3)) { |
82 | printk(KERN_WARNING "deflateInit failed\n"); | 84 | pr_warn("deflateInit failed\n"); |
83 | mutex_unlock(&deflate_mutex); | 85 | mutex_unlock(&deflate_mutex); |
84 | return -1; | 86 | return -1; |
85 | } | 87 | } |
@@ -93,13 +95,14 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
93 | while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) { | 95 | while (def_strm.total_out < *dstlen - STREAM_END_SPACE && def_strm.total_in < *sourcelen) { |
94 | def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE); | 96 | def_strm.avail_out = *dstlen - (def_strm.total_out + STREAM_END_SPACE); |
95 | def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out); | 97 | def_strm.avail_in = min((unsigned)(*sourcelen-def_strm.total_in), def_strm.avail_out); |
96 | D1(printk(KERN_DEBUG "calling deflate with avail_in %d, avail_out %d\n", | 98 | jffs2_dbg(1, "calling deflate with avail_in %d, avail_out %d\n", |
97 | def_strm.avail_in, def_strm.avail_out)); | 99 | def_strm.avail_in, def_strm.avail_out); |
98 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); | 100 | ret = zlib_deflate(&def_strm, Z_PARTIAL_FLUSH); |
99 | D1(printk(KERN_DEBUG "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", | 101 | jffs2_dbg(1, "deflate returned with avail_in %d, avail_out %d, total_in %ld, total_out %ld\n", |
100 | def_strm.avail_in, def_strm.avail_out, def_strm.total_in, def_strm.total_out)); | 102 | def_strm.avail_in, def_strm.avail_out, |
103 | def_strm.total_in, def_strm.total_out); | ||
101 | if (ret != Z_OK) { | 104 | if (ret != Z_OK) { |
102 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); | 105 | jffs2_dbg(1, "deflate in loop returned %d\n", ret); |
103 | zlib_deflateEnd(&def_strm); | 106 | zlib_deflateEnd(&def_strm); |
104 | mutex_unlock(&deflate_mutex); | 107 | mutex_unlock(&deflate_mutex); |
105 | return -1; | 108 | return -1; |
@@ -111,20 +114,20 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
111 | zlib_deflateEnd(&def_strm); | 114 | zlib_deflateEnd(&def_strm); |
112 | 115 | ||
113 | if (ret != Z_STREAM_END) { | 116 | if (ret != Z_STREAM_END) { |
114 | D1(printk(KERN_DEBUG "final deflate returned %d\n", ret)); | 117 | jffs2_dbg(1, "final deflate returned %d\n", ret); |
115 | ret = -1; | 118 | ret = -1; |
116 | goto out; | 119 | goto out; |
117 | } | 120 | } |
118 | 121 | ||
119 | if (def_strm.total_out >= def_strm.total_in) { | 122 | if (def_strm.total_out >= def_strm.total_in) { |
120 | D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld; failing\n", | 123 | jffs2_dbg(1, "zlib compressed %ld bytes into %ld; failing\n", |
121 | def_strm.total_in, def_strm.total_out)); | 124 | def_strm.total_in, def_strm.total_out); |
122 | ret = -1; | 125 | ret = -1; |
123 | goto out; | 126 | goto out; |
124 | } | 127 | } |
125 | 128 | ||
126 | D1(printk(KERN_DEBUG "zlib compressed %ld bytes into %ld\n", | 129 | jffs2_dbg(1, "zlib compressed %ld bytes into %ld\n", |
127 | def_strm.total_in, def_strm.total_out)); | 130 | def_strm.total_in, def_strm.total_out); |
128 | 131 | ||
129 | *dstlen = def_strm.total_out; | 132 | *dstlen = def_strm.total_out; |
130 | *sourcelen = def_strm.total_in; | 133 | *sourcelen = def_strm.total_in; |
@@ -157,18 +160,18 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
157 | ((data_in[0] & 0x0f) == Z_DEFLATED) && | 160 | ((data_in[0] & 0x0f) == Z_DEFLATED) && |
158 | !(((data_in[0]<<8) + data_in[1]) % 31)) { | 161 | !(((data_in[0]<<8) + data_in[1]) % 31)) { |
159 | 162 | ||
160 | D2(printk(KERN_DEBUG "inflate skipping adler32\n")); | 163 | jffs2_dbg(2, "inflate skipping adler32\n"); |
161 | wbits = -((data_in[0] >> 4) + 8); | 164 | wbits = -((data_in[0] >> 4) + 8); |
162 | inf_strm.next_in += 2; | 165 | inf_strm.next_in += 2; |
163 | inf_strm.avail_in -= 2; | 166 | inf_strm.avail_in -= 2; |
164 | } else { | 167 | } else { |
165 | /* Let this remain D1 for now -- it should never happen */ | 168 | /* Let this remain D1 for now -- it should never happen */ |
166 | D1(printk(KERN_DEBUG "inflate not skipping adler32\n")); | 169 | jffs2_dbg(1, "inflate not skipping adler32\n"); |
167 | } | 170 | } |
168 | 171 | ||
169 | 172 | ||
170 | if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { | 173 | if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { |
171 | printk(KERN_WARNING "inflateInit failed\n"); | 174 | pr_warn("inflateInit failed\n"); |
172 | mutex_unlock(&inflate_mutex); | 175 | mutex_unlock(&inflate_mutex); |
173 | return 1; | 176 | return 1; |
174 | } | 177 | } |
@@ -176,7 +179,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
176 | while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK) | 179 | while((ret = zlib_inflate(&inf_strm, Z_FINISH)) == Z_OK) |
177 | ; | 180 | ; |
178 | if (ret != Z_STREAM_END) { | 181 | if (ret != Z_STREAM_END) { |
179 | printk(KERN_NOTICE "inflate returned %d\n", ret); | 182 | pr_notice("inflate returned %d\n", ret); |
180 | } | 183 | } |
181 | zlib_inflateEnd(&inf_strm); | 184 | zlib_inflateEnd(&inf_strm); |
182 | mutex_unlock(&inflate_mutex); | 185 | mutex_unlock(&inflate_mutex); |
diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c index e0b76c87a91a..1090eb64b90d 100644 --- a/fs/jffs2/debug.c +++ b/fs/jffs2/debug.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/types.h> | 16 | #include <linux/types.h> |
15 | #include <linux/pagemap.h> | 17 | #include <linux/pagemap.h> |
@@ -261,12 +263,15 @@ void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) | |||
261 | bad += c->sector_size; | 263 | bad += c->sector_size; |
262 | } | 264 | } |
263 | 265 | ||
264 | #define check(sz) \ | 266 | #define check(sz) \ |
265 | if (sz != c->sz##_size) { \ | 267 | do { \ |
266 | printk(KERN_WARNING #sz "_size mismatch counted 0x%x, c->" #sz "_size 0x%x\n", \ | 268 | if (sz != c->sz##_size) { \ |
267 | sz, c->sz##_size); \ | 269 | pr_warn("%s_size mismatch counted 0x%x, c->%s_size 0x%x\n", \ |
268 | dump = 1; \ | 270 | #sz, sz, #sz, c->sz##_size); \ |
269 | } | 271 | dump = 1; \ |
272 | } \ | ||
273 | } while (0) | ||
274 | |||
270 | check(free); | 275 | check(free); |
271 | check(dirty); | 276 | check(dirty); |
272 | check(used); | 277 | check(used); |
@@ -274,11 +279,12 @@ void __jffs2_dbg_superblock_counts(struct jffs2_sb_info *c) | |||
274 | check(unchecked); | 279 | check(unchecked); |
275 | check(bad); | 280 | check(bad); |
276 | check(erasing); | 281 | check(erasing); |
282 | |||
277 | #undef check | 283 | #undef check |
278 | 284 | ||
279 | if (nr_counted != c->nr_blocks) { | 285 | if (nr_counted != c->nr_blocks) { |
280 | printk(KERN_WARNING "%s counted only 0x%x blocks of 0x%x. Where are the others?\n", | 286 | pr_warn("%s counted only 0x%x blocks of 0x%x. Where are the others?\n", |
281 | __func__, nr_counted, c->nr_blocks); | 287 | __func__, nr_counted, c->nr_blocks); |
282 | dump = 1; | 288 | dump = 1; |
283 | } | 289 | } |
284 | 290 | ||
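Note: the check() macro rewrite in debug.c above is the usual multi-statement-macro fix: wrapping the body in do { ... } while (0) makes each expansion a single statement, so it composes with if/else and always takes the trailing semicolon. A small stand-alone illustration of the failure mode the old form invites (plain C, not from the patch):

	static int dump;

	#define CHECK_OLD(x)	if ((x) != 0) { dump = 1; }
	#define CHECK_NEW(x)	do { if ((x) != 0) dump = 1; } while (0)

	void count_check(int have_counts, int delta)
	{
		/*
		 * With CHECK_OLD the line below does not compile: the ';'
		 * already terminates the macro's if-statement, leaving the
		 * 'else' dangling.  CHECK_NEW expands to one statement.
		 */
		if (have_counts)
			CHECK_NEW(delta);
		else
			dump = 0;
	}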
diff --git a/fs/jffs2/debug.h b/fs/jffs2/debug.h index c4f8eef5ca68..4fd9be4cbc98 100644 --- a/fs/jffs2/debug.h +++ b/fs/jffs2/debug.h | |||
@@ -51,6 +51,7 @@ | |||
51 | * superseded by nicer dbg_xxx() macros... | 51 | * superseded by nicer dbg_xxx() macros... |
52 | */ | 52 | */ |
53 | #if CONFIG_JFFS2_FS_DEBUG > 0 | 53 | #if CONFIG_JFFS2_FS_DEBUG > 0 |
54 | #define DEBUG | ||
54 | #define D1(x) x | 55 | #define D1(x) x |
55 | #else | 56 | #else |
56 | #define D1(x) | 57 | #define D1(x) |
@@ -62,50 +63,33 @@ | |||
62 | #define D2(x) | 63 | #define D2(x) |
63 | #endif | 64 | #endif |
64 | 65 | ||
66 | #define jffs2_dbg(level, fmt, ...) \ | ||
67 | do { \ | ||
68 | if (CONFIG_JFFS2_FS_DEBUG >= level) \ | ||
69 | pr_debug(fmt, ##__VA_ARGS__); \ | ||
70 | } while (0) | ||
71 | |||
65 | /* The prefixes of JFFS2 messages */ | 72 | /* The prefixes of JFFS2 messages */ |
73 | #define JFFS2_DBG KERN_DEBUG | ||
66 | #define JFFS2_DBG_PREFIX "[JFFS2 DBG]" | 74 | #define JFFS2_DBG_PREFIX "[JFFS2 DBG]" |
67 | #define JFFS2_ERR_PREFIX "JFFS2 error:" | ||
68 | #define JFFS2_WARN_PREFIX "JFFS2 warning:" | ||
69 | #define JFFS2_NOTICE_PREFIX "JFFS2 notice:" | ||
70 | |||
71 | #define JFFS2_ERR KERN_ERR | ||
72 | #define JFFS2_WARN KERN_WARNING | ||
73 | #define JFFS2_NOT KERN_NOTICE | ||
74 | #define JFFS2_DBG KERN_DEBUG | ||
75 | |||
76 | #define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX | 75 | #define JFFS2_DBG_MSG_PREFIX JFFS2_DBG JFFS2_DBG_PREFIX |
77 | #define JFFS2_ERR_MSG_PREFIX JFFS2_ERR JFFS2_ERR_PREFIX | ||
78 | #define JFFS2_WARN_MSG_PREFIX JFFS2_WARN JFFS2_WARN_PREFIX | ||
79 | #define JFFS2_NOTICE_MSG_PREFIX JFFS2_NOT JFFS2_NOTICE_PREFIX | ||
80 | 76 | ||
81 | /* JFFS2 message macros */ | 77 | /* JFFS2 message macros */ |
82 | #define JFFS2_ERROR(fmt, ...) \ | 78 | #define JFFS2_ERROR(fmt, ...) \ |
83 | do { \ | 79 | pr_err("error: (%d) %s: " fmt, \ |
84 | printk(JFFS2_ERR_MSG_PREFIX \ | 80 | task_pid_nr(current), __func__, ##__VA_ARGS__) |
85 | " (%d) %s: " fmt, task_pid_nr(current), \ | ||
86 | __func__ , ##__VA_ARGS__); \ | ||
87 | } while(0) | ||
88 | 81 | ||
89 | #define JFFS2_WARNING(fmt, ...) \ | 82 | #define JFFS2_WARNING(fmt, ...) \ |
90 | do { \ | 83 | pr_warn("warning: (%d) %s: " fmt, \ |
91 | printk(JFFS2_WARN_MSG_PREFIX \ | 84 | task_pid_nr(current), __func__, ##__VA_ARGS__) |
92 | " (%d) %s: " fmt, task_pid_nr(current), \ | ||
93 | __func__ , ##__VA_ARGS__); \ | ||
94 | } while(0) | ||
95 | 85 | ||
96 | #define JFFS2_NOTICE(fmt, ...) \ | 86 | #define JFFS2_NOTICE(fmt, ...) \ |
97 | do { \ | 87 | pr_notice("notice: (%d) %s: " fmt, \ |
98 | printk(JFFS2_NOTICE_MSG_PREFIX \ | 88 | task_pid_nr(current), __func__, ##__VA_ARGS__) |
99 | " (%d) %s: " fmt, task_pid_nr(current), \ | ||
100 | __func__ , ##__VA_ARGS__); \ | ||
101 | } while(0) | ||
102 | 89 | ||
103 | #define JFFS2_DEBUG(fmt, ...) \ | 90 | #define JFFS2_DEBUG(fmt, ...) \ |
104 | do { \ | 91 | printk(KERN_DEBUG "[JFFS2 DBG] (%d) %s: " fmt, \ |
105 | printk(JFFS2_DBG_MSG_PREFIX \ | 92 | task_pid_nr(current), __func__, ##__VA_ARGS__) |
106 | " (%d) %s: " fmt, task_pid_nr(current), \ | ||
107 | __func__ , ##__VA_ARGS__); \ | ||
108 | } while(0) | ||
109 | 93 | ||
110 | /* | 94 | /* |
111 | * We split our debugging messages on several parts, depending on the JFFS2 | 95 | * We split our debugging messages on several parts, depending on the JFFS2 |
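Note: debug.h now also provides jffs2_dbg(level, fmt, ...): the level is compared against the compile-time CONFIG_JFFS2_FS_DEBUG constant and the message goes through pr_debug(), which the newly added "#define DEBUG" keeps enabled whenever JFFS2 debugging is configured in. The rest of the series converts the old D1()/D2() printk wrappers mechanically, e.g. (taken from the dir.c hunk just below):

	/* before */
	D1(printk(KERN_DEBUG "jffs2_lookup()\n"));

	/* after */
	jffs2_dbg(1, "jffs2_lookup()\n");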
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 973ac5822bd7..b56018896d5e 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
15 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
@@ -79,7 +81,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | |||
79 | uint32_t ino = 0; | 81 | uint32_t ino = 0; |
80 | struct inode *inode = NULL; | 82 | struct inode *inode = NULL; |
81 | 83 | ||
82 | D1(printk(KERN_DEBUG "jffs2_lookup()\n")); | 84 | jffs2_dbg(1, "jffs2_lookup()\n"); |
83 | 85 | ||
84 | if (target->d_name.len > JFFS2_MAX_NAME_LEN) | 86 | if (target->d_name.len > JFFS2_MAX_NAME_LEN) |
85 | return ERR_PTR(-ENAMETOOLONG); | 87 | return ERR_PTR(-ENAMETOOLONG); |
@@ -103,7 +105,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target, | |||
103 | if (ino) { | 105 | if (ino) { |
104 | inode = jffs2_iget(dir_i->i_sb, ino); | 106 | inode = jffs2_iget(dir_i->i_sb, ino); |
105 | if (IS_ERR(inode)) | 107 | if (IS_ERR(inode)) |
106 | printk(KERN_WARNING "iget() failed for ino #%u\n", ino); | 108 | pr_warn("iget() failed for ino #%u\n", ino); |
107 | } | 109 | } |
108 | 110 | ||
109 | return d_splice_alias(inode, target); | 111 | return d_splice_alias(inode, target); |
@@ -119,21 +121,22 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
119 | struct jffs2_full_dirent *fd; | 121 | struct jffs2_full_dirent *fd; |
120 | unsigned long offset, curofs; | 122 | unsigned long offset, curofs; |
121 | 123 | ||
122 | D1(printk(KERN_DEBUG "jffs2_readdir() for dir_i #%lu\n", filp->f_path.dentry->d_inode->i_ino)); | 124 | jffs2_dbg(1, "jffs2_readdir() for dir_i #%lu\n", |
125 | filp->f_path.dentry->d_inode->i_ino); | ||
123 | 126 | ||
124 | f = JFFS2_INODE_INFO(inode); | 127 | f = JFFS2_INODE_INFO(inode); |
125 | 128 | ||
126 | offset = filp->f_pos; | 129 | offset = filp->f_pos; |
127 | 130 | ||
128 | if (offset == 0) { | 131 | if (offset == 0) { |
129 | D1(printk(KERN_DEBUG "Dirent 0: \".\", ino #%lu\n", inode->i_ino)); | 132 | jffs2_dbg(1, "Dirent 0: \".\", ino #%lu\n", inode->i_ino); |
130 | if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) | 133 | if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR) < 0) |
131 | goto out; | 134 | goto out; |
132 | offset++; | 135 | offset++; |
133 | } | 136 | } |
134 | if (offset == 1) { | 137 | if (offset == 1) { |
135 | unsigned long pino = parent_ino(filp->f_path.dentry); | 138 | unsigned long pino = parent_ino(filp->f_path.dentry); |
136 | D1(printk(KERN_DEBUG "Dirent 1: \"..\", ino #%lu\n", pino)); | 139 | jffs2_dbg(1, "Dirent 1: \"..\", ino #%lu\n", pino); |
137 | if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) | 140 | if (filldir(dirent, "..", 2, 1, pino, DT_DIR) < 0) |
138 | goto out; | 141 | goto out; |
139 | offset++; | 142 | offset++; |
@@ -146,16 +149,18 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
146 | curofs++; | 149 | curofs++; |
147 | /* First loop: curofs = 2; offset = 2 */ | 150 | /* First loop: curofs = 2; offset = 2 */ |
148 | if (curofs < offset) { | 151 | if (curofs < offset) { |
149 | D2(printk(KERN_DEBUG "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", | 152 | jffs2_dbg(2, "Skipping dirent: \"%s\", ino #%u, type %d, because curofs %ld < offset %ld\n", |
150 | fd->name, fd->ino, fd->type, curofs, offset)); | 153 | fd->name, fd->ino, fd->type, curofs, offset); |
151 | continue; | 154 | continue; |
152 | } | 155 | } |
153 | if (!fd->ino) { | 156 | if (!fd->ino) { |
154 | D2(printk(KERN_DEBUG "Skipping deletion dirent \"%s\"\n", fd->name)); | 157 | jffs2_dbg(2, "Skipping deletion dirent \"%s\"\n", |
158 | fd->name); | ||
155 | offset++; | 159 | offset++; |
156 | continue; | 160 | continue; |
157 | } | 161 | } |
158 | D2(printk(KERN_DEBUG "Dirent %ld: \"%s\", ino #%u, type %d\n", offset, fd->name, fd->ino, fd->type)); | 162 | jffs2_dbg(2, "Dirent %ld: \"%s\", ino #%u, type %d\n", |
163 | offset, fd->name, fd->ino, fd->type); | ||
159 | if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) | 164 | if (filldir(dirent, fd->name, strlen(fd->name), offset, fd->ino, fd->type) < 0) |
160 | break; | 165 | break; |
161 | offset++; | 166 | offset++; |
@@ -184,12 +189,12 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, | |||
184 | 189 | ||
185 | c = JFFS2_SB_INFO(dir_i->i_sb); | 190 | c = JFFS2_SB_INFO(dir_i->i_sb); |
186 | 191 | ||
187 | D1(printk(KERN_DEBUG "jffs2_create()\n")); | 192 | jffs2_dbg(1, "%s()\n", __func__); |
188 | 193 | ||
189 | inode = jffs2_new_inode(dir_i, mode, ri); | 194 | inode = jffs2_new_inode(dir_i, mode, ri); |
190 | 195 | ||
191 | if (IS_ERR(inode)) { | 196 | if (IS_ERR(inode)) { |
192 | D1(printk(KERN_DEBUG "jffs2_new_inode() failed\n")); | 197 | jffs2_dbg(1, "jffs2_new_inode() failed\n"); |
193 | jffs2_free_raw_inode(ri); | 198 | jffs2_free_raw_inode(ri); |
194 | return PTR_ERR(inode); | 199 | return PTR_ERR(inode); |
195 | } | 200 | } |
@@ -217,9 +222,9 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry, | |||
217 | 222 | ||
218 | jffs2_free_raw_inode(ri); | 223 | jffs2_free_raw_inode(ri); |
219 | 224 | ||
220 | D1(printk(KERN_DEBUG "jffs2_create: Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", | 225 | jffs2_dbg(1, "%s(): Created ino #%lu with mode %o, nlink %d(%d). nrpages %ld\n", |
221 | inode->i_ino, inode->i_mode, inode->i_nlink, | 226 | __func__, inode->i_ino, inode->i_mode, inode->i_nlink, |
222 | f->inocache->pino_nlink, inode->i_mapping->nrpages)); | 227 | f->inocache->pino_nlink, inode->i_mapping->nrpages); |
223 | 228 | ||
224 | d_instantiate(dentry, inode); | 229 | d_instantiate(dentry, inode); |
225 | unlock_new_inode(inode); | 230 | unlock_new_inode(inode); |
@@ -362,14 +367,15 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char | |||
362 | /* We use f->target field to store the target path. */ | 367 | /* We use f->target field to store the target path. */ |
363 | f->target = kmemdup(target, targetlen + 1, GFP_KERNEL); | 368 | f->target = kmemdup(target, targetlen + 1, GFP_KERNEL); |
364 | if (!f->target) { | 369 | if (!f->target) { |
365 | printk(KERN_WARNING "Can't allocate %d bytes of memory\n", targetlen + 1); | 370 | pr_warn("Can't allocate %d bytes of memory\n", targetlen + 1); |
366 | mutex_unlock(&f->sem); | 371 | mutex_unlock(&f->sem); |
367 | jffs2_complete_reservation(c); | 372 | jffs2_complete_reservation(c); |
368 | ret = -ENOMEM; | 373 | ret = -ENOMEM; |
369 | goto fail; | 374 | goto fail; |
370 | } | 375 | } |
371 | 376 | ||
372 | D1(printk(KERN_DEBUG "jffs2_symlink: symlink's target '%s' cached\n", (char *)f->target)); | 377 | jffs2_dbg(1, "%s(): symlink's target '%s' cached\n", |
378 | __func__, (char *)f->target); | ||
373 | 379 | ||
374 | /* No data here. Only a metadata node, which will be | 380 | /* No data here. Only a metadata node, which will be |
375 | obsoleted by the first data write | 381 | obsoleted by the first data write |
@@ -856,7 +862,8 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
856 | f->inocache->pino_nlink++; | 862 | f->inocache->pino_nlink++; |
857 | mutex_unlock(&f->sem); | 863 | mutex_unlock(&f->sem); |
858 | 864 | ||
859 | printk(KERN_NOTICE "jffs2_rename(): Link succeeded, unlink failed (err %d). You now have a hard link\n", ret); | 865 | pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n", |
866 | __func__, ret); | ||
860 | /* Might as well let the VFS know */ | 867 | /* Might as well let the VFS know */ |
861 | d_instantiate(new_dentry, old_dentry->d_inode); | 868 | d_instantiate(new_dentry, old_dentry->d_inode); |
862 | ihold(old_dentry->d_inode); | 869 | ihold(old_dentry->d_inode); |
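The hunks above replace the old D1()/D2() printk wrappers with a single jffs2_dbg(level, ...) helper. The helper itself is presumably defined alongside the other debug macros in fs/jffs2/debug.h and is not shown in this diff; a minimal sketch of its likely shape, assuming the verbosity threshold still comes from CONFIG_JFFS2_FS_DEBUG as it did for D1()/D2():

    /* Sketch only -- the real helper lives in fs/jffs2/debug.h and is not
     * part of the hunks shown here. Level 1 corresponds to the old D1(),
     * level 2 to the old D2(). */
    #define jffs2_dbg(level, fmt, ...)                          \
    do {                                                        \
            if (CONFIG_JFFS2_FS_DEBUG >= level)                 \
                    pr_debug(fmt, ##__VA_ARGS__);               \
    } while (0)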
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index eafb8d37a6fb..4a6cf289be24 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
15 | #include <linux/mtd/mtd.h> | 17 | #include <linux/mtd/mtd.h> |
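The pr_fmt() definition added above is what lets the converted pr_warn()/pr_notice()/pr_err() calls in this file drop the explicit "jffs2: " prefix from their format strings: the generic printk helpers in <linux/printk.h> paste pr_fmt() around the caller's format string at compile time. Roughly (a sketch of the mechanism, not code added by this patch):

    /* Sketch of the mechanism, not part of this patch. With the pr_fmt()
     * above, KBUILD_MODNAME expands to "jffs2", so a call such as
     *         pr_warn("Erase at 0x%08x failed: %d\n", off, err);
     * compiles as
     *         printk(KERN_WARNING "jffs2" ": " "Erase at 0x%08x failed: %d\n", off, err);
     */
    #define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)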
@@ -46,11 +48,12 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
46 | #else /* Linux */ | 48 | #else /* Linux */ |
47 | struct erase_info *instr; | 49 | struct erase_info *instr; |
48 | 50 | ||
49 | D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n", | 51 | jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n", |
50 | jeb->offset, jeb->offset, jeb->offset + c->sector_size)); | 52 | __func__, |
53 | jeb->offset, jeb->offset, jeb->offset + c->sector_size); | ||
51 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); | 54 | instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL); |
52 | if (!instr) { | 55 | if (!instr) { |
53 | printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); | 56 | pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); |
54 | mutex_lock(&c->erase_free_sem); | 57 | mutex_lock(&c->erase_free_sem); |
55 | spin_lock(&c->erase_completion_lock); | 58 | spin_lock(&c->erase_completion_lock); |
56 | list_move(&jeb->list, &c->erase_pending_list); | 59 | list_move(&jeb->list, &c->erase_pending_list); |
@@ -69,7 +72,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
69 | instr->len = c->sector_size; | 72 | instr->len = c->sector_size; |
70 | instr->callback = jffs2_erase_callback; | 73 | instr->callback = jffs2_erase_callback; |
71 | instr->priv = (unsigned long)(&instr[1]); | 74 | instr->priv = (unsigned long)(&instr[1]); |
72 | instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN; | ||
73 | 75 | ||
74 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; | 76 | ((struct erase_priv_struct *)instr->priv)->jeb = jeb; |
75 | ((struct erase_priv_struct *)instr->priv)->c = c; | 77 | ((struct erase_priv_struct *)instr->priv)->c = c; |
@@ -84,7 +86,8 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
84 | 86 | ||
85 | if (ret == -ENOMEM || ret == -EAGAIN) { | 87 | if (ret == -ENOMEM || ret == -EAGAIN) { |
86 | /* Erase failed immediately. Refile it on the list */ | 88 | /* Erase failed immediately. Refile it on the list */ |
87 | D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret)); | 89 | jffs2_dbg(1, "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", |
90 | jeb->offset, ret); | ||
88 | mutex_lock(&c->erase_free_sem); | 91 | mutex_lock(&c->erase_free_sem); |
89 | spin_lock(&c->erase_completion_lock); | 92 | spin_lock(&c->erase_completion_lock); |
90 | list_move(&jeb->list, &c->erase_pending_list); | 93 | list_move(&jeb->list, &c->erase_pending_list); |
@@ -97,9 +100,11 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, | |||
97 | } | 100 | } |
98 | 101 | ||
99 | if (ret == -EROFS) | 102 | if (ret == -EROFS) |
100 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset); | 103 | pr_warn("Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", |
104 | jeb->offset); | ||
101 | else | 105 | else |
102 | printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret); | 106 | pr_warn("Erase at 0x%08x failed immediately: errno %d\n", |
107 | jeb->offset, ret); | ||
103 | 108 | ||
104 | jffs2_erase_failed(c, jeb, bad_offset); | 109 | jffs2_erase_failed(c, jeb, bad_offset); |
105 | } | 110 | } |
@@ -125,13 +130,14 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) | |||
125 | 130 | ||
126 | work_done++; | 131 | work_done++; |
127 | if (!--count) { | 132 | if (!--count) { |
128 | D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n")); | 133 | jffs2_dbg(1, "Count reached. jffs2_erase_pending_blocks leaving\n"); |
129 | goto done; | 134 | goto done; |
130 | } | 135 | } |
131 | 136 | ||
132 | } else if (!list_empty(&c->erase_pending_list)) { | 137 | } else if (!list_empty(&c->erase_pending_list)) { |
133 | jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); | 138 | jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list); |
134 | D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset)); | 139 | jffs2_dbg(1, "Starting erase of pending block 0x%08x\n", |
140 | jeb->offset); | ||
135 | list_del(&jeb->list); | 141 | list_del(&jeb->list); |
136 | c->erasing_size += c->sector_size; | 142 | c->erasing_size += c->sector_size; |
137 | c->wasted_size -= jeb->wasted_size; | 143 | c->wasted_size -= jeb->wasted_size; |
@@ -159,13 +165,13 @@ int jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count) | |||
159 | spin_unlock(&c->erase_completion_lock); | 165 | spin_unlock(&c->erase_completion_lock); |
160 | mutex_unlock(&c->erase_free_sem); | 166 | mutex_unlock(&c->erase_free_sem); |
161 | done: | 167 | done: |
162 | D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n")); | 168 | jffs2_dbg(1, "jffs2_erase_pending_blocks completed\n"); |
163 | return work_done; | 169 | return work_done; |
164 | } | 170 | } |
165 | 171 | ||
166 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 172 | static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
167 | { | 173 | { |
168 | D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset)); | 174 | jffs2_dbg(1, "Erase completed successfully at 0x%08x\n", jeb->offset); |
169 | mutex_lock(&c->erase_free_sem); | 175 | mutex_lock(&c->erase_free_sem); |
170 | spin_lock(&c->erase_completion_lock); | 176 | spin_lock(&c->erase_completion_lock); |
171 | list_move_tail(&jeb->list, &c->erase_complete_list); | 177 | list_move_tail(&jeb->list, &c->erase_complete_list); |
@@ -214,7 +220,7 @@ static void jffs2_erase_callback(struct erase_info *instr) | |||
214 | struct erase_priv_struct *priv = (void *)instr->priv; | 220 | struct erase_priv_struct *priv = (void *)instr->priv; |
215 | 221 | ||
216 | if(instr->state != MTD_ERASE_DONE) { | 222 | if(instr->state != MTD_ERASE_DONE) { |
217 | printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", | 223 | pr_warn("Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n", |
218 | (unsigned long long)instr->addr, instr->state); | 224 | (unsigned long long)instr->addr, instr->state); |
219 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); | 225 | jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr); |
220 | } else { | 226 | } else { |
@@ -269,8 +275,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
269 | return; | 275 | return; |
270 | } | 276 | } |
271 | 277 | ||
272 | D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", | 278 | jffs2_dbg(1, "Removed nodes in range 0x%08x-0x%08x from ino #%u\n", |
273 | jeb->offset, jeb->offset + c->sector_size, ic->ino)); | 279 | jeb->offset, jeb->offset + c->sector_size, ic->ino); |
274 | 280 | ||
275 | D2({ | 281 | D2({ |
276 | int i=0; | 282 | int i=0; |
@@ -281,7 +287,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
281 | 287 | ||
282 | printk(KERN_DEBUG); | 288 | printk(KERN_DEBUG); |
283 | while(this) { | 289 | while(this) { |
284 | printk(KERN_CONT "0x%08x(%d)->", | 290 | pr_cont("0x%08x(%d)->", |
285 | ref_offset(this), ref_flags(this)); | 291 | ref_offset(this), ref_flags(this)); |
286 | if (++i == 5) { | 292 | if (++i == 5) { |
287 | printk(KERN_DEBUG); | 293 | printk(KERN_DEBUG); |
@@ -289,7 +295,7 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
289 | } | 295 | } |
290 | this = this->next_in_ino; | 296 | this = this->next_in_ino; |
291 | } | 297 | } |
292 | printk(KERN_CONT "\n"); | 298 | pr_cont("\n"); |
293 | }); | 299 | }); |
294 | 300 | ||
295 | switch (ic->class) { | 301 | switch (ic->class) { |
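The KERN_CONT to pr_cont() change in the D2() dump above is a straight substitution: pr_cont() appends to the log line started by the preceding printk() instead of opening a new one, just as printk(KERN_CONT ...) did. A small illustration of the pattern (not taken from this patch):

    /* Illustrative only: emits one KERN_DEBUG line of the form
     * "0x00001000(1)->0x00001044(0)->" terminated by a newline. */
    printk(KERN_DEBUG "0x%08x(%d)->", 0x1000, 1);
    pr_cont("0x%08x(%d)->", 0x1044, 0);
    pr_cont("\n");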
@@ -310,7 +316,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c, | |||
310 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) | 316 | void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb) |
311 | { | 317 | { |
312 | struct jffs2_raw_node_ref *block, *ref; | 318 | struct jffs2_raw_node_ref *block, *ref; |
313 | D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset)); | 319 | jffs2_dbg(1, "Freeing all node refs for eraseblock offset 0x%08x\n", |
320 | jeb->offset); | ||
314 | 321 | ||
315 | block = ref = jeb->first_node; | 322 | block = ref = jeb->first_node; |
316 | 323 | ||
@@ -342,12 +349,13 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
342 | &ebuf, NULL); | 349 | &ebuf, NULL); |
343 | if (ret != -EOPNOTSUPP) { | 350 | if (ret != -EOPNOTSUPP) { |
344 | if (ret) { | 351 | if (ret) { |
345 | D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); | 352 | jffs2_dbg(1, "MTD point failed %d\n", ret); |
346 | goto do_flash_read; | 353 | goto do_flash_read; |
347 | } | 354 | } |
348 | if (retlen < c->sector_size) { | 355 | if (retlen < c->sector_size) { |
349 | /* Don't muck about if it won't let us point to the whole erase sector */ | 356 | /* Don't muck about if it won't let us point to the whole erase sector */ |
350 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen)); | 357 | jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", |
358 | retlen); | ||
351 | mtd_unpoint(c->mtd, jeb->offset, retlen); | 359 | mtd_unpoint(c->mtd, jeb->offset, retlen); |
352 | goto do_flash_read; | 360 | goto do_flash_read; |
353 | } | 361 | } |
@@ -359,8 +367,10 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
359 | } while(--retlen); | 367 | } while(--retlen); |
360 | mtd_unpoint(c->mtd, jeb->offset, c->sector_size); | 368 | mtd_unpoint(c->mtd, jeb->offset, c->sector_size); |
361 | if (retlen) { | 369 | if (retlen) { |
362 | printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n", | 370 | pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n", |
363 | *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf)); | 371 | *wordebuf, |
372 | jeb->offset + | ||
373 | c->sector_size-retlen * sizeof(*wordebuf)); | ||
364 | return -EIO; | 374 | return -EIO; |
365 | } | 375 | } |
366 | return 0; | 376 | return 0; |
@@ -368,11 +378,12 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
368 | do_flash_read: | 378 | do_flash_read: |
369 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); | 379 | ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
370 | if (!ebuf) { | 380 | if (!ebuf) { |
371 | printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset); | 381 | pr_warn("Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", |
382 | jeb->offset); | ||
372 | return -EAGAIN; | 383 | return -EAGAIN; |
373 | } | 384 | } |
374 | 385 | ||
375 | D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset)); | 386 | jffs2_dbg(1, "Verifying erase at 0x%08x\n", jeb->offset); |
376 | 387 | ||
377 | for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { | 388 | for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) { |
378 | uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); | 389 | uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs); |
@@ -382,12 +393,14 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
382 | 393 | ||
383 | ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf); | 394 | ret = mtd_read(c->mtd, ofs, readlen, &retlen, ebuf); |
384 | if (ret) { | 395 | if (ret) { |
385 | printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret); | 396 | pr_warn("Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", |
397 | ofs, ret); | ||
386 | ret = -EIO; | 398 | ret = -EIO; |
387 | goto fail; | 399 | goto fail; |
388 | } | 400 | } |
389 | if (retlen != readlen) { | 401 | if (retlen != readlen) { |
390 | printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen); | 402 | pr_warn("Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", |
403 | ofs, readlen, retlen); | ||
391 | ret = -EIO; | 404 | ret = -EIO; |
392 | goto fail; | 405 | goto fail; |
393 | } | 406 | } |
@@ -396,7 +409,8 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl | |||
396 | unsigned long *datum = ebuf + i; | 409 | unsigned long *datum = ebuf + i; |
397 | if (*datum + 1) { | 410 | if (*datum + 1) { |
398 | *bad_offset += i; | 411 | *bad_offset += i; |
399 | printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset); | 412 | pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n", |
413 | *datum, *bad_offset); | ||
400 | ret = -EIO; | 414 | ret = -EIO; |
401 | goto fail; | 415 | goto fail; |
402 | } | 416 | } |
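For context, the *datum + 1 test in the hunk above is the erased-flash check: a freshly erased block reads back as all 0xFF bytes, so a fully erased word is ~0UL and wraps to zero when incremented; any non-zero result means some bit did not erase. A tiny illustration of the same check (assumed example values, not code from this patch):

    unsigned long word = ~0UL;      /* what a properly erased word reads back as */
    if (word + 1)                   /* non-zero => at least one bit is still 0 */
            pr_warn("word 0x%lx not fully erased\n", word);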
@@ -422,7 +436,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
422 | } | 436 | } |
423 | 437 | ||
424 | /* Write the erase complete marker */ | 438 | /* Write the erase complete marker */ |
425 | D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset)); | 439 | jffs2_dbg(1, "Writing erased marker to block at 0x%08x\n", jeb->offset); |
426 | bad_offset = jeb->offset; | 440 | bad_offset = jeb->offset; |
427 | 441 | ||
428 | /* Cleanmarker in oob area or no cleanmarker at all ? */ | 442 | /* Cleanmarker in oob area or no cleanmarker at all ? */ |
@@ -451,10 +465,10 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb | |||
451 | 465 | ||
452 | if (ret || retlen != sizeof(marker)) { | 466 | if (ret || retlen != sizeof(marker)) { |
453 | if (ret) | 467 | if (ret) |
454 | printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n", | 468 | pr_warn("Write clean marker to block at 0x%08x failed: %d\n", |
455 | jeb->offset, ret); | 469 | jeb->offset, ret); |
456 | else | 470 | else |
457 | printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", | 471 | pr_warn("Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n", |
458 | jeb->offset, sizeof(marker), retlen); | 472 | jeb->offset, sizeof(marker), retlen); |
459 | 473 | ||
460 | goto filebad; | 474 | goto filebad; |
diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 61e6723535b9..db3889ba8818 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
15 | #include <linux/time.h> | 17 | #include <linux/time.h> |
@@ -85,7 +87,8 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) | |||
85 | unsigned char *pg_buf; | 87 | unsigned char *pg_buf; |
86 | int ret; | 88 | int ret; |
87 | 89 | ||
88 | D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT)); | 90 | jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", |
91 | __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT); | ||
89 | 92 | ||
90 | BUG_ON(!PageLocked(pg)); | 93 | BUG_ON(!PageLocked(pg)); |
91 | 94 | ||
@@ -105,7 +108,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) | |||
105 | flush_dcache_page(pg); | 108 | flush_dcache_page(pg); |
106 | kunmap(pg); | 109 | kunmap(pg); |
107 | 110 | ||
108 | D2(printk(KERN_DEBUG "readpage finished\n")); | 111 | jffs2_dbg(2, "readpage finished\n"); |
109 | return ret; | 112 | return ret; |
110 | } | 113 | } |
111 | 114 | ||
@@ -144,7 +147,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
144 | return -ENOMEM; | 147 | return -ENOMEM; |
145 | *pagep = pg; | 148 | *pagep = pg; |
146 | 149 | ||
147 | D1(printk(KERN_DEBUG "jffs2_write_begin()\n")); | 150 | jffs2_dbg(1, "%s()\n", __func__); |
148 | 151 | ||
149 | if (pageofs > inode->i_size) { | 152 | if (pageofs > inode->i_size) { |
150 | /* Make new hole frag from old EOF to new page */ | 153 | /* Make new hole frag from old EOF to new page */ |
@@ -153,8 +156,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
153 | struct jffs2_full_dnode *fn; | 156 | struct jffs2_full_dnode *fn; |
154 | uint32_t alloc_len; | 157 | uint32_t alloc_len; |
155 | 158 | ||
156 | D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", | 159 | jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", |
157 | (unsigned int)inode->i_size, pageofs)); | 160 | (unsigned int)inode->i_size, pageofs); |
158 | 161 | ||
159 | ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, | 162 | ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, |
160 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 163 | ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
@@ -198,7 +201,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
198 | f->metadata = NULL; | 201 | f->metadata = NULL; |
199 | } | 202 | } |
200 | if (ret) { | 203 | if (ret) { |
201 | D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret)); | 204 | jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", |
205 | ret); | ||
202 | jffs2_mark_node_obsolete(c, fn->raw); | 206 | jffs2_mark_node_obsolete(c, fn->raw); |
203 | jffs2_free_full_dnode(fn); | 207 | jffs2_free_full_dnode(fn); |
204 | jffs2_complete_reservation(c); | 208 | jffs2_complete_reservation(c); |
@@ -222,7 +226,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, | |||
222 | if (ret) | 226 | if (ret) |
223 | goto out_page; | 227 | goto out_page; |
224 | } | 228 | } |
225 | D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags)); | 229 | jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags); |
226 | return ret; | 230 | return ret; |
227 | 231 | ||
228 | out_page: | 232 | out_page: |
@@ -248,8 +252,9 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, | |||
248 | int ret = 0; | 252 | int ret = 0; |
249 | uint32_t writtenlen = 0; | 253 | uint32_t writtenlen = 0; |
250 | 254 | ||
251 | D1(printk(KERN_DEBUG "jffs2_write_end(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", | 255 | jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", |
252 | inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags)); | 256 | __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT, |
257 | start, end, pg->flags); | ||
253 | 258 | ||
254 | /* We need to avoid deadlock with page_cache_read() in | 259 | /* We need to avoid deadlock with page_cache_read() in |
255 | jffs2_garbage_collect_pass(). So the page must be | 260 | jffs2_garbage_collect_pass(). So the page must be |
@@ -268,7 +273,8 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, | |||
268 | ri = jffs2_alloc_raw_inode(); | 273 | ri = jffs2_alloc_raw_inode(); |
269 | 274 | ||
270 | if (!ri) { | 275 | if (!ri) { |
271 | D1(printk(KERN_DEBUG "jffs2_write_end(): Allocation of raw inode failed\n")); | 276 | jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", |
277 | __func__); | ||
272 | unlock_page(pg); | 278 | unlock_page(pg); |
273 | page_cache_release(pg); | 279 | page_cache_release(pg); |
274 | return -ENOMEM; | 280 | return -ENOMEM; |
@@ -315,13 +321,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, | |||
315 | /* generic_file_write has written more to the page cache than we've | 321 | /* generic_file_write has written more to the page cache than we've |
316 | actually written to the medium. Mark the page !Uptodate so that | 322 | actually written to the medium. Mark the page !Uptodate so that |
317 | it gets reread */ | 323 | it gets reread */ |
318 | D1(printk(KERN_DEBUG "jffs2_write_end(): Not all bytes written. Marking page !uptodate\n")); | 324 | jffs2_dbg(1, "%s(): Not all bytes written. Marking page !uptodate\n", |
325 | __func__); | ||
319 | SetPageError(pg); | 326 | SetPageError(pg); |
320 | ClearPageUptodate(pg); | 327 | ClearPageUptodate(pg); |
321 | } | 328 | } |
322 | 329 | ||
323 | D1(printk(KERN_DEBUG "jffs2_write_end() returning %d\n", | 330 | jffs2_dbg(1, "%s() returning %d\n", |
324 | writtenlen > 0 ? writtenlen : ret)); | 331 | __func__, writtenlen > 0 ? writtenlen : ret); |
325 | unlock_page(pg); | 332 | unlock_page(pg); |
326 | page_cache_release(pg); | 333 | page_cache_release(pg); |
327 | return writtenlen > 0 ? writtenlen : ret; | 334 | return writtenlen > 0 ? writtenlen : ret; |
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index c0d5c9d770da..bb6f993ebca9 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/capability.h> | 15 | #include <linux/capability.h> |
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
15 | #include <linux/sched.h> | 17 | #include <linux/sched.h> |
@@ -39,7 +41,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
39 | int ret; | 41 | int ret; |
40 | int alloc_type = ALLOC_NORMAL; | 42 | int alloc_type = ALLOC_NORMAL; |
41 | 43 | ||
42 | D1(printk(KERN_DEBUG "jffs2_setattr(): ino #%lu\n", inode->i_ino)); | 44 | jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino); |
43 | 45 | ||
44 | /* Special cases - we don't want more than one data node | 46 | /* Special cases - we don't want more than one data node |
45 | for these types on the medium at any time. So setattr | 47 | for these types on the medium at any time. So setattr |
@@ -50,7 +52,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
50 | /* For these, we don't actually need to read the old node */ | 52 | /* For these, we don't actually need to read the old node */ |
51 | mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); | 53 | mdatalen = jffs2_encode_dev(&dev, inode->i_rdev); |
52 | mdata = (char *)&dev; | 54 | mdata = (char *)&dev; |
53 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of kdev_t\n", mdatalen)); | 55 | jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", |
56 | __func__, mdatalen); | ||
54 | } else if (S_ISLNK(inode->i_mode)) { | 57 | } else if (S_ISLNK(inode->i_mode)) { |
55 | mutex_lock(&f->sem); | 58 | mutex_lock(&f->sem); |
56 | mdatalen = f->metadata->size; | 59 | mdatalen = f->metadata->size; |
@@ -66,7 +69,8 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr) | |||
66 | return ret; | 69 | return ret; |
67 | } | 70 | } |
68 | mutex_unlock(&f->sem); | 71 | mutex_unlock(&f->sem); |
69 | D1(printk(KERN_DEBUG "jffs2_setattr(): Writing %d bytes of symlink target\n", mdatalen)); | 72 | jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n", |
73 | __func__, mdatalen); | ||
70 | } | 74 | } |
71 | 75 | ||
72 | ri = jffs2_alloc_raw_inode(); | 76 | ri = jffs2_alloc_raw_inode(); |
@@ -233,7 +237,8 @@ void jffs2_evict_inode (struct inode *inode) | |||
233 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); | 237 | struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); |
234 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); | 238 | struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); |
235 | 239 | ||
236 | D1(printk(KERN_DEBUG "jffs2_evict_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode)); | 240 | jffs2_dbg(1, "%s(): ino #%lu mode %o\n", |
241 | __func__, inode->i_ino, inode->i_mode); | ||
237 | truncate_inode_pages(&inode->i_data, 0); | 242 | truncate_inode_pages(&inode->i_data, 0); |
238 | end_writeback(inode); | 243 | end_writeback(inode); |
239 | jffs2_do_clear_inode(c, f); | 244 | jffs2_do_clear_inode(c, f); |
@@ -249,7 +254,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) | |||
249 | dev_t rdev = 0; | 254 | dev_t rdev = 0; |
250 | int ret; | 255 | int ret; |
251 | 256 | ||
252 | D1(printk(KERN_DEBUG "jffs2_iget(): ino == %lu\n", ino)); | 257 | jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino); |
253 | 258 | ||
254 | inode = iget_locked(sb, ino); | 259 | inode = iget_locked(sb, ino); |
255 | if (!inode) | 260 | if (!inode) |
@@ -317,14 +322,16 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) | |||
317 | /* Read the device numbers from the media */ | 322 | /* Read the device numbers from the media */ |
318 | if (f->metadata->size != sizeof(jdev.old_id) && | 323 | if (f->metadata->size != sizeof(jdev.old_id) && |
319 | f->metadata->size != sizeof(jdev.new_id)) { | 324 | f->metadata->size != sizeof(jdev.new_id)) { |
320 | printk(KERN_NOTICE "Device node has strange size %d\n", f->metadata->size); | 325 | pr_notice("Device node has strange size %d\n", |
326 | f->metadata->size); | ||
321 | goto error_io; | 327 | goto error_io; |
322 | } | 328 | } |
323 | D1(printk(KERN_DEBUG "Reading device numbers from flash\n")); | 329 | jffs2_dbg(1, "Reading device numbers from flash\n"); |
324 | ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); | 330 | ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size); |
325 | if (ret < 0) { | 331 | if (ret < 0) { |
326 | /* Eep */ | 332 | /* Eep */ |
327 | printk(KERN_NOTICE "Read device numbers for inode %lu failed\n", (unsigned long)inode->i_ino); | 333 | pr_notice("Read device numbers for inode %lu failed\n", |
334 | (unsigned long)inode->i_ino); | ||
328 | goto error; | 335 | goto error; |
329 | } | 336 | } |
330 | if (f->metadata->size == sizeof(jdev.old_id)) | 337 | if (f->metadata->size == sizeof(jdev.old_id)) |
@@ -339,12 +346,13 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) | |||
339 | break; | 346 | break; |
340 | 347 | ||
341 | default: | 348 | default: |
342 | printk(KERN_WARNING "jffs2_read_inode(): Bogus imode %o for ino %lu\n", inode->i_mode, (unsigned long)inode->i_ino); | 349 | pr_warn("%s(): Bogus i_mode %o for ino %lu\n", |
350 | __func__, inode->i_mode, (unsigned long)inode->i_ino); | ||
343 | } | 351 | } |
344 | 352 | ||
345 | mutex_unlock(&f->sem); | 353 | mutex_unlock(&f->sem); |
346 | 354 | ||
347 | D1(printk(KERN_DEBUG "jffs2_read_inode() returning\n")); | 355 | jffs2_dbg(1, "jffs2_read_inode() returning\n"); |
348 | unlock_new_inode(inode); | 356 | unlock_new_inode(inode); |
349 | return inode; | 357 | return inode; |
350 | 358 | ||
@@ -362,11 +370,13 @@ void jffs2_dirty_inode(struct inode *inode, int flags) | |||
362 | struct iattr iattr; | 370 | struct iattr iattr; |
363 | 371 | ||
364 | if (!(inode->i_state & I_DIRTY_DATASYNC)) { | 372 | if (!(inode->i_state & I_DIRTY_DATASYNC)) { |
365 | D2(printk(KERN_DEBUG "jffs2_dirty_inode() not calling setattr() for ino #%lu\n", inode->i_ino)); | 373 | jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n", |
374 | __func__, inode->i_ino); | ||
366 | return; | 375 | return; |
367 | } | 376 | } |
368 | 377 | ||
369 | D1(printk(KERN_DEBUG "jffs2_dirty_inode() calling setattr() for ino #%lu\n", inode->i_ino)); | 378 | jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n", |
379 | __func__, inode->i_ino); | ||
370 | 380 | ||
371 | iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; | 381 | iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME; |
372 | iattr.ia_mode = inode->i_mode; | 382 | iattr.ia_mode = inode->i_mode; |
@@ -414,7 +424,8 @@ struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_r | |||
414 | struct jffs2_inode_info *f; | 424 | struct jffs2_inode_info *f; |
415 | int ret; | 425 | int ret; |
416 | 426 | ||
417 | D1(printk(KERN_DEBUG "jffs2_new_inode(): dir_i %ld, mode 0x%x\n", dir_i->i_ino, mode)); | 427 | jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n", |
428 | __func__, dir_i->i_ino, mode); | ||
418 | 429 | ||
419 | c = JFFS2_SB_INFO(sb); | 430 | c = JFFS2_SB_INFO(sb); |
420 | 431 | ||
@@ -504,11 +515,11 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
504 | 515 | ||
505 | #ifndef CONFIG_JFFS2_FS_WRITEBUFFER | 516 | #ifndef CONFIG_JFFS2_FS_WRITEBUFFER |
506 | if (c->mtd->type == MTD_NANDFLASH) { | 517 | if (c->mtd->type == MTD_NANDFLASH) { |
507 | printk(KERN_ERR "jffs2: Cannot operate on NAND flash unless jffs2 NAND support is compiled in.\n"); | 518 | pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n"); |
508 | return -EINVAL; | 519 | return -EINVAL; |
509 | } | 520 | } |
510 | if (c->mtd->type == MTD_DATAFLASH) { | 521 | if (c->mtd->type == MTD_DATAFLASH) { |
511 | printk(KERN_ERR "jffs2: Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in.\n"); | 522 | pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n"); |
512 | return -EINVAL; | 523 | return -EINVAL; |
513 | } | 524 | } |
514 | #endif | 525 | #endif |
@@ -522,12 +533,13 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
522 | */ | 533 | */ |
523 | if ((c->sector_size * blocks) != c->flash_size) { | 534 | if ((c->sector_size * blocks) != c->flash_size) { |
524 | c->flash_size = c->sector_size * blocks; | 535 | c->flash_size = c->sector_size * blocks; |
525 | printk(KERN_INFO "jffs2: Flash size not aligned to erasesize, reducing to %dKiB\n", | 536 | pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n", |
526 | c->flash_size / 1024); | 537 | c->flash_size / 1024); |
527 | } | 538 | } |
528 | 539 | ||
529 | if (c->flash_size < 5*c->sector_size) { | 540 | if (c->flash_size < 5*c->sector_size) { |
530 | printk(KERN_ERR "jffs2: Too few erase blocks (%d)\n", c->flash_size / c->sector_size); | 541 | pr_err("Too few erase blocks (%d)\n", |
542 | c->flash_size / c->sector_size); | ||
531 | return -EINVAL; | 543 | return -EINVAL; |
532 | } | 544 | } |
533 | 545 | ||
@@ -550,17 +562,17 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) | |||
550 | if ((ret = jffs2_do_mount_fs(c))) | 562 | if ((ret = jffs2_do_mount_fs(c))) |
551 | goto out_inohash; | 563 | goto out_inohash; |
552 | 564 | ||
553 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): Getting root inode\n")); | 565 | jffs2_dbg(1, "%s(): Getting root inode\n", __func__); |
554 | root_i = jffs2_iget(sb, 1); | 566 | root_i = jffs2_iget(sb, 1); |
555 | if (IS_ERR(root_i)) { | 567 | if (IS_ERR(root_i)) { |
556 | D1(printk(KERN_WARNING "get root inode failed\n")); | 568 | jffs2_dbg(1, "get root inode failed\n"); |
557 | ret = PTR_ERR(root_i); | 569 | ret = PTR_ERR(root_i); |
558 | goto out_root; | 570 | goto out_root; |
559 | } | 571 | } |
560 | 572 | ||
561 | ret = -ENOMEM; | 573 | ret = -ENOMEM; |
562 | 574 | ||
563 | D1(printk(KERN_DEBUG "jffs2_do_fill_super(): d_alloc_root()\n")); | 575 | jffs2_dbg(1, "%s(): d_make_root()\n", __func__); |
564 | sb->s_root = d_make_root(root_i); | 576 | sb->s_root = d_make_root(root_i); |
565 | if (!sb->s_root) | 577 | if (!sb->s_root) |
566 | goto out_root; | 578 | goto out_root; |
@@ -618,20 +630,21 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
618 | */ | 630 | */ |
619 | inode = ilookup(OFNI_BS_2SFFJ(c), inum); | 631 | inode = ilookup(OFNI_BS_2SFFJ(c), inum); |
620 | if (!inode) { | 632 | if (!inode) { |
621 | D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n", | 633 | jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n", |
622 | inum)); | 634 | inum); |
623 | 635 | ||
624 | spin_lock(&c->inocache_lock); | 636 | spin_lock(&c->inocache_lock); |
625 | ic = jffs2_get_ino_cache(c, inum); | 637 | ic = jffs2_get_ino_cache(c, inum); |
626 | if (!ic) { | 638 | if (!ic) { |
627 | D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum)); | 639 | jffs2_dbg(1, "Inode cache for ino #%u is gone\n", |
640 | inum); | ||
628 | spin_unlock(&c->inocache_lock); | 641 | spin_unlock(&c->inocache_lock); |
629 | return NULL; | 642 | return NULL; |
630 | } | 643 | } |
631 | if (ic->state != INO_STATE_CHECKEDABSENT) { | 644 | if (ic->state != INO_STATE_CHECKEDABSENT) { |
632 | /* Wait for progress. Don't just loop */ | 645 | /* Wait for progress. Don't just loop */ |
633 | D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n", | 646 | jffs2_dbg(1, "Waiting for ino #%u in state %d\n", |
634 | ic->ino, ic->state)); | 647 | ic->ino, ic->state); |
635 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 648 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); |
636 | } else { | 649 | } else { |
637 | spin_unlock(&c->inocache_lock); | 650 | spin_unlock(&c->inocache_lock); |
@@ -649,8 +662,8 @@ struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c, | |||
649 | return ERR_CAST(inode); | 662 | return ERR_CAST(inode); |
650 | } | 663 | } |
651 | if (is_bad_inode(inode)) { | 664 | if (is_bad_inode(inode)) { |
652 | printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. unlinked %d\n", | 665 | pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n", |
653 | inum, unlinked); | 666 | inum, unlinked); |
654 | /* NB. This will happen again. We need to do something appropriate here. */ | 667 | /* NB. This will happen again. We need to do something appropriate here. */ |
655 | iput(inode); | 668 | iput(inode); |
656 | return ERR_PTR(-EIO); | 669 | return ERR_PTR(-EIO); |
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index 31dce611337c..ad271c70aa25 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
14 | |||
13 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
14 | #include <linux/mtd/mtd.h> | 16 | #include <linux/mtd/mtd.h> |
15 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -51,44 +53,44 @@ static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c) | |||
51 | number of free blocks is low. */ | 53 | number of free blocks is low. */ |
52 | again: | 54 | again: |
53 | if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { | 55 | if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) { |
54 | D1(printk(KERN_DEBUG "Picking block from bad_used_list to GC next\n")); | 56 | jffs2_dbg(1, "Picking block from bad_used_list to GC next\n"); |
55 | nextlist = &c->bad_used_list; | 57 | nextlist = &c->bad_used_list; |
56 | } else if (n < 50 && !list_empty(&c->erasable_list)) { | 58 | } else if (n < 50 && !list_empty(&c->erasable_list)) { |
57 | /* Note that most of them will have gone directly to be erased. | 59 | /* Note that most of them will have gone directly to be erased. |
58 | So don't favour the erasable_list _too_ much. */ | 60 | So don't favour the erasable_list _too_ much. */ |
59 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n")); | 61 | jffs2_dbg(1, "Picking block from erasable_list to GC next\n"); |
60 | nextlist = &c->erasable_list; | 62 | nextlist = &c->erasable_list; |
61 | } else if (n < 110 && !list_empty(&c->very_dirty_list)) { | 63 | } else if (n < 110 && !list_empty(&c->very_dirty_list)) { |
62 | /* Most of the time, pick one off the very_dirty list */ | 64 | /* Most of the time, pick one off the very_dirty list */ |
63 | D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n")); | 65 | jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n"); |
64 | nextlist = &c->very_dirty_list; | 66 | nextlist = &c->very_dirty_list; |
65 | } else if (n < 126 && !list_empty(&c->dirty_list)) { | 67 | } else if (n < 126 && !list_empty(&c->dirty_list)) { |
66 | D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n")); | 68 | jffs2_dbg(1, "Picking block from dirty_list to GC next\n"); |
67 | nextlist = &c->dirty_list; | 69 | nextlist = &c->dirty_list; |
68 | } else if (!list_empty(&c->clean_list)) { | 70 | } else if (!list_empty(&c->clean_list)) { |
69 | D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n")); | 71 | jffs2_dbg(1, "Picking block from clean_list to GC next\n"); |
70 | nextlist = &c->clean_list; | 72 | nextlist = &c->clean_list; |
71 | } else if (!list_empty(&c->dirty_list)) { | 73 | } else if (!list_empty(&c->dirty_list)) { |
72 | D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n")); | 74 | jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n"); |
73 | 75 | ||
74 | nextlist = &c->dirty_list; | 76 | nextlist = &c->dirty_list; |
75 | } else if (!list_empty(&c->very_dirty_list)) { | 77 | } else if (!list_empty(&c->very_dirty_list)) { |
76 | D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n")); | 78 | jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"); |
77 | nextlist = &c->very_dirty_list; | 79 | nextlist = &c->very_dirty_list; |
78 | } else if (!list_empty(&c->erasable_list)) { | 80 | } else if (!list_empty(&c->erasable_list)) { |
79 | D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n")); | 81 | jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"); |
80 | 82 | ||
81 | nextlist = &c->erasable_list; | 83 | nextlist = &c->erasable_list; |
82 | } else if (!list_empty(&c->erasable_pending_wbuf_list)) { | 84 | } else if (!list_empty(&c->erasable_pending_wbuf_list)) { |
83 | /* There are blocks waiting for the wbuf sync */ | 85 | /* There are blocks waiting for the wbuf sync */ |
84 | D1(printk(KERN_DEBUG "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n")); | 86 | jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n"); |
85 | spin_unlock(&c->erase_completion_lock); | 87 | spin_unlock(&c->erase_completion_lock); |
86 | jffs2_flush_wbuf_pad(c); | 88 | jffs2_flush_wbuf_pad(c); |
87 | spin_lock(&c->erase_completion_lock); | 89 | spin_lock(&c->erase_completion_lock); |
88 | goto again; | 90 | goto again; |
89 | } else { | 91 | } else { |
90 | /* Eep. All were empty */ | 92 | /* Eep. All were empty */ |
91 | D1(printk(KERN_NOTICE "jffs2: No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n")); | 93 | jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n"); |
92 | return NULL; | 94 | return NULL; |
93 | } | 95 | } |
94 | 96 | ||
@@ -97,13 +99,15 @@ again: | |||
97 | c->gcblock = ret; | 99 | c->gcblock = ret; |
98 | ret->gc_node = ret->first_node; | 100 | ret->gc_node = ret->first_node; |
99 | if (!ret->gc_node) { | 101 | if (!ret->gc_node) { |
100 | printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset); | 102 | pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n", |
103 | ret->offset); | ||
101 | BUG(); | 104 | BUG(); |
102 | } | 105 | } |
103 | 106 | ||
104 | /* Have we accidentally picked a clean block with wasted space ? */ | 107 | /* Have we accidentally picked a clean block with wasted space ? */ |
105 | if (ret->wasted_size) { | 108 | if (ret->wasted_size) { |
106 | D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size)); | 109 | jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n", |
110 | ret->wasted_size); | ||
107 | ret->dirty_size += ret->wasted_size; | 111 | ret->dirty_size += ret->wasted_size; |
108 | c->wasted_size -= ret->wasted_size; | 112 | c->wasted_size -= ret->wasted_size; |
109 | c->dirty_size += ret->wasted_size; | 113 | c->dirty_size += ret->wasted_size; |
@@ -140,8 +144,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
140 | 144 | ||
141 | /* checked_ino is protected by the alloc_sem */ | 145 | /* checked_ino is protected by the alloc_sem */ |
142 | if (c->checked_ino > c->highest_ino && xattr) { | 146 | if (c->checked_ino > c->highest_ino && xattr) { |
143 | printk(KERN_CRIT "Checked all inodes but still 0x%x bytes of unchecked space?\n", | 147 | pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n", |
144 | c->unchecked_size); | 148 | c->unchecked_size); |
145 | jffs2_dbg_dump_block_lists_nolock(c); | 149 | jffs2_dbg_dump_block_lists_nolock(c); |
146 | spin_unlock(&c->erase_completion_lock); | 150 | spin_unlock(&c->erase_completion_lock); |
147 | mutex_unlock(&c->alloc_sem); | 151 | mutex_unlock(&c->alloc_sem); |
@@ -163,8 +167,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
163 | } | 167 | } |
164 | 168 | ||
165 | if (!ic->pino_nlink) { | 169 | if (!ic->pino_nlink) { |
166 | D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink/pino zero\n", | 170 | jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n", |
167 | ic->ino)); | 171 | ic->ino); |
168 | spin_unlock(&c->inocache_lock); | 172 | spin_unlock(&c->inocache_lock); |
169 | jffs2_xattr_delete_inode(c, ic); | 173 | jffs2_xattr_delete_inode(c, ic); |
170 | continue; | 174 | continue; |
@@ -172,13 +176,15 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
172 | switch(ic->state) { | 176 | switch(ic->state) { |
173 | case INO_STATE_CHECKEDABSENT: | 177 | case INO_STATE_CHECKEDABSENT: |
174 | case INO_STATE_PRESENT: | 178 | case INO_STATE_PRESENT: |
175 | D1(printk(KERN_DEBUG "Skipping ino #%u already checked\n", ic->ino)); | 179 | jffs2_dbg(1, "Skipping ino #%u already checked\n", |
180 | ic->ino); | ||
176 | spin_unlock(&c->inocache_lock); | 181 | spin_unlock(&c->inocache_lock); |
177 | continue; | 182 | continue; |
178 | 183 | ||
179 | case INO_STATE_GC: | 184 | case INO_STATE_GC: |
180 | case INO_STATE_CHECKING: | 185 | case INO_STATE_CHECKING: |
181 | printk(KERN_WARNING "Inode #%u is in state %d during CRC check phase!\n", ic->ino, ic->state); | 186 | pr_warn("Inode #%u is in state %d during CRC check phase!\n", |
187 | ic->ino, ic->state); | ||
182 | spin_unlock(&c->inocache_lock); | 188 | spin_unlock(&c->inocache_lock); |
183 | BUG(); | 189 | BUG(); |
184 | 190 | ||
@@ -186,7 +192,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
186 | /* We need to wait for it to finish, lest we move on | 192 | /* We need to wait for it to finish, lest we move on |
187 | and trigger the BUG() above while we haven't yet | 193 | and trigger the BUG() above while we haven't yet |
188 | finished checking all its nodes */ | 194 | finished checking all its nodes */ |
189 | D1(printk(KERN_DEBUG "Waiting for ino #%u to finish reading\n", ic->ino)); | 195 | jffs2_dbg(1, "Waiting for ino #%u to finish reading\n", |
196 | ic->ino); | ||
190 | /* We need to come back again for the _same_ inode. We've | 197 | /* We need to come back again for the _same_ inode. We've |
191 | made no progress in this case, but that should be OK */ | 198 | made no progress in this case, but that should be OK */ |
192 | c->checked_ino--; | 199 | c->checked_ino--; |
@@ -204,11 +211,13 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
204 | ic->state = INO_STATE_CHECKING; | 211 | ic->state = INO_STATE_CHECKING; |
205 | spin_unlock(&c->inocache_lock); | 212 | spin_unlock(&c->inocache_lock); |
206 | 213 | ||
207 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() triggering inode scan of ino#%u\n", ic->ino)); | 214 | jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n", |
215 | __func__, ic->ino); | ||
208 | 216 | ||
209 | ret = jffs2_do_crccheck_inode(c, ic); | 217 | ret = jffs2_do_crccheck_inode(c, ic); |
210 | if (ret) | 218 | if (ret) |
211 | printk(KERN_WARNING "Returned error for crccheck of ino #%u. Expect badness...\n", ic->ino); | 219 | pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n", |
220 | ic->ino); | ||
212 | 221 | ||
213 | jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT); | 222 | jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT); |
214 | mutex_unlock(&c->alloc_sem); | 223 | mutex_unlock(&c->alloc_sem); |
@@ -220,11 +229,11 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
220 | !list_empty(&c->erase_pending_list)) { | 229 | !list_empty(&c->erase_pending_list)) { |
221 | spin_unlock(&c->erase_completion_lock); | 230 | spin_unlock(&c->erase_completion_lock); |
222 | mutex_unlock(&c->alloc_sem); | 231 | mutex_unlock(&c->alloc_sem); |
223 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() erasing pending blocks\n")); | 232 | jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__); |
224 | if (jffs2_erase_pending_blocks(c, 1)) | 233 | if (jffs2_erase_pending_blocks(c, 1)) |
225 | return 0; | 234 | return 0; |
226 | 235 | ||
227 | D1(printk(KERN_DEBUG "No progress from erasing blocks; doing GC anyway\n")); | 236 | jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n"); |
228 | spin_lock(&c->erase_completion_lock); | 237 | spin_lock(&c->erase_completion_lock); |
229 | mutex_lock(&c->alloc_sem); | 238 | mutex_lock(&c->alloc_sem); |
230 | } | 239 | } |
@@ -242,13 +251,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
242 | mutex_unlock(&c->alloc_sem); | 251 | mutex_unlock(&c->alloc_sem); |
243 | return -EAGAIN; | 252 | return -EAGAIN; |
244 | } | 253 | } |
245 | D1(printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n")); | 254 | jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n"); |
246 | spin_unlock(&c->erase_completion_lock); | 255 | spin_unlock(&c->erase_completion_lock); |
247 | mutex_unlock(&c->alloc_sem); | 256 | mutex_unlock(&c->alloc_sem); |
248 | return -EIO; | 257 | return -EIO; |
249 | } | 258 | } |
250 | 259 | ||
251 | D1(printk(KERN_DEBUG "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size)); | 260 | jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n", |
261 | jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size); | ||
252 | D1(if (c->nextblock) | 262 | D1(if (c->nextblock) |
253 | printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); | 263 | printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size)); |
254 | 264 | ||
@@ -261,12 +271,14 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
261 | gcblock_dirty = jeb->dirty_size; | 271 | gcblock_dirty = jeb->dirty_size; |
262 | 272 | ||
263 | while(ref_obsolete(raw)) { | 273 | while(ref_obsolete(raw)) { |
264 | D1(printk(KERN_DEBUG "Node at 0x%08x is obsolete... skipping\n", ref_offset(raw))); | 274 | jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n", |
275 | ref_offset(raw)); | ||
265 | raw = ref_next(raw); | 276 | raw = ref_next(raw); |
266 | if (unlikely(!raw)) { | 277 | if (unlikely(!raw)) { |
267 | printk(KERN_WARNING "eep. End of raw list while still supposedly nodes to GC\n"); | 278 | pr_warn("eep. End of raw list while still supposedly nodes to GC\n"); |
268 | printk(KERN_WARNING "erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", | 279 | pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n", |
269 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size); | 280 | jeb->offset, jeb->free_size, |
281 | jeb->dirty_size, jeb->used_size); | ||
270 | jeb->gc_node = raw; | 282 | jeb->gc_node = raw; |
271 | spin_unlock(&c->erase_completion_lock); | 283 | spin_unlock(&c->erase_completion_lock); |
272 | mutex_unlock(&c->alloc_sem); | 284 | mutex_unlock(&c->alloc_sem); |
@@ -275,7 +287,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
275 | } | 287 | } |
276 | jeb->gc_node = raw; | 288 | jeb->gc_node = raw; |
277 | 289 | ||
278 | D1(printk(KERN_DEBUG "Going to garbage collect node at 0x%08x\n", ref_offset(raw))); | 290 | jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n", |
291 | ref_offset(raw)); | ||
279 | 292 | ||
280 | if (!raw->next_in_ino) { | 293 | if (!raw->next_in_ino) { |
281 | /* Inode-less node. Clean marker, snapshot or something like that */ | 294 | /* Inode-less node. Clean marker, snapshot or something like that */ |
@@ -316,7 +329,9 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
316 | 329 | ||
317 | spin_unlock(&c->erase_completion_lock); | 330 | spin_unlock(&c->erase_completion_lock); |
318 | 331 | ||
319 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", jeb->offset, ref_offset(raw), ref_flags(raw), ic->ino)); | 332 | jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n", |
333 | __func__, jeb->offset, ref_offset(raw), ref_flags(raw), | ||
334 | ic->ino); | ||
320 | 335 | ||
321 | /* Three possibilities: | 336 | /* Three possibilities: |
322 | 1. Inode is already in-core. We must iget it and do proper | 337 | 1. Inode is already in-core. We must iget it and do proper |
@@ -336,8 +351,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
336 | if (ref_flags(raw) == REF_PRISTINE) | 351 | if (ref_flags(raw) == REF_PRISTINE) |
337 | ic->state = INO_STATE_GC; | 352 | ic->state = INO_STATE_GC; |
338 | else { | 353 | else { |
339 | D1(printk(KERN_DEBUG "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", | 354 | jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n", |
340 | ic->ino)); | 355 | ic->ino); |
341 | } | 356 | } |
342 | break; | 357 | break; |
343 | 358 | ||
@@ -353,8 +368,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
353 | we're holding the alloc_sem, no other garbage collection | 368 | we're holding the alloc_sem, no other garbage collection |
354 | can happen. | 369 | can happen. |
355 | */ | 370 | */ |
356 | printk(KERN_CRIT "Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", | 371 | pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n", |
357 | ic->ino, ic->state); | 372 | ic->ino, ic->state); |
358 | mutex_unlock(&c->alloc_sem); | 373 | mutex_unlock(&c->alloc_sem); |
359 | spin_unlock(&c->inocache_lock); | 374 | spin_unlock(&c->inocache_lock); |
360 | BUG(); | 375 | BUG(); |
@@ -367,8 +382,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
367 | drop the alloc_sem before sleeping. */ | 382 | drop the alloc_sem before sleeping. */ |
368 | 383 | ||
369 | mutex_unlock(&c->alloc_sem); | 384 | mutex_unlock(&c->alloc_sem); |
370 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_pass() waiting for ino #%u in state %d\n", | 385 | jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n", |
371 | ic->ino, ic->state)); | 386 | __func__, ic->ino, ic->state); |
372 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); | 387 | sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock); |
373 | /* And because we dropped the alloc_sem we must start again from the | 388 | /* And because we dropped the alloc_sem we must start again from the |
374 | beginning. Ponder chance of livelock here -- we're returning success | 389 | beginning. Ponder chance of livelock here -- we're returning success |
@@ -433,7 +448,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
433 | test_gcnode: | 448 | test_gcnode: |
434 | if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) { | 449 | if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) { |
435 | /* Eep. This really should never happen. GC is broken */ | 450 | /* Eep. This really should never happen. GC is broken */ |
436 | printk(KERN_ERR "Error garbage collecting node at %08x!\n", ref_offset(jeb->gc_node)); | 451 | pr_err("Error garbage collecting node at %08x!\n", |
452 | ref_offset(jeb->gc_node)); | ||
437 | ret = -ENOSPC; | 453 | ret = -ENOSPC; |
438 | } | 454 | } |
439 | release_sem: | 455 | release_sem: |
@@ -445,7 +461,8 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c) | |||
445 | 461 | ||
446 | eraseit: | 462 | eraseit: |
447 | if (c->gcblock && !c->gcblock->used_size) { | 463 | if (c->gcblock && !c->gcblock->used_size) { |
448 | D1(printk(KERN_DEBUG "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", c->gcblock->offset)); | 464 | jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n", |
465 | c->gcblock->offset); | ||
449 | /* We're GC'ing an empty block? */ | 466 | /* We're GC'ing an empty block? */ |
450 | list_add_tail(&c->gcblock->list, &c->erase_pending_list); | 467 | list_add_tail(&c->gcblock->list, &c->erase_pending_list); |
451 | c->gcblock = NULL; | 468 | c->gcblock = NULL; |
@@ -475,12 +492,12 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
475 | 492 | ||
476 | if (c->gcblock != jeb) { | 493 | if (c->gcblock != jeb) { |
477 | spin_unlock(&c->erase_completion_lock); | 494 | spin_unlock(&c->erase_completion_lock); |
478 | D1(printk(KERN_DEBUG "GC block is no longer gcblock. Restart\n")); | 495 | jffs2_dbg(1, "GC block is no longer gcblock. Restart\n"); |
479 | goto upnout; | 496 | goto upnout; |
480 | } | 497 | } |
481 | if (ref_obsolete(raw)) { | 498 | if (ref_obsolete(raw)) { |
482 | spin_unlock(&c->erase_completion_lock); | 499 | spin_unlock(&c->erase_completion_lock); |
483 | D1(printk(KERN_DEBUG "node to be GC'd was obsoleted in the meantime.\n")); | 500 | jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n"); |
484 | /* They'll call again */ | 501 | /* They'll call again */ |
485 | goto upnout; | 502 | goto upnout; |
486 | } | 503 | } |
@@ -536,10 +553,10 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era | |||
536 | } else if (fd) { | 553 | } else if (fd) { |
537 | ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd); | 554 | ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd); |
538 | } else { | 555 | } else { |
539 | printk(KERN_WARNING "Raw node at 0x%08x wasn't in node lists for ino #%u\n", | 556 | pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n", |
540 | ref_offset(raw), f->inocache->ino); | 557 | ref_offset(raw), f->inocache->ino); |
541 | if (ref_obsolete(raw)) { | 558 | if (ref_obsolete(raw)) { |
542 | printk(KERN_WARNING "But it's obsolete so we don't mind too much\n"); | 559 | pr_warn("But it's obsolete so we don't mind too much\n"); |
543 | } else { | 560 | } else { |
544 | jffs2_dbg_dump_node(c, ref_offset(raw)); | 561 | jffs2_dbg_dump_node(c, ref_offset(raw)); |
545 | BUG(); | 562 | BUG(); |
@@ -562,7 +579,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
562 | uint32_t crc, rawlen; | 579 | uint32_t crc, rawlen; |
563 | int retried = 0; | 580 | int retried = 0; |
564 | 581 | ||
565 | D1(printk(KERN_DEBUG "Going to GC REF_PRISTINE node at 0x%08x\n", ref_offset(raw))); | 582 | jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n", |
583 | ref_offset(raw)); | ||
566 | 584 | ||
567 | alloclen = rawlen = ref_totlen(c, c->gcblock, raw); | 585 | alloclen = rawlen = ref_totlen(c, c->gcblock, raw); |
568 | 586 | ||
@@ -595,8 +613,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
595 | 613 | ||
596 | crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4); | 614 | crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4); |
597 | if (je32_to_cpu(node->u.hdr_crc) != crc) { | 615 | if (je32_to_cpu(node->u.hdr_crc) != crc) { |
598 | printk(KERN_WARNING "Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 616 | pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
599 | ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc); | 617 | ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc); |
600 | goto bail; | 618 | goto bail; |
601 | } | 619 | } |
602 | 620 | ||
@@ -604,16 +622,18 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
604 | case JFFS2_NODETYPE_INODE: | 622 | case JFFS2_NODETYPE_INODE: |
605 | crc = crc32(0, node, sizeof(node->i)-8); | 623 | crc = crc32(0, node, sizeof(node->i)-8); |
606 | if (je32_to_cpu(node->i.node_crc) != crc) { | 624 | if (je32_to_cpu(node->i.node_crc) != crc) { |
607 | printk(KERN_WARNING "Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 625 | pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
608 | ref_offset(raw), je32_to_cpu(node->i.node_crc), crc); | 626 | ref_offset(raw), je32_to_cpu(node->i.node_crc), |
627 | crc); | ||
609 | goto bail; | 628 | goto bail; |
610 | } | 629 | } |
611 | 630 | ||
612 | if (je32_to_cpu(node->i.dsize)) { | 631 | if (je32_to_cpu(node->i.dsize)) { |
613 | crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize)); | 632 | crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize)); |
614 | if (je32_to_cpu(node->i.data_crc) != crc) { | 633 | if (je32_to_cpu(node->i.data_crc) != crc) { |
615 | printk(KERN_WARNING "Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 634 | pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
616 | ref_offset(raw), je32_to_cpu(node->i.data_crc), crc); | 635 | ref_offset(raw), |
636 | je32_to_cpu(node->i.data_crc), crc); | ||
617 | goto bail; | 637 | goto bail; |
618 | } | 638 | } |
619 | } | 639 | } |
@@ -622,21 +642,24 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
622 | case JFFS2_NODETYPE_DIRENT: | 642 | case JFFS2_NODETYPE_DIRENT: |
623 | crc = crc32(0, node, sizeof(node->d)-8); | 643 | crc = crc32(0, node, sizeof(node->d)-8); |
624 | if (je32_to_cpu(node->d.node_crc) != crc) { | 644 | if (je32_to_cpu(node->d.node_crc) != crc) { |
625 | printk(KERN_WARNING "Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 645 | pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
626 | ref_offset(raw), je32_to_cpu(node->d.node_crc), crc); | 646 | ref_offset(raw), |
647 | je32_to_cpu(node->d.node_crc), crc); | ||
627 | goto bail; | 648 | goto bail; |
628 | } | 649 | } |
629 | 650 | ||
630 | if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) { | 651 | if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) { |
631 | printk(KERN_WARNING "Name in dirent node at 0x%08x contains zeroes\n", ref_offset(raw)); | 652 | pr_warn("Name in dirent node at 0x%08x contains zeroes\n", |
653 | ref_offset(raw)); | ||
632 | goto bail; | 654 | goto bail; |
633 | } | 655 | } |
634 | 656 | ||
635 | if (node->d.nsize) { | 657 | if (node->d.nsize) { |
636 | crc = crc32(0, node->d.name, node->d.nsize); | 658 | crc = crc32(0, node->d.name, node->d.nsize); |
637 | if (je32_to_cpu(node->d.name_crc) != crc) { | 659 | if (je32_to_cpu(node->d.name_crc) != crc) { |
638 | printk(KERN_WARNING "Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 660 | pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
639 | ref_offset(raw), je32_to_cpu(node->d.name_crc), crc); | 661 | ref_offset(raw), |
662 | je32_to_cpu(node->d.name_crc), crc); | ||
640 | goto bail; | 663 | goto bail; |
641 | } | 664 | } |
642 | } | 665 | } |
@@ -644,8 +667,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
644 | default: | 667 | default: |
645 | /* If it's inode-less, we don't _know_ what it is. Just copy it intact */ | 668 | /* If it's inode-less, we don't _know_ what it is. Just copy it intact */ |
646 | if (ic) { | 669 | if (ic) { |
647 | printk(KERN_WARNING "Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", | 670 | pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n", |
648 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); | 671 | ref_offset(raw), je16_to_cpu(node->u.nodetype)); |
649 | goto bail; | 672 | goto bail; |
650 | } | 673 | } |
651 | } | 674 | } |
@@ -657,12 +680,13 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
657 | ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); | 680 | ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node); |
658 | 681 | ||
659 | if (ret || (retlen != rawlen)) { | 682 | if (ret || (retlen != rawlen)) { |
660 | printk(KERN_NOTICE "Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", | 683 | pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n", |
661 | rawlen, phys_ofs, ret, retlen); | 684 | rawlen, phys_ofs, ret, retlen); |
662 | if (retlen) { | 685 | if (retlen) { |
663 | jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL); | 686 | jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL); |
664 | } else { | 687 | } else { |
665 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", phys_ofs); | 688 | pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", |
689 | phys_ofs); | ||
666 | } | 690 | } |
667 | if (!retried) { | 691 | if (!retried) { |
668 | /* Try to reallocate space and retry */ | 692 | /* Try to reallocate space and retry */ |
@@ -671,7 +695,7 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
671 | 695 | ||
672 | retried = 1; | 696 | retried = 1; |
673 | 697 | ||
674 | D1(printk(KERN_DEBUG "Retrying failed write of REF_PRISTINE node.\n")); | 698 | jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n"); |
675 | 699 | ||
676 | jffs2_dbg_acct_sanity_check(c,jeb); | 700 | jffs2_dbg_acct_sanity_check(c,jeb); |
677 | jffs2_dbg_acct_paranoia_check(c, jeb); | 701 | jffs2_dbg_acct_paranoia_check(c, jeb); |
@@ -681,14 +705,16 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
681 | it is only an upper estimation */ | 705 | it is only an upper estimation */ |
682 | 706 | ||
683 | if (!ret) { | 707 | if (!ret) { |
684 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", phys_ofs)); | 708 | jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", |
709 | phys_ofs); | ||
685 | 710 | ||
686 | jffs2_dbg_acct_sanity_check(c,jeb); | 711 | jffs2_dbg_acct_sanity_check(c,jeb); |
687 | jffs2_dbg_acct_paranoia_check(c, jeb); | 712 | jffs2_dbg_acct_paranoia_check(c, jeb); |
688 | 713 | ||
689 | goto retry; | 714 | goto retry; |
690 | } | 715 | } |
691 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 716 | jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", |
717 | ret); | ||
692 | } | 718 | } |
693 | 719 | ||
694 | if (!ret) | 720 | if (!ret) |
@@ -698,7 +724,8 @@ static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c, | |||
698 | jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); | 724 | jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); |
699 | 725 | ||
700 | jffs2_mark_node_obsolete(c, raw); | 726 | jffs2_mark_node_obsolete(c, raw); |
701 | D1(printk(KERN_DEBUG "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", ref_offset(raw))); | 727 | jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n", |
728 | ref_offset(raw)); | ||
702 | 729 | ||
703 | out_node: | 730 | out_node: |
704 | kfree(node); | 731 | kfree(node); |
@@ -725,29 +752,32 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
725 | /* For these, we don't actually need to read the old node */ | 752 | /* For these, we don't actually need to read the old node */ |
726 | mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); | 753 | mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f)); |
727 | mdata = (char *)&dev; | 754 | mdata = (char *)&dev; |
728 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bytes of kdev_t\n", mdatalen)); | 755 | jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n", |
756 | __func__, mdatalen); | ||
729 | } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { | 757 | } else if (S_ISLNK(JFFS2_F_I_MODE(f))) { |
730 | mdatalen = fn->size; | 758 | mdatalen = fn->size; |
731 | mdata = kmalloc(fn->size, GFP_KERNEL); | 759 | mdata = kmalloc(fn->size, GFP_KERNEL); |
732 | if (!mdata) { | 760 | if (!mdata) { |
733 | printk(KERN_WARNING "kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n"); | 761 | pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n"); |
734 | return -ENOMEM; | 762 | return -ENOMEM; |
735 | } | 763 | } |
736 | ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); | 764 | ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen); |
737 | if (ret) { | 765 | if (ret) { |
738 | printk(KERN_WARNING "read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", ret); | 766 | pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n", |
767 | ret); | ||
739 | kfree(mdata); | 768 | kfree(mdata); |
740 | return ret; | 769 | return ret; |
741 | } | 770 | } |
742 | D1(printk(KERN_DEBUG "jffs2_garbage_collect_metadata(): Writing %d bites of symlink target\n", mdatalen)); | 771 | jffs2_dbg(1, "%s(): Writing %d bites of symlink target\n", |
772 | __func__, mdatalen); | ||
743 | 773 | ||
744 | } | 774 | } |
745 | 775 | ||
746 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen, | 776 | ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen, |
747 | JFFS2_SUMMARY_INODE_SIZE); | 777 | JFFS2_SUMMARY_INODE_SIZE); |
748 | if (ret) { | 778 | if (ret) { |
749 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", | 779 | pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n", |
750 | sizeof(ri)+ mdatalen, ret); | 780 | sizeof(ri) + mdatalen, ret); |
751 | goto out; | 781 | goto out; |
752 | } | 782 | } |
753 | 783 | ||
@@ -784,7 +814,7 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_ | |||
784 | new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); | 814 | new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC); |
785 | 815 | ||
786 | if (IS_ERR(new_fn)) { | 816 | if (IS_ERR(new_fn)) { |
787 | printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); | 817 | pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn)); |
788 | ret = PTR_ERR(new_fn); | 818 | ret = PTR_ERR(new_fn); |
789 | goto out; | 819 | goto out; |
790 | } | 820 | } |
@@ -827,14 +857,15 @@ static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_er | |||
827 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen, | 857 | ret = jffs2_reserve_space_gc(c, sizeof(rd)+rd.nsize, &alloclen, |
828 | JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); | 858 | JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize)); |
829 | if (ret) { | 859 | if (ret) { |
830 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", | 860 | pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n", |
831 | sizeof(rd)+rd.nsize, ret); | 861 | sizeof(rd)+rd.nsize, ret); |
832 | return ret; | 862 | return ret; |
833 | } | 863 | } |
834 | new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); | 864 | new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC); |
835 | 865 | ||
836 | if (IS_ERR(new_fd)) { | 866 | if (IS_ERR(new_fd)) { |
837 | printk(KERN_WARNING "jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", PTR_ERR(new_fd)); | 867 | pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n", |
868 | PTR_ERR(new_fd)); | ||
838 | return PTR_ERR(new_fd); | 869 | return PTR_ERR(new_fd); |
839 | } | 870 | } |
840 | jffs2_add_fd_to_list(c, new_fd, &f->dents); | 871 | jffs2_add_fd_to_list(c, new_fd, &f->dents); |
@@ -887,19 +918,22 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
887 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) | 918 | if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset)) |
888 | continue; | 919 | continue; |
889 | 920 | ||
890 | D1(printk(KERN_DEBUG "Check potential deletion dirent at %08x\n", ref_offset(raw))); | 921 | jffs2_dbg(1, "Check potential deletion dirent at %08x\n", |
922 | ref_offset(raw)); | ||
891 | 923 | ||
892 | /* This is an obsolete node belonging to the same directory, and it's of the right | 924 | /* This is an obsolete node belonging to the same directory, and it's of the right |
893 | length. We need to take a closer look...*/ | 925 | length. We need to take a closer look...*/ |
894 | ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd); | 926 | ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd); |
895 | if (ret) { | 927 | if (ret) { |
896 | printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Read error (%d) reading obsolete node at %08x\n", ret, ref_offset(raw)); | 928 | pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n", |
929 | __func__, ret, ref_offset(raw)); | ||
897 | /* If we can't read it, we don't need to continue to obsolete it. Continue */ | 930 | /* If we can't read it, we don't need to continue to obsolete it. Continue */ |
898 | continue; | 931 | continue; |
899 | } | 932 | } |
900 | if (retlen != rawlen) { | 933 | if (retlen != rawlen) { |
901 | printk(KERN_WARNING "jffs2_g_c_deletion_dirent(): Short read (%zd not %u) reading header from obsolete node at %08x\n", | 934 | pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n", |
902 | retlen, rawlen, ref_offset(raw)); | 935 | __func__, retlen, rawlen, |
936 | ref_offset(raw)); | ||
903 | continue; | 937 | continue; |
904 | } | 938 | } |
905 | 939 | ||
@@ -923,8 +957,9 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
923 | a new deletion dirent to replace it */ | 957 | a new deletion dirent to replace it */ |
924 | mutex_unlock(&c->erase_free_sem); | 958 | mutex_unlock(&c->erase_free_sem); |
925 | 959 | ||
926 | D1(printk(KERN_DEBUG "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", | 960 | jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n", |
927 | ref_offset(fd->raw), fd->name, ref_offset(raw), je32_to_cpu(rd->ino))); | 961 | ref_offset(fd->raw), fd->name, |
962 | ref_offset(raw), je32_to_cpu(rd->ino)); | ||
928 | kfree(rd); | 963 | kfree(rd); |
929 | 964 | ||
930 | return jffs2_garbage_collect_dirent(c, jeb, f, fd); | 965 | return jffs2_garbage_collect_dirent(c, jeb, f, fd); |
@@ -947,7 +982,8 @@ static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct | |||
947 | fdp = &(*fdp)->next; | 982 | fdp = &(*fdp)->next; |
948 | } | 983 | } |
949 | if (!found) { | 984 | if (!found) { |
950 | printk(KERN_WARNING "Deletion dirent \"%s\" not found in list for ino #%u\n", fd->name, f->inocache->ino); | 985 | pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n", |
986 | fd->name, f->inocache->ino); | ||
951 | } | 987 | } |
952 | jffs2_mark_node_obsolete(c, fd->raw); | 988 | jffs2_mark_node_obsolete(c, fd->raw); |
953 | jffs2_free_full_dirent(fd); | 989 | jffs2_free_full_dirent(fd); |
@@ -964,8 +1000,8 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
964 | uint32_t alloclen, ilen; | 1000 | uint32_t alloclen, ilen; |
965 | int ret; | 1001 | int ret; |
966 | 1002 | ||
967 | D1(printk(KERN_DEBUG "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", | 1003 | jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n", |
968 | f->inocache->ino, start, end)); | 1004 | f->inocache->ino, start, end); |
969 | 1005 | ||
970 | memset(&ri, 0, sizeof(ri)); | 1006 | memset(&ri, 0, sizeof(ri)); |
971 | 1007 | ||
@@ -976,35 +1012,37 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
976 | write it out again with the _same_ version as before */ | 1012 | write it out again with the _same_ version as before */ |
977 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); | 1013 | ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri); |
978 | if (readlen != sizeof(ri) || ret) { | 1014 | if (readlen != sizeof(ri) || ret) { |
979 | printk(KERN_WARNING "Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", ret, readlen); | 1015 | pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n", |
1016 | ret, readlen); | ||
980 | goto fill; | 1017 | goto fill; |
981 | } | 1018 | } |
982 | if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) { | 1019 | if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) { |
983 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n", | 1020 | pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n", |
984 | ref_offset(fn->raw), | 1021 | __func__, ref_offset(fn->raw), |
985 | je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE); | 1022 | je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE); |
986 | return -EIO; | 1023 | return -EIO; |
987 | } | 1024 | } |
988 | if (je32_to_cpu(ri.totlen) != sizeof(ri)) { | 1025 | if (je32_to_cpu(ri.totlen) != sizeof(ri)) { |
989 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n", | 1026 | pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n", |
990 | ref_offset(fn->raw), | 1027 | __func__, ref_offset(fn->raw), |
991 | je32_to_cpu(ri.totlen), sizeof(ri)); | 1028 | je32_to_cpu(ri.totlen), sizeof(ri)); |
992 | return -EIO; | 1029 | return -EIO; |
993 | } | 1030 | } |
994 | crc = crc32(0, &ri, sizeof(ri)-8); | 1031 | crc = crc32(0, &ri, sizeof(ri)-8); |
995 | if (crc != je32_to_cpu(ri.node_crc)) { | 1032 | if (crc != je32_to_cpu(ri.node_crc)) { |
996 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", | 1033 | pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n", |
997 | ref_offset(fn->raw), | 1034 | __func__, ref_offset(fn->raw), |
998 | je32_to_cpu(ri.node_crc), crc); | 1035 | je32_to_cpu(ri.node_crc), crc); |
999 | /* FIXME: We could possibly deal with this by writing new holes for each frag */ | 1036 | /* FIXME: We could possibly deal with this by writing new holes for each frag */ |
1000 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", | 1037 | pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", |
1001 | start, end, f->inocache->ino); | 1038 | start, end, f->inocache->ino); |
1002 | goto fill; | 1039 | goto fill; |
1003 | } | 1040 | } |
1004 | if (ri.compr != JFFS2_COMPR_ZERO) { | 1041 | if (ri.compr != JFFS2_COMPR_ZERO) { |
1005 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Node 0x%08x wasn't a hole node!\n", ref_offset(fn->raw)); | 1042 | pr_warn("%s(): Node 0x%08x wasn't a hole node!\n", |
1006 | printk(KERN_WARNING "Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", | 1043 | __func__, ref_offset(fn->raw)); |
1007 | start, end, f->inocache->ino); | 1044 | pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n", |
1045 | start, end, f->inocache->ino); | ||
1008 | goto fill; | 1046 | goto fill; |
1009 | } | 1047 | } |
1010 | } else { | 1048 | } else { |
@@ -1043,14 +1081,14 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1043 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen, | 1081 | ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen, |
1044 | JFFS2_SUMMARY_INODE_SIZE); | 1082 | JFFS2_SUMMARY_INODE_SIZE); |
1045 | if (ret) { | 1083 | if (ret) { |
1046 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", | 1084 | pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n", |
1047 | sizeof(ri), ret); | 1085 | sizeof(ri), ret); |
1048 | return ret; | 1086 | return ret; |
1049 | } | 1087 | } |
1050 | new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); | 1088 | new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC); |
1051 | 1089 | ||
1052 | if (IS_ERR(new_fn)) { | 1090 | if (IS_ERR(new_fn)) { |
1053 | printk(KERN_WARNING "Error writing new hole node: %ld\n", PTR_ERR(new_fn)); | 1091 | pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn)); |
1054 | return PTR_ERR(new_fn); | 1092 | return PTR_ERR(new_fn); |
1055 | } | 1093 | } |
1056 | if (je32_to_cpu(ri.version) == f->highest_version) { | 1094 | if (je32_to_cpu(ri.version) == f->highest_version) { |
@@ -1070,9 +1108,9 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1070 | * above.) | 1108 | * above.) |
1071 | */ | 1109 | */ |
1072 | D1(if(unlikely(fn->frags <= 1)) { | 1110 | D1(if(unlikely(fn->frags <= 1)) { |
1073 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n", | 1111 | pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n", |
1074 | fn->frags, je32_to_cpu(ri.version), f->highest_version, | 1112 | __func__, fn->frags, je32_to_cpu(ri.version), |
1075 | je32_to_cpu(ri.ino)); | 1113 | f->highest_version, je32_to_cpu(ri.ino)); |
1076 | }); | 1114 | }); |
1077 | 1115 | ||
1078 | /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ | 1116 | /* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */ |
@@ -1089,11 +1127,11 @@ static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eras | |||
1089 | } | 1127 | } |
1090 | } | 1128 | } |
1091 | if (fn->frags) { | 1129 | if (fn->frags) { |
1092 | printk(KERN_WARNING "jffs2_garbage_collect_hole: Old node still has frags!\n"); | 1130 | pr_warn("%s(): Old node still has frags!\n", __func__); |
1093 | BUG(); | 1131 | BUG(); |
1094 | } | 1132 | } |
1095 | if (!new_fn->frags) { | 1133 | if (!new_fn->frags) { |
1096 | printk(KERN_WARNING "jffs2_garbage_collect_hole: New node has no frags!\n"); | 1134 | pr_warn("%s(): New node has no frags!\n", __func__); |
1097 | BUG(); | 1135 | BUG(); |
1098 | } | 1136 | } |
1099 | 1137 | ||
@@ -1117,8 +1155,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1117 | 1155 | ||
1118 | memset(&ri, 0, sizeof(ri)); | 1156 | memset(&ri, 0, sizeof(ri)); |
1119 | 1157 | ||
1120 | D1(printk(KERN_DEBUG "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", | 1158 | jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n", |
1121 | f->inocache->ino, start, end)); | 1159 | f->inocache->ino, start, end); |
1122 | 1160 | ||
1123 | orig_end = end; | 1161 | orig_end = end; |
1124 | orig_start = start; | 1162 | orig_start = start; |
@@ -1149,15 +1187,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1149 | /* If the previous frag doesn't even reach the beginning, there's | 1187 | /* If the previous frag doesn't even reach the beginning, there's |
1150 | excessive fragmentation. Just merge. */ | 1188 | excessive fragmentation. Just merge. */ |
1151 | if (frag->ofs > min) { | 1189 | if (frag->ofs > min) { |
1152 | D1(printk(KERN_DEBUG "Expanding down to cover partial frag (0x%x-0x%x)\n", | 1190 | jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n", |
1153 | frag->ofs, frag->ofs+frag->size)); | 1191 | frag->ofs, frag->ofs+frag->size); |
1154 | start = frag->ofs; | 1192 | start = frag->ofs; |
1155 | continue; | 1193 | continue; |
1156 | } | 1194 | } |
1157 | /* OK. This frag holds the first byte of the page. */ | 1195 | /* OK. This frag holds the first byte of the page. */ |
1158 | if (!frag->node || !frag->node->raw) { | 1196 | if (!frag->node || !frag->node->raw) { |
1159 | D1(printk(KERN_DEBUG "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", | 1197 | jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n", |
1160 | frag->ofs, frag->ofs+frag->size)); | 1198 | frag->ofs, frag->ofs+frag->size); |
1161 | break; | 1199 | break; |
1162 | } else { | 1200 | } else { |
1163 | 1201 | ||
@@ -1171,19 +1209,25 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1171 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; | 1209 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; |
1172 | 1210 | ||
1173 | if (jeb == c->gcblock) { | 1211 | if (jeb == c->gcblock) { |
1174 | D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", | 1212 | jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n", |
1175 | frag->ofs, frag->ofs+frag->size, ref_offset(raw))); | 1213 | frag->ofs, |
1214 | frag->ofs + frag->size, | ||
1215 | ref_offset(raw)); | ||
1176 | start = frag->ofs; | 1216 | start = frag->ofs; |
1177 | break; | 1217 | break; |
1178 | } | 1218 | } |
1179 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { | 1219 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { |
1180 | D1(printk(KERN_DEBUG "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", | 1220 | jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n", |
1181 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1221 | frag->ofs, |
1222 | frag->ofs + frag->size, | ||
1223 | jeb->offset); | ||
1182 | break; | 1224 | break; |
1183 | } | 1225 | } |
1184 | 1226 | ||
1185 | D1(printk(KERN_DEBUG "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", | 1227 | jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n", |
1186 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1228 | frag->ofs, |
1229 | frag->ofs + frag->size, | ||
1230 | jeb->offset); | ||
1187 | start = frag->ofs; | 1231 | start = frag->ofs; |
1188 | break; | 1232 | break; |
1189 | } | 1233 | } |
@@ -1199,15 +1243,15 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1199 | /* If the previous frag doesn't even reach the beginning, there's lots | 1243 | /* If the previous frag doesn't even reach the beginning, there's lots |
1200 | of fragmentation. Just merge. */ | 1244 | of fragmentation. Just merge. */ |
1201 | if (frag->ofs+frag->size < max) { | 1245 | if (frag->ofs+frag->size < max) { |
1202 | D1(printk(KERN_DEBUG "Expanding up to cover partial frag (0x%x-0x%x)\n", | 1246 | jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n", |
1203 | frag->ofs, frag->ofs+frag->size)); | 1247 | frag->ofs, frag->ofs+frag->size); |
1204 | end = frag->ofs + frag->size; | 1248 | end = frag->ofs + frag->size; |
1205 | continue; | 1249 | continue; |
1206 | } | 1250 | } |
1207 | 1251 | ||
1208 | if (!frag->node || !frag->node->raw) { | 1252 | if (!frag->node || !frag->node->raw) { |
1209 | D1(printk(KERN_DEBUG "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", | 1253 | jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n", |
1210 | frag->ofs, frag->ofs+frag->size)); | 1254 | frag->ofs, frag->ofs+frag->size); |
1211 | break; | 1255 | break; |
1212 | } else { | 1256 | } else { |
1213 | 1257 | ||
@@ -1221,25 +1265,31 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1221 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; | 1265 | jeb = &c->blocks[raw->flash_offset / c->sector_size]; |
1222 | 1266 | ||
1223 | if (jeb == c->gcblock) { | 1267 | if (jeb == c->gcblock) { |
1224 | D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", | 1268 | jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n", |
1225 | frag->ofs, frag->ofs+frag->size, ref_offset(raw))); | 1269 | frag->ofs, |
1270 | frag->ofs + frag->size, | ||
1271 | ref_offset(raw)); | ||
1226 | end = frag->ofs + frag->size; | 1272 | end = frag->ofs + frag->size; |
1227 | break; | 1273 | break; |
1228 | } | 1274 | } |
1229 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { | 1275 | if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) { |
1230 | D1(printk(KERN_DEBUG "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", | 1276 | jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n", |
1231 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1277 | frag->ofs, |
1278 | frag->ofs + frag->size, | ||
1279 | jeb->offset); | ||
1232 | break; | 1280 | break; |
1233 | } | 1281 | } |
1234 | 1282 | ||
1235 | D1(printk(KERN_DEBUG "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", | 1283 | jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n", |
1236 | frag->ofs, frag->ofs+frag->size, jeb->offset)); | 1284 | frag->ofs, |
1285 | frag->ofs + frag->size, | ||
1286 | jeb->offset); | ||
1237 | end = frag->ofs + frag->size; | 1287 | end = frag->ofs + frag->size; |
1238 | break; | 1288 | break; |
1239 | } | 1289 | } |
1240 | } | 1290 | } |
1241 | D1(printk(KERN_DEBUG "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", | 1291 | jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n", |
1242 | orig_start, orig_end, start, end)); | 1292 | orig_start, orig_end, start, end); |
1243 | 1293 | ||
1244 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); | 1294 | D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size)); |
1245 | BUG_ON(end < orig_end); | 1295 | BUG_ON(end < orig_end); |
@@ -1256,7 +1306,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1256 | pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); | 1306 | pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg); |
1257 | 1307 | ||
1258 | if (IS_ERR(pg_ptr)) { | 1308 | if (IS_ERR(pg_ptr)) { |
1259 | printk(KERN_WARNING "read_cache_page() returned error: %ld\n", PTR_ERR(pg_ptr)); | 1309 | pr_warn("read_cache_page() returned error: %ld\n", |
1310 | PTR_ERR(pg_ptr)); | ||
1260 | return PTR_ERR(pg_ptr); | 1311 | return PTR_ERR(pg_ptr); |
1261 | } | 1312 | } |
1262 | 1313 | ||
@@ -1270,8 +1321,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1270 | &alloclen, JFFS2_SUMMARY_INODE_SIZE); | 1321 | &alloclen, JFFS2_SUMMARY_INODE_SIZE); |
1271 | 1322 | ||
1272 | if (ret) { | 1323 | if (ret) { |
1273 | printk(KERN_WARNING "jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", | 1324 | pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n", |
1274 | sizeof(ri)+ JFFS2_MIN_DATA_LEN, ret); | 1325 | sizeof(ri) + JFFS2_MIN_DATA_LEN, ret); |
1275 | break; | 1326 | break; |
1276 | } | 1327 | } |
1277 | cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); | 1328 | cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); |
@@ -1308,7 +1359,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era | |||
1308 | jffs2_free_comprbuf(comprbuf, writebuf); | 1359 | jffs2_free_comprbuf(comprbuf, writebuf); |
1309 | 1360 | ||
1310 | if (IS_ERR(new_fn)) { | 1361 | if (IS_ERR(new_fn)) { |
1311 | printk(KERN_WARNING "Error writing new dnode: %ld\n", PTR_ERR(new_fn)); | 1362 | pr_warn("Error writing new dnode: %ld\n", |
1363 | PTR_ERR(new_fn)); | ||
1312 | ret = PTR_ERR(new_fn); | 1364 | ret = PTR_ERR(new_fn); |
1313 | break; | 1365 | break; |
1314 | } | 1366 | } |
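
The gc.c hunks above all apply the same substitution: D1(printk(KERN_DEBUG ...)) becomes jffs2_dbg(1, ...), with __func__ replacing hand-written function names in the format strings. As a rough, hypothetical sketch only (the real jffs2_dbg() lives in fs/jffs2/debug.h and is not part of this diff), the helper can be thought of as a level-gated wrapper around pr_debug(); the knob and names below are illustrative, not the in-tree definition.

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical verbosity knob standing in for the JFFS2 debug Kconfig level. */
#define JFFS2_EXAMPLE_DEBUG_LEVEL 0

#define jffs2_dbg_example(level, fmt, ...)			\
do {								\
	if (JFFS2_EXAMPLE_DEBUG_LEVEL >= (level))		\
		pr_debug(fmt, ##__VA_ARGS__);			\
} while (0)

/* Same call shape as the converted sites above. */
static inline void example_gc_trace(u32 block_ofs)
{
	jffs2_dbg_example(1, "%s(): collecting from block @0x%08x\n",
			  __func__, block_ofs);
}
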
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c index c082868910f2..4f47aa24b556 100644 --- a/fs/jffs2/malloc.c +++ b/fs/jffs2/malloc.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
14 | #include <linux/init.h> | 16 | #include <linux/init.h> |
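
malloc.c (and the other jffs2 files below) gain a pr_fmt() definition ahead of the include block. The sketch below illustrates what that buys the pr_*() conversions, under the assumption that KBUILD_MODNAME expands to "jffs2" for these objects; the helper function is purely illustrative.

/* pr_fmt() must be defined before <linux/printk.h> is pulled in
 * (here indirectly via <linux/kernel.h>); otherwise the default
 * empty prefix is used by the pr_*() helpers. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static inline void example_warn(int err)
{
	/* Logs roughly "jffs2: kmalloc of mdata failed: -12" at KERN_WARNING. */
	pr_warn("kmalloc of mdata failed: %d\n", err);
}
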
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 5e03233c2363..975a1f562c10 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
14 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
@@ -687,8 +689,8 @@ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb | |||
687 | if (!size) | 689 | if (!size) |
688 | return 0; | 690 | return 0; |
689 | if (unlikely(size > jeb->free_size)) { | 691 | if (unlikely(size > jeb->free_size)) { |
690 | printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n", | 692 | pr_crit("Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n", |
691 | size, jeb->free_size, jeb->wasted_size); | 693 | size, jeb->free_size, jeb->wasted_size); |
692 | BUG(); | 694 | BUG(); |
693 | } | 695 | } |
694 | /* REF_EMPTY_NODE is !obsolete, so that works OK */ | 696 | /* REF_EMPTY_NODE is !obsolete, so that works OK */ |
@@ -726,8 +728,10 @@ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c, | |||
726 | 728 | ||
727 | /* Last node in block. Use free_space */ | 729 | /* Last node in block. Use free_space */ |
728 | if (unlikely(ref != jeb->last_node)) { | 730 | if (unlikely(ref != jeb->last_node)) { |
729 | printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", | 731 | pr_crit("ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n", |
730 | ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0); | 732 | ref, ref_offset(ref), jeb->last_node, |
733 | jeb->last_node ? | ||
734 | ref_offset(jeb->last_node) : 0); | ||
731 | BUG(); | 735 | BUG(); |
732 | } | 736 | } |
733 | ref_end = jeb->offset + c->sector_size - jeb->free_size; | 737 | ref_end = jeb->offset + c->sector_size - jeb->free_size; |
@@ -747,16 +751,20 @@ uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *je | |||
747 | if (!jeb) | 751 | if (!jeb) |
748 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; | 752 | jeb = &c->blocks[ref->flash_offset / c->sector_size]; |
749 | 753 | ||
750 | printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", | 754 | pr_crit("Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n", |
751 | ref, ref_offset(ref), ref_offset(ref)+ref->__totlen, | 755 | ref, ref_offset(ref), ref_offset(ref) + ref->__totlen, |
752 | ret, ref->__totlen); | 756 | ret, ref->__totlen); |
753 | if (ref_next(ref)) { | 757 | if (ref_next(ref)) { |
754 | printk(KERN_CRIT "next %p (0x%08x-0x%08x)\n", ref_next(ref), ref_offset(ref_next(ref)), | 758 | pr_crit("next %p (0x%08x-0x%08x)\n", |
755 | ref_offset(ref_next(ref))+ref->__totlen); | 759 | ref_next(ref), ref_offset(ref_next(ref)), |
760 | ref_offset(ref_next(ref)) + ref->__totlen); | ||
756 | } else | 761 | } else |
757 | printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node); | 762 | pr_crit("No next ref. jeb->last_node is %p\n", |
763 | jeb->last_node); | ||
758 | 764 | ||
759 | printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size); | 765 | pr_crit("jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", |
766 | jeb->wasted_size, jeb->dirty_size, jeb->used_size, | ||
767 | jeb->free_size); | ||
760 | 768 | ||
761 | #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) | 769 | #if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS) |
762 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); | 770 | __jffs2_dbg_dump_node_refs_nolock(c, jeb); |
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c index 694aa5b03505..6784d1e7a7eb 100644 --- a/fs/jffs2/nodemgmt.c +++ b/fs/jffs2/nodemgmt.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/mtd/mtd.h> | 15 | #include <linux/mtd/mtd.h> |
14 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
@@ -46,10 +48,10 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
46 | /* align it */ | 48 | /* align it */ |
47 | minsize = PAD(minsize); | 49 | minsize = PAD(minsize); |
48 | 50 | ||
49 | D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize)); | 51 | jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); |
50 | mutex_lock(&c->alloc_sem); | 52 | mutex_lock(&c->alloc_sem); |
51 | 53 | ||
52 | D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n")); | 54 | jffs2_dbg(1, "%s(): alloc sem got\n", __func__); |
53 | 55 | ||
54 | spin_lock(&c->erase_completion_lock); | 56 | spin_lock(&c->erase_completion_lock); |
55 | 57 | ||
@@ -73,11 +75,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
73 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; | 75 | dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size; |
74 | if (dirty < c->nospc_dirty_size) { | 76 | if (dirty < c->nospc_dirty_size) { |
75 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { | 77 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { |
76 | D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n")); | 78 | jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n", |
79 | __func__); | ||
77 | break; | 80 | break; |
78 | } | 81 | } |
79 | D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", | 82 | jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n", |
80 | dirty, c->unchecked_size, c->sector_size)); | 83 | dirty, c->unchecked_size, |
84 | c->sector_size); | ||
81 | 85 | ||
82 | spin_unlock(&c->erase_completion_lock); | 86 | spin_unlock(&c->erase_completion_lock); |
83 | mutex_unlock(&c->alloc_sem); | 87 | mutex_unlock(&c->alloc_sem); |
@@ -96,12 +100,13 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
96 | avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; | 100 | avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; |
97 | if ( (avail / c->sector_size) <= blocksneeded) { | 101 | if ( (avail / c->sector_size) <= blocksneeded) { |
98 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { | 102 | if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) { |
99 | D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n")); | 103 | jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n", |
104 | __func__); | ||
100 | break; | 105 | break; |
101 | } | 106 | } |
102 | 107 | ||
103 | D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", | 108 | jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n", |
104 | avail, blocksneeded * c->sector_size)); | 109 | avail, blocksneeded * c->sector_size); |
105 | spin_unlock(&c->erase_completion_lock); | 110 | spin_unlock(&c->erase_completion_lock); |
106 | mutex_unlock(&c->alloc_sem); | 111 | mutex_unlock(&c->alloc_sem); |
107 | return -ENOSPC; | 112 | return -ENOSPC; |
@@ -109,9 +114,14 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
109 | 114 | ||
110 | mutex_unlock(&c->alloc_sem); | 115 | mutex_unlock(&c->alloc_sem); |
111 | 116 | ||
112 | D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", | 117 | jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n", |
113 | c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size, | 118 | c->nr_free_blocks, c->nr_erasing_blocks, |
114 | c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size)); | 119 | c->free_size, c->dirty_size, c->wasted_size, |
120 | c->used_size, c->erasing_size, c->bad_size, | ||
121 | c->free_size + c->dirty_size + | ||
122 | c->wasted_size + c->used_size + | ||
123 | c->erasing_size + c->bad_size, | ||
124 | c->flash_size); | ||
115 | spin_unlock(&c->erase_completion_lock); | 125 | spin_unlock(&c->erase_completion_lock); |
116 | 126 | ||
117 | ret = jffs2_garbage_collect_pass(c); | 127 | ret = jffs2_garbage_collect_pass(c); |
@@ -124,7 +134,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
124 | DECLARE_WAITQUEUE(wait, current); | 134 | DECLARE_WAITQUEUE(wait, current); |
125 | set_current_state(TASK_UNINTERRUPTIBLE); | 135 | set_current_state(TASK_UNINTERRUPTIBLE); |
126 | add_wait_queue(&c->erase_wait, &wait); | 136 | add_wait_queue(&c->erase_wait, &wait); |
127 | D1(printk(KERN_DEBUG "%s waiting for erase to complete\n", __func__)); | 137 | jffs2_dbg(1, "%s waiting for erase to complete\n", |
138 | __func__); | ||
128 | spin_unlock(&c->erase_completion_lock); | 139 | spin_unlock(&c->erase_completion_lock); |
129 | 140 | ||
130 | schedule(); | 141 | schedule(); |
@@ -144,7 +155,7 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
144 | 155 | ||
145 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); | 156 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); |
146 | if (ret) { | 157 | if (ret) { |
147 | D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret)); | 158 | jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret); |
148 | } | 159 | } |
149 | } | 160 | } |
150 | spin_unlock(&c->erase_completion_lock); | 161 | spin_unlock(&c->erase_completion_lock); |
@@ -161,13 +172,14 @@ int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, | |||
161 | int ret = -EAGAIN; | 172 | int ret = -EAGAIN; |
162 | minsize = PAD(minsize); | 173 | minsize = PAD(minsize); |
163 | 174 | ||
164 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize)); | 175 | jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize); |
165 | 176 | ||
166 | spin_lock(&c->erase_completion_lock); | 177 | spin_lock(&c->erase_completion_lock); |
167 | while(ret == -EAGAIN) { | 178 | while(ret == -EAGAIN) { |
168 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); | 179 | ret = jffs2_do_reserve_space(c, minsize, len, sumsize); |
169 | if (ret) { | 180 | if (ret) { |
170 | D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret)); | 181 | jffs2_dbg(1, "%s(): looping, ret is %d\n", |
182 | __func__, ret); | ||
171 | } | 183 | } |
172 | } | 184 | } |
173 | spin_unlock(&c->erase_completion_lock); | 185 | spin_unlock(&c->erase_completion_lock); |
@@ -184,8 +196,8 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
184 | { | 196 | { |
185 | 197 | ||
186 | if (c->nextblock == NULL) { | 198 | if (c->nextblock == NULL) { |
187 | D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n", | 199 | jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n", |
188 | jeb->offset)); | 200 | __func__, jeb->offset); |
189 | return; | 201 | return; |
190 | } | 202 | } |
191 | /* Check, if we have a dirty block now, or if it was dirty already */ | 203 | /* Check, if we have a dirty block now, or if it was dirty already */ |
@@ -195,17 +207,20 @@ static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
195 | jeb->dirty_size += jeb->wasted_size; | 207 | jeb->dirty_size += jeb->wasted_size; |
196 | jeb->wasted_size = 0; | 208 | jeb->wasted_size = 0; |
197 | if (VERYDIRTY(c, jeb->dirty_size)) { | 209 | if (VERYDIRTY(c, jeb->dirty_size)) { |
198 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 210 | jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
199 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 211 | jeb->offset, jeb->free_size, jeb->dirty_size, |
212 | jeb->used_size); | ||
200 | list_add_tail(&jeb->list, &c->very_dirty_list); | 213 | list_add_tail(&jeb->list, &c->very_dirty_list); |
201 | } else { | 214 | } else { |
202 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 215 | jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
203 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 216 | jeb->offset, jeb->free_size, jeb->dirty_size, |
217 | jeb->used_size); | ||
204 | list_add_tail(&jeb->list, &c->dirty_list); | 218 | list_add_tail(&jeb->list, &c->dirty_list); |
205 | } | 219 | } |
206 | } else { | 220 | } else { |
207 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 221 | jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
208 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 222 | jeb->offset, jeb->free_size, jeb->dirty_size, |
223 | jeb->used_size); | ||
209 | list_add_tail(&jeb->list, &c->clean_list); | 224 | list_add_tail(&jeb->list, &c->clean_list); |
210 | } | 225 | } |
211 | c->nextblock = NULL; | 226 | c->nextblock = NULL; |
@@ -230,13 +245,14 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c) | |||
230 | list_move_tail(&ejeb->list, &c->erase_pending_list); | 245 | list_move_tail(&ejeb->list, &c->erase_pending_list); |
231 | c->nr_erasing_blocks++; | 246 | c->nr_erasing_blocks++; |
232 | jffs2_garbage_collect_trigger(c); | 247 | jffs2_garbage_collect_trigger(c); |
233 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n", | 248 | jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n", |
234 | ejeb->offset)); | 249 | __func__, ejeb->offset); |
235 | } | 250 | } |
236 | 251 | ||
237 | if (!c->nr_erasing_blocks && | 252 | if (!c->nr_erasing_blocks && |
238 | !list_empty(&c->erasable_pending_wbuf_list)) { | 253 | !list_empty(&c->erasable_pending_wbuf_list)) { |
239 | D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n")); | 254 | jffs2_dbg(1, "%s(): Flushing write buffer\n", |
255 | __func__); | ||
240 | /* c->nextblock is NULL, no update to c->nextblock allowed */ | 256 | /* c->nextblock is NULL, no update to c->nextblock allowed */ |
241 | spin_unlock(&c->erase_completion_lock); | 257 | spin_unlock(&c->erase_completion_lock); |
242 | jffs2_flush_wbuf_pad(c); | 258 | jffs2_flush_wbuf_pad(c); |
@@ -248,9 +264,11 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c) | |||
248 | if (!c->nr_erasing_blocks) { | 264 | if (!c->nr_erasing_blocks) { |
249 | /* Ouch. We're in GC, or we wouldn't have got here. | 265 | /* Ouch. We're in GC, or we wouldn't have got here. |
250 | And there's no space left. At all. */ | 266 | And there's no space left. At all. */ |
251 | printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", | 267 | pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n", |
252 | c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no", | 268 | c->nr_erasing_blocks, c->nr_free_blocks, |
253 | list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"); | 269 | list_empty(&c->erasable_list) ? "yes" : "no", |
270 | list_empty(&c->erasing_list) ? "yes" : "no", | ||
271 | list_empty(&c->erase_pending_list) ? "yes" : "no"); | ||
254 | return -ENOSPC; | 272 | return -ENOSPC; |
255 | } | 273 | } |
256 | 274 | ||
@@ -278,7 +296,8 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c) | |||
278 | c->wbuf_ofs = 0xffffffff; | 296 | c->wbuf_ofs = 0xffffffff; |
279 | #endif | 297 | #endif |
280 | 298 | ||
281 | D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset)); | 299 | jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", |
300 | __func__, c->nextblock->offset); | ||
282 | 301 | ||
283 | return 0; | 302 | return 0; |
284 | } | 303 | } |
@@ -345,7 +364,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
345 | 364 | ||
346 | if (jffs2_wbuf_dirty(c)) { | 365 | if (jffs2_wbuf_dirty(c)) { |
347 | spin_unlock(&c->erase_completion_lock); | 366 | spin_unlock(&c->erase_completion_lock); |
348 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n")); | 367 | jffs2_dbg(1, "%s(): Flushing write buffer\n", |
368 | __func__); | ||
349 | jffs2_flush_wbuf_pad(c); | 369 | jffs2_flush_wbuf_pad(c); |
350 | spin_lock(&c->erase_completion_lock); | 370 | spin_lock(&c->erase_completion_lock); |
351 | jeb = c->nextblock; | 371 | jeb = c->nextblock; |
@@ -387,7 +407,8 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
387 | jeb = c->nextblock; | 407 | jeb = c->nextblock; |
388 | 408 | ||
389 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { | 409 | if (jeb->free_size != c->sector_size - c->cleanmarker_size) { |
390 | printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size); | 410 | pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", |
411 | jeb->offset, jeb->free_size); | ||
391 | goto restart; | 412 | goto restart; |
392 | } | 413 | } |
393 | } | 414 | } |
@@ -408,8 +429,9 @@ static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, | |||
408 | spin_lock(&c->erase_completion_lock); | 429 | spin_lock(&c->erase_completion_lock); |
409 | } | 430 | } |
410 | 431 | ||
411 | D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", | 432 | jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n", |
412 | *len, jeb->offset + (c->sector_size - jeb->free_size))); | 433 | __func__, |
434 | *len, jeb->offset + (c->sector_size - jeb->free_size)); | ||
413 | return 0; | 435 | return 0; |
414 | } | 436 | } |
415 | 437 | ||
@@ -434,20 +456,22 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, | |||
434 | 456 | ||
435 | jeb = &c->blocks[ofs / c->sector_size]; | 457 | jeb = &c->blocks[ofs / c->sector_size]; |
436 | 458 | ||
437 | D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", | 459 | jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n", |
438 | ofs & ~3, ofs & 3, len)); | 460 | __func__, ofs & ~3, ofs & 3, len); |
439 | #if 1 | 461 | #if 1 |
440 | /* Allow non-obsolete nodes only to be added at the end of c->nextblock, | 462 | /* Allow non-obsolete nodes only to be added at the end of c->nextblock, |
441 | if c->nextblock is set. Note that wbuf.c will file obsolete nodes | 463 | if c->nextblock is set. Note that wbuf.c will file obsolete nodes |
442 | even after refiling c->nextblock */ | 464 | even after refiling c->nextblock */ |
443 | if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE)) | 465 | if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE)) |
444 | && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) { | 466 | && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) { |
445 | printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3); | 467 | pr_warn("argh. node added in wrong place at 0x%08x(%d)\n", |
468 | ofs & ~3, ofs & 3); | ||
446 | if (c->nextblock) | 469 | if (c->nextblock) |
447 | printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset); | 470 | pr_warn("nextblock 0x%08x", c->nextblock->offset); |
448 | else | 471 | else |
449 | printk(KERN_WARNING "No nextblock"); | 472 | pr_warn("No nextblock"); |
450 | printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size)); | 473 | pr_cont(", expected at %08x\n", |
474 | jeb->offset + (c->sector_size - jeb->free_size)); | ||
451 | return ERR_PTR(-EINVAL); | 475 | return ERR_PTR(-EINVAL); |
452 | } | 476 | } |
453 | #endif | 477 | #endif |
@@ -457,8 +481,9 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, | |||
457 | 481 | ||
458 | if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { | 482 | if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) { |
459 | /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ | 483 | /* If it lives on the dirty_list, jffs2_reserve_space will put it there */ |
460 | D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", | 484 | jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n", |
461 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 485 | jeb->offset, jeb->free_size, jeb->dirty_size, |
486 | jeb->used_size); | ||
462 | if (jffs2_wbuf_dirty(c)) { | 487 | if (jffs2_wbuf_dirty(c)) { |
463 | /* Flush the last write in the block if it's outstanding */ | 488 | /* Flush the last write in the block if it's outstanding */ |
464 | spin_unlock(&c->erase_completion_lock); | 489 | spin_unlock(&c->erase_completion_lock); |
@@ -480,7 +505,7 @@ struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c, | |||
480 | 505 | ||
481 | void jffs2_complete_reservation(struct jffs2_sb_info *c) | 506 | void jffs2_complete_reservation(struct jffs2_sb_info *c) |
482 | { | 507 | { |
483 | D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n")); | 508 | jffs2_dbg(1, "jffs2_complete_reservation()\n"); |
484 | spin_lock(&c->erase_completion_lock); | 509 | spin_lock(&c->erase_completion_lock); |
485 | jffs2_garbage_collect_trigger(c); | 510 | jffs2_garbage_collect_trigger(c); |
486 | spin_unlock(&c->erase_completion_lock); | 511 | spin_unlock(&c->erase_completion_lock); |
@@ -493,7 +518,7 @@ static inline int on_list(struct list_head *obj, struct list_head *head) | |||
493 | 518 | ||
494 | list_for_each(this, head) { | 519 | list_for_each(this, head) { |
495 | if (this == obj) { | 520 | if (this == obj) { |
496 | D1(printk("%p is on list at %p\n", obj, head)); | 521 | jffs2_dbg(1, "%p is on list at %p\n", obj, head); |
497 | return 1; | 522 | return 1; |
498 | 523 | ||
499 | } | 524 | } |
@@ -511,16 +536,18 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
511 | uint32_t freed_len; | 536 | uint32_t freed_len; |
512 | 537 | ||
513 | if(unlikely(!ref)) { | 538 | if(unlikely(!ref)) { |
514 | printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); | 539 | pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n"); |
515 | return; | 540 | return; |
516 | } | 541 | } |
517 | if (ref_obsolete(ref)) { | 542 | if (ref_obsolete(ref)) { |
518 | D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref))); | 543 | jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n", |
544 | __func__, ref_offset(ref)); | ||
519 | return; | 545 | return; |
520 | } | 546 | } |
521 | blocknr = ref->flash_offset / c->sector_size; | 547 | blocknr = ref->flash_offset / c->sector_size; |
522 | if (blocknr >= c->nr_blocks) { | 548 | if (blocknr >= c->nr_blocks) { |
523 | printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset); | 549 | pr_notice("raw node at 0x%08x is off the end of device!\n", |
550 | ref->flash_offset); | ||
524 | BUG(); | 551 | BUG(); |
525 | } | 552 | } |
526 | jeb = &c->blocks[blocknr]; | 553 | jeb = &c->blocks[blocknr]; |
@@ -542,27 +569,31 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
542 | 569 | ||
543 | if (ref_flags(ref) == REF_UNCHECKED) { | 570 | if (ref_flags(ref) == REF_UNCHECKED) { |
544 | D1(if (unlikely(jeb->unchecked_size < freed_len)) { | 571 | D1(if (unlikely(jeb->unchecked_size < freed_len)) { |
545 | printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", | 572 | pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n", |
546 | freed_len, blocknr, ref->flash_offset, jeb->used_size); | 573 | freed_len, blocknr, |
574 | ref->flash_offset, jeb->used_size); | ||
547 | BUG(); | 575 | BUG(); |
548 | }) | 576 | }) |
549 | D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len)); | 577 | jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n", |
578 | ref_offset(ref), freed_len); | ||
550 | jeb->unchecked_size -= freed_len; | 579 | jeb->unchecked_size -= freed_len; |
551 | c->unchecked_size -= freed_len; | 580 | c->unchecked_size -= freed_len; |
552 | } else { | 581 | } else { |
553 | D1(if (unlikely(jeb->used_size < freed_len)) { | 582 | D1(if (unlikely(jeb->used_size < freed_len)) { |
554 | printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", | 583 | pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n", |
555 | freed_len, blocknr, ref->flash_offset, jeb->used_size); | 584 | freed_len, blocknr, |
585 | ref->flash_offset, jeb->used_size); | ||
556 | BUG(); | 586 | BUG(); |
557 | }) | 587 | }) |
558 | D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len)); | 588 | jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ", |
589 | ref_offset(ref), freed_len); | ||
559 | jeb->used_size -= freed_len; | 590 | jeb->used_size -= freed_len; |
560 | c->used_size -= freed_len; | 591 | c->used_size -= freed_len; |
561 | } | 592 | } |
562 | 593 | ||
563 | // Take care, that wasted size is taken into concern | 594 | // Take care, that wasted size is taken into concern |
564 | if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { | 595 | if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) { |
565 | D1(printk("Dirtying\n")); | 596 | jffs2_dbg(1, "Dirtying\n"); |
566 | addedsize = freed_len; | 597 | addedsize = freed_len; |
567 | jeb->dirty_size += freed_len; | 598 | jeb->dirty_size += freed_len; |
568 | c->dirty_size += freed_len; | 599 | c->dirty_size += freed_len; |
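Editor's note: the D1(if (...) { ... BUG(); }) wrappers in the hunk above are deliberately kept. D1() compiles its entire argument out when JFFS2 debugging is disabled, which is still wanted for these sanity checks; only the plain message calls inside them are switched to pr_notice(). A minimal sketch of the wrapper, assuming the usual CONFIG_JFFS2_FS_DEBUG gate in fs/jffs2/debug.h:

/* Sketch only: expand the argument only when the compile-time
 * debug level is high enough, otherwise drop it entirely. */
#if CONFIG_JFFS2_FS_DEBUG > 0
#define D1(x) x
#else
#define D1(x)
#endif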
@@ -570,12 +601,12 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
570 | /* Convert wasted space to dirty, if not a bad block */ | 601 | /* Convert wasted space to dirty, if not a bad block */ |
571 | if (jeb->wasted_size) { | 602 | if (jeb->wasted_size) { |
572 | if (on_list(&jeb->list, &c->bad_used_list)) { | 603 | if (on_list(&jeb->list, &c->bad_used_list)) { |
573 | D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n", | 604 | jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n", |
574 | jeb->offset)); | 605 | jeb->offset); |
575 | addedsize = 0; /* To fool the refiling code later */ | 606 | addedsize = 0; /* To fool the refiling code later */ |
576 | } else { | 607 | } else { |
577 | D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n", | 608 | jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n", |
578 | jeb->wasted_size, jeb->offset)); | 609 | jeb->wasted_size, jeb->offset); |
579 | addedsize += jeb->wasted_size; | 610 | addedsize += jeb->wasted_size; |
580 | jeb->dirty_size += jeb->wasted_size; | 611 | jeb->dirty_size += jeb->wasted_size; |
581 | c->dirty_size += jeb->wasted_size; | 612 | c->dirty_size += jeb->wasted_size; |
@@ -584,7 +615,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
584 | } | 615 | } |
585 | } | 616 | } |
586 | } else { | 617 | } else { |
587 | D1(printk("Wasting\n")); | 618 | jffs2_dbg(1, "Wasting\n"); |
588 | addedsize = 0; | 619 | addedsize = 0; |
589 | jeb->wasted_size += freed_len; | 620 | jeb->wasted_size += freed_len; |
590 | c->wasted_size += freed_len; | 621 | c->wasted_size += freed_len; |
@@ -606,50 +637,57 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
606 | } | 637 | } |
607 | 638 | ||
608 | if (jeb == c->nextblock) { | 639 | if (jeb == c->nextblock) { |
609 | D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset)); | 640 | jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n", |
641 | jeb->offset); | ||
610 | } else if (!jeb->used_size && !jeb->unchecked_size) { | 642 | } else if (!jeb->used_size && !jeb->unchecked_size) { |
611 | if (jeb == c->gcblock) { | 643 | if (jeb == c->gcblock) { |
612 | D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset)); | 644 | jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", |
645 | jeb->offset); | ||
613 | c->gcblock = NULL; | 646 | c->gcblock = NULL; |
614 | } else { | 647 | } else { |
615 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset)); | 648 | jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", |
649 | jeb->offset); | ||
616 | list_del(&jeb->list); | 650 | list_del(&jeb->list); |
617 | } | 651 | } |
618 | if (jffs2_wbuf_dirty(c)) { | 652 | if (jffs2_wbuf_dirty(c)) { |
619 | D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n")); | 653 | jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n"); |
620 | list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); | 654 | list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list); |
621 | } else { | 655 | } else { |
622 | if (jiffies & 127) { | 656 | if (jiffies & 127) { |
623 | /* Most of the time, we just erase it immediately. Otherwise we | 657 | /* Most of the time, we just erase it immediately. Otherwise we |
624 | spend ages scanning it on mount, etc. */ | 658 | spend ages scanning it on mount, etc. */ |
625 | D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); | 659 | jffs2_dbg(1, "...and adding to erase_pending_list\n"); |
626 | list_add_tail(&jeb->list, &c->erase_pending_list); | 660 | list_add_tail(&jeb->list, &c->erase_pending_list); |
627 | c->nr_erasing_blocks++; | 661 | c->nr_erasing_blocks++; |
628 | jffs2_garbage_collect_trigger(c); | 662 | jffs2_garbage_collect_trigger(c); |
629 | } else { | 663 | } else { |
630 | /* Sometimes, however, we leave it elsewhere so it doesn't get | 664 | /* Sometimes, however, we leave it elsewhere so it doesn't get |
631 | immediately reused, and we spread the load a bit. */ | 665 | immediately reused, and we spread the load a bit. */ |
632 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | 666 | jffs2_dbg(1, "...and adding to erasable_list\n"); |
633 | list_add_tail(&jeb->list, &c->erasable_list); | 667 | list_add_tail(&jeb->list, &c->erasable_list); |
634 | } | 668 | } |
635 | } | 669 | } |
636 | D1(printk(KERN_DEBUG "Done OK\n")); | 670 | jffs2_dbg(1, "Done OK\n"); |
637 | } else if (jeb == c->gcblock) { | 671 | } else if (jeb == c->gcblock) { |
638 | D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset)); | 672 | jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n", |
673 | jeb->offset); | ||
639 | } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) { | 674 | } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) { |
640 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset)); | 675 | jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", |
676 | jeb->offset); | ||
641 | list_del(&jeb->list); | 677 | list_del(&jeb->list); |
642 | D1(printk(KERN_DEBUG "...and adding to dirty_list\n")); | 678 | jffs2_dbg(1, "...and adding to dirty_list\n"); |
643 | list_add_tail(&jeb->list, &c->dirty_list); | 679 | list_add_tail(&jeb->list, &c->dirty_list); |
644 | } else if (VERYDIRTY(c, jeb->dirty_size) && | 680 | } else if (VERYDIRTY(c, jeb->dirty_size) && |
645 | !VERYDIRTY(c, jeb->dirty_size - addedsize)) { | 681 | !VERYDIRTY(c, jeb->dirty_size - addedsize)) { |
646 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset)); | 682 | jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", |
683 | jeb->offset); | ||
647 | list_del(&jeb->list); | 684 | list_del(&jeb->list); |
648 | D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n")); | 685 | jffs2_dbg(1, "...and adding to very_dirty_list\n"); |
649 | list_add_tail(&jeb->list, &c->very_dirty_list); | 686 | list_add_tail(&jeb->list, &c->very_dirty_list); |
650 | } else { | 687 | } else { |
651 | D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", | 688 | jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n", |
652 | jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size)); | 689 | jeb->offset, jeb->free_size, jeb->dirty_size, |
690 | jeb->used_size); | ||
653 | } | 691 | } |
654 | 692 | ||
655 | spin_unlock(&c->erase_completion_lock); | 693 | spin_unlock(&c->erase_completion_lock); |
@@ -665,33 +703,40 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref | |||
665 | the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet | 703 | the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet |
666 | by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ | 704 | by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */ |
667 | 705 | ||
668 | D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref))); | 706 | jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n", |
707 | ref_offset(ref)); | ||
669 | ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); | 708 | ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); |
670 | if (ret) { | 709 | if (ret) { |
671 | printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); | 710 | pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n", |
711 | ref_offset(ref), ret); | ||
672 | goto out_erase_sem; | 712 | goto out_erase_sem; |
673 | } | 713 | } |
674 | if (retlen != sizeof(n)) { | 714 | if (retlen != sizeof(n)) { |
675 | printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); | 715 | pr_warn("Short read from obsoleted node at 0x%08x: %zd\n", |
716 | ref_offset(ref), retlen); | ||
676 | goto out_erase_sem; | 717 | goto out_erase_sem; |
677 | } | 718 | } |
678 | if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) { | 719 | if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) { |
679 | printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len); | 720 | pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", |
721 | je32_to_cpu(n.totlen), freed_len); | ||
680 | goto out_erase_sem; | 722 | goto out_erase_sem; |
681 | } | 723 | } |
682 | if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { | 724 | if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) { |
683 | D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype))); | 725 | jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", |
726 | ref_offset(ref), je16_to_cpu(n.nodetype)); | ||
684 | goto out_erase_sem; | 727 | goto out_erase_sem; |
685 | } | 728 | } |
686 | /* XXX FIXME: This is ugly now */ | 729 | /* XXX FIXME: This is ugly now */ |
687 | n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE); | 730 | n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE); |
688 | ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); | 731 | ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n); |
689 | if (ret) { | 732 | if (ret) { |
690 | printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret); | 733 | pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n", |
734 | ref_offset(ref), ret); | ||
691 | goto out_erase_sem; | 735 | goto out_erase_sem; |
692 | } | 736 | } |
693 | if (retlen != sizeof(n)) { | 737 | if (retlen != sizeof(n)) { |
694 | printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen); | 738 | pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n", |
739 | ref_offset(ref), retlen); | ||
695 | goto out_erase_sem; | 740 | goto out_erase_sem; |
696 | } | 741 | } |
697 | 742 | ||
@@ -751,8 +796,8 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) | |||
751 | return 1; | 796 | return 1; |
752 | 797 | ||
753 | if (c->unchecked_size) { | 798 | if (c->unchecked_size) { |
754 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", | 799 | jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n", |
755 | c->unchecked_size, c->checked_ino)); | 800 | c->unchecked_size, c->checked_ino); |
756 | return 1; | 801 | return 1; |
757 | } | 802 | } |
758 | 803 | ||
@@ -780,8 +825,9 @@ int jffs2_thread_should_wake(struct jffs2_sb_info *c) | |||
780 | } | 825 | } |
781 | } | 826 | } |
782 | 827 | ||
783 | D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n", | 828 | jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n", |
784 | c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no")); | 829 | __func__, c->nr_free_blocks, c->nr_erasing_blocks, |
830 | c->dirty_size, nr_very_dirty, ret ? "yes" : "no"); | ||
785 | 831 | ||
786 | return ret; | 832 | return ret; |
787 | } | 833 | } |
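Editor's note: the nodemgmt.c hunks above follow one mechanical pattern -- D1(printk(KERN_DEBUG ...)) becomes jffs2_dbg(1, ...), D2(...) becomes jffs2_dbg(2, ...), and bare printk(KERN_WARNING/KERN_NOTICE ...) becomes pr_warn()/pr_notice(). As a rough sketch (not the patch itself), a level-gated helper like jffs2_dbg() can be built on pr_debug(); the CONFIG_JFFS2_FS_DEBUG threshold shown here is assumed for illustration:

/* Illustrative sketch -- the real helper lives in fs/jffs2/debug.h.
 * A message is emitted only when the compile-time debug level is at
 * least the level passed as the first argument. */
#define jffs2_dbg(level, fmt, ...)                      \
do {                                                    \
        if (CONFIG_JFFS2_FS_DEBUG >= level)             \
                pr_debug(fmt, ##__VA_ARGS__);           \
} while (0)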
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index ab65ee3ec858..1cd3aec9d9ae 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h | |||
@@ -76,7 +76,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
76 | #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) | 76 | #define jffs2_write_nand_cleanmarker(c,jeb) (-EIO) |
77 | 77 | ||
78 | #define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf) | 78 | #define jffs2_flash_write(c, ofs, len, retlen, buf) jffs2_flash_direct_write(c, ofs, len, retlen, buf) |
79 | #define jffs2_flash_read(c, ofs, len, retlen, buf) ((c)->mtd->read((c)->mtd, ofs, len, retlen, buf)) | 79 | #define jffs2_flash_read(c, ofs, len, retlen, buf) (mtd_read((c)->mtd, ofs, len, retlen, buf)) |
80 | #define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; }) | 80 | #define jffs2_flush_wbuf_pad(c) ({ do{} while(0); (void)(c), 0; }) |
81 | #define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; }) | 81 | #define jffs2_flush_wbuf_gc(c, i) ({ do{} while(0); (void)(c), (void) i, 0; }) |
82 | #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) | 82 | #define jffs2_write_nand_badblock(c,jeb,bad_offset) (1) |
@@ -108,8 +108,6 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f) | |||
108 | 108 | ||
109 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) | 109 | #define jffs2_cleanmarker_oob(c) (c->mtd->type == MTD_NANDFLASH) |
110 | 110 | ||
111 | #define jffs2_flash_write_oob(c, ofs, len, retlen, buf) ((c)->mtd->write_oob((c)->mtd, ofs, len, retlen, buf)) | ||
112 | #define jffs2_flash_read_oob(c, ofs, len, retlen, buf) ((c)->mtd->read_oob((c)->mtd, ofs, len, retlen, buf)) | ||
113 | #define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len) | 111 | #define jffs2_wbuf_dirty(c) (!!(c)->wbuf_len) |
114 | 112 | ||
115 | /* wbuf.c */ | 113 | /* wbuf.c */ |
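Editor's note: the os-linux.h change replaces the raw method-pointer call (c)->mtd->read(...) with the mtd_read() accessor, and drops the jffs2_flash_*_oob() wrappers, whose users presumably call the mtd_*_oob() accessors directly now. A minimal sketch of such an accessor, assuming a 3.x-era struct mtd_info that still exposes a ->read() hook:

/* Sketch of a thin accessor around the driver hook; the real one is
 * declared in include/linux/mtd/mtd.h. */
static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                           size_t *retlen, u_char *buf)
{
        return mtd->read(mtd, from, len, retlen, buf);
}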
diff --git a/fs/jffs2/read.c b/fs/jffs2/read.c index 3f39be1b0455..0b042b1fc82f 100644 --- a/fs/jffs2/read.c +++ b/fs/jffs2/read.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
14 | #include <linux/crc32.h> | 16 | #include <linux/crc32.h> |
@@ -36,24 +38,25 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
36 | ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri); | 38 | ret = jffs2_flash_read(c, ref_offset(fd->raw), sizeof(*ri), &readlen, (char *)ri); |
37 | if (ret) { | 39 | if (ret) { |
38 | jffs2_free_raw_inode(ri); | 40 | jffs2_free_raw_inode(ri); |
39 | printk(KERN_WARNING "Error reading node from 0x%08x: %d\n", ref_offset(fd->raw), ret); | 41 | pr_warn("Error reading node from 0x%08x: %d\n", |
42 | ref_offset(fd->raw), ret); | ||
40 | return ret; | 43 | return ret; |
41 | } | 44 | } |
42 | if (readlen != sizeof(*ri)) { | 45 | if (readlen != sizeof(*ri)) { |
43 | jffs2_free_raw_inode(ri); | 46 | jffs2_free_raw_inode(ri); |
44 | printk(KERN_WARNING "Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", | 47 | pr_warn("Short read from 0x%08x: wanted 0x%zx bytes, got 0x%zx\n", |
45 | ref_offset(fd->raw), sizeof(*ri), readlen); | 48 | ref_offset(fd->raw), sizeof(*ri), readlen); |
46 | return -EIO; | 49 | return -EIO; |
47 | } | 50 | } |
48 | crc = crc32(0, ri, sizeof(*ri)-8); | 51 | crc = crc32(0, ri, sizeof(*ri)-8); |
49 | 52 | ||
50 | D1(printk(KERN_DEBUG "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", | 53 | jffs2_dbg(1, "Node read from %08x: node_crc %08x, calculated CRC %08x. dsize %x, csize %x, offset %x, buf %p\n", |
51 | ref_offset(fd->raw), je32_to_cpu(ri->node_crc), | 54 | ref_offset(fd->raw), je32_to_cpu(ri->node_crc), |
52 | crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), | 55 | crc, je32_to_cpu(ri->dsize), je32_to_cpu(ri->csize), |
53 | je32_to_cpu(ri->offset), buf)); | 56 | je32_to_cpu(ri->offset), buf); |
54 | if (crc != je32_to_cpu(ri->node_crc)) { | 57 | if (crc != je32_to_cpu(ri->node_crc)) { |
55 | printk(KERN_WARNING "Node CRC %08x != calculated CRC %08x for node at %08x\n", | 58 | pr_warn("Node CRC %08x != calculated CRC %08x for node at %08x\n", |
56 | je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); | 59 | je32_to_cpu(ri->node_crc), crc, ref_offset(fd->raw)); |
57 | ret = -EIO; | 60 | ret = -EIO; |
58 | goto out_ri; | 61 | goto out_ri; |
59 | } | 62 | } |
@@ -66,8 +69,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
66 | } | 69 | } |
67 | 70 | ||
68 | D1(if(ofs + len > je32_to_cpu(ri->dsize)) { | 71 | D1(if(ofs + len > je32_to_cpu(ri->dsize)) { |
69 | printk(KERN_WARNING "jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", | 72 | pr_warn("jffs2_read_dnode() asked for %d bytes at %d from %d-byte node\n", |
70 | len, ofs, je32_to_cpu(ri->dsize)); | 73 | len, ofs, je32_to_cpu(ri->dsize)); |
71 | ret = -EINVAL; | 74 | ret = -EINVAL; |
72 | goto out_ri; | 75 | goto out_ri; |
73 | }); | 76 | }); |
@@ -107,8 +110,8 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
107 | decomprbuf = readbuf; | 110 | decomprbuf = readbuf; |
108 | } | 111 | } |
109 | 112 | ||
110 | D2(printk(KERN_DEBUG "Read %d bytes to %p\n", je32_to_cpu(ri->csize), | 113 | jffs2_dbg(2, "Read %d bytes to %p\n", je32_to_cpu(ri->csize), |
111 | readbuf)); | 114 | readbuf); |
112 | ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), | 115 | ret = jffs2_flash_read(c, (ref_offset(fd->raw)) + sizeof(*ri), |
113 | je32_to_cpu(ri->csize), &readlen, readbuf); | 116 | je32_to_cpu(ri->csize), &readlen, readbuf); |
114 | 117 | ||
@@ -119,18 +122,19 @@ int jffs2_read_dnode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
119 | 122 | ||
120 | crc = crc32(0, readbuf, je32_to_cpu(ri->csize)); | 123 | crc = crc32(0, readbuf, je32_to_cpu(ri->csize)); |
121 | if (crc != je32_to_cpu(ri->data_crc)) { | 124 | if (crc != je32_to_cpu(ri->data_crc)) { |
122 | printk(KERN_WARNING "Data CRC %08x != calculated CRC %08x for node at %08x\n", | 125 | pr_warn("Data CRC %08x != calculated CRC %08x for node at %08x\n", |
123 | je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw)); | 126 | je32_to_cpu(ri->data_crc), crc, ref_offset(fd->raw)); |
124 | ret = -EIO; | 127 | ret = -EIO; |
125 | goto out_decomprbuf; | 128 | goto out_decomprbuf; |
126 | } | 129 | } |
127 | D2(printk(KERN_DEBUG "Data CRC matches calculated CRC %08x\n", crc)); | 130 | jffs2_dbg(2, "Data CRC matches calculated CRC %08x\n", crc); |
128 | if (ri->compr != JFFS2_COMPR_NONE) { | 131 | if (ri->compr != JFFS2_COMPR_NONE) { |
129 | D2(printk(KERN_DEBUG "Decompress %d bytes from %p to %d bytes at %p\n", | 132 | jffs2_dbg(2, "Decompress %d bytes from %p to %d bytes at %p\n", |
130 | je32_to_cpu(ri->csize), readbuf, je32_to_cpu(ri->dsize), decomprbuf)); | 133 | je32_to_cpu(ri->csize), readbuf, |
134 | je32_to_cpu(ri->dsize), decomprbuf); | ||
131 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); | 135 | ret = jffs2_decompress(c, f, ri->compr | (ri->usercompr << 8), readbuf, decomprbuf, je32_to_cpu(ri->csize), je32_to_cpu(ri->dsize)); |
132 | if (ret) { | 136 | if (ret) { |
133 | printk(KERN_WARNING "Error: jffs2_decompress returned %d\n", ret); | 137 | pr_warn("Error: jffs2_decompress returned %d\n", ret); |
134 | goto out_decomprbuf; | 138 | goto out_decomprbuf; |
135 | } | 139 | } |
136 | } | 140 | } |
@@ -157,8 +161,8 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
157 | struct jffs2_node_frag *frag; | 161 | struct jffs2_node_frag *frag; |
158 | int ret; | 162 | int ret; |
159 | 163 | ||
160 | D1(printk(KERN_DEBUG "jffs2_read_inode_range: ino #%u, range 0x%08x-0x%08x\n", | 164 | jffs2_dbg(1, "%s(): ino #%u, range 0x%08x-0x%08x\n", |
161 | f->inocache->ino, offset, offset+len)); | 165 | __func__, f->inocache->ino, offset, offset + len); |
162 | 166 | ||
163 | frag = jffs2_lookup_node_frag(&f->fragtree, offset); | 167 | frag = jffs2_lookup_node_frag(&f->fragtree, offset); |
164 | 168 | ||
@@ -168,22 +172,27 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
168 | * (or perhaps is before it, if we've been asked to read off the | 172 | * (or perhaps is before it, if we've been asked to read off the |
169 | * end of the file). */ | 173 | * end of the file). */ |
170 | while(offset < end) { | 174 | while(offset < end) { |
171 | D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end)); | 175 | jffs2_dbg(2, "%s(): offset %d, end %d\n", |
176 | __func__, offset, end); | ||
172 | if (unlikely(!frag || frag->ofs > offset || | 177 | if (unlikely(!frag || frag->ofs > offset || |
173 | frag->ofs + frag->size <= offset)) { | 178 | frag->ofs + frag->size <= offset)) { |
174 | uint32_t holesize = end - offset; | 179 | uint32_t holesize = end - offset; |
175 | if (frag && frag->ofs > offset) { | 180 | if (frag && frag->ofs > offset) { |
176 | D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset)); | 181 | jffs2_dbg(1, "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", |
182 | f->inocache->ino, frag->ofs, offset); | ||
177 | holesize = min(holesize, frag->ofs - offset); | 183 | holesize = min(holesize, frag->ofs - offset); |
178 | } | 184 | } |
179 | D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize)); | 185 | jffs2_dbg(1, "Filling non-frag hole from %d-%d\n", |
186 | offset, offset + holesize); | ||
180 | memset(buf, 0, holesize); | 187 | memset(buf, 0, holesize); |
181 | buf += holesize; | 188 | buf += holesize; |
182 | offset += holesize; | 189 | offset += holesize; |
183 | continue; | 190 | continue; |
184 | } else if (unlikely(!frag->node)) { | 191 | } else if (unlikely(!frag->node)) { |
185 | uint32_t holeend = min(end, frag->ofs + frag->size); | 192 | uint32_t holeend = min(end, frag->ofs + frag->size); |
186 | D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size)); | 193 | jffs2_dbg(1, "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", |
194 | offset, holeend, frag->ofs, | ||
195 | frag->ofs + frag->size); | ||
187 | memset(buf, 0, holeend - offset); | 196 | memset(buf, 0, holeend - offset); |
188 | buf += holeend - offset; | 197 | buf += holeend - offset; |
189 | offset = holeend; | 198 | offset = holeend; |
@@ -195,20 +204,23 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
195 | 204 | ||
196 | fragofs = offset - frag->ofs; | 205 | fragofs = offset - frag->ofs; |
197 | readlen = min(frag->size - fragofs, end - offset); | 206 | readlen = min(frag->size - fragofs, end - offset); |
198 | D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%08x (%d)\n", | 207 | jffs2_dbg(1, "Reading %d-%d from node at 0x%08x (%d)\n", |
199 | frag->ofs+fragofs, frag->ofs+fragofs+readlen, | 208 | frag->ofs+fragofs, |
200 | ref_offset(frag->node->raw), ref_flags(frag->node->raw))); | 209 | frag->ofs + fragofs+readlen, |
210 | ref_offset(frag->node->raw), | ||
211 | ref_flags(frag->node->raw)); | ||
201 | ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); | 212 | ret = jffs2_read_dnode(c, f, frag->node, buf, fragofs + frag->ofs - frag->node->ofs, readlen); |
202 | D2(printk(KERN_DEBUG "node read done\n")); | 213 | jffs2_dbg(2, "node read done\n"); |
203 | if (ret) { | 214 | if (ret) { |
204 | D1(printk(KERN_DEBUG"jffs2_read_inode_range error %d\n",ret)); | 215 | jffs2_dbg(1, "%s(): error %d\n", |
216 | __func__, ret); | ||
205 | memset(buf, 0, readlen); | 217 | memset(buf, 0, readlen); |
206 | return ret; | 218 | return ret; |
207 | } | 219 | } |
208 | buf += readlen; | 220 | buf += readlen; |
209 | offset += readlen; | 221 | offset += readlen; |
210 | frag = frag_next(frag); | 222 | frag = frag_next(frag); |
211 | D2(printk(KERN_DEBUG "node read was OK. Looping\n")); | 223 | jffs2_dbg(2, "node read was OK. Looping\n"); |
212 | } | 224 | } |
213 | } | 225 | } |
214 | return 0; | 226 | return 0; |
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 3093ac4fb24c..dc0437e84763 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
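Editor's note: read.c, readinode.c and scan.c each gain a pr_fmt() definition ahead of the first include, so every pr_warn()/pr_notice() in those files is automatically prefixed with the module name. The mechanism comes from <linux/printk.h>, which expands the level macros roughly as follows; the sample output line is illustrative:

/* Roughly what printk.h does: the default pr_fmt() is a no-op, so
 * defining it before the includes rewrites every pr_* call in the file. */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif
#define pr_warn(fmt, ...) printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

/* With "#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt" in place,
 *   pr_warn("Empty flash at 0x%08x ends at 0x%08x\n", ...);
 * prints something like "jffs2: Empty flash at ... ends at ...". */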
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index f99464833bb2..7654e87b0428 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
14 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
@@ -22,15 +24,15 @@ | |||
22 | 24 | ||
23 | #define DEFAULT_EMPTY_SCAN_SIZE 256 | 25 | #define DEFAULT_EMPTY_SCAN_SIZE 256 |
24 | 26 | ||
25 | #define noisy_printk(noise, args...) do { \ | 27 | #define noisy_printk(noise, fmt, ...) \ |
26 | if (*(noise)) { \ | 28 | do { \ |
27 | printk(KERN_NOTICE args); \ | 29 | if (*(noise)) { \ |
28 | (*(noise))--; \ | 30 | pr_notice(fmt, ##__VA_ARGS__); \ |
29 | if (!(*(noise))) { \ | 31 | (*(noise))--; \ |
30 | printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \ | 32 | if (!(*(noise))) \ |
31 | } \ | 33 | pr_notice("Further such events for this erase block will not be printed\n"); \ |
32 | } \ | 34 | } \ |
33 | } while(0) | 35 | } while (0) |
34 | 36 | ||
35 | static uint32_t pseudo_random; | 37 | static uint32_t pseudo_random; |
36 | 38 | ||
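Editor's note: the reworked noisy_printk() keeps the old behaviour -- print until the per-eraseblock noise budget runs out, then announce that further events are suppressed -- but routes through pr_notice() and takes an explicit format/vararg pair. Usage is unchanged; the caller seeds a counter and passes its address. The budget of 10 below is only an illustrative guess, not taken from the patch:

/* Illustrative usage only: repeated corruption reports for the same
 * erase block stop once the caller's noise budget is exhausted. */
static void report_bad_magic(uint32_t ofs, uint16_t magic)
{
        int noise = 10; /* assumed budget; the real seed is set in jffs2_scan_eraseblock() */

        noisy_printk(&noise,
                     "Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
                     JFFS2_MAGIC_BITMASK, ofs, magic);
}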
@@ -96,18 +98,17 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
96 | #ifndef __ECOS | 98 | #ifndef __ECOS |
97 | size_t pointlen, try_size; | 99 | size_t pointlen, try_size; |
98 | 100 | ||
99 | if (c->mtd->point) { | 101 | ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen, |
100 | ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen, | 102 | (void **)&flashbuf, NULL); |
101 | (void **)&flashbuf, NULL); | 103 | if (!ret && pointlen < c->mtd->size) { |
102 | if (!ret && pointlen < c->mtd->size) { | 104 | /* Don't muck about if it won't let us point to the whole flash */ |
103 | /* Don't muck about if it won't let us point to the whole flash */ | 105 | jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n", |
104 | D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen)); | 106 | pointlen); |
105 | mtd_unpoint(c->mtd, 0, pointlen); | 107 | mtd_unpoint(c->mtd, 0, pointlen); |
106 | flashbuf = NULL; | 108 | flashbuf = NULL; |
107 | } | ||
108 | if (ret && ret != -EOPNOTSUPP) | ||
109 | D1(printk(KERN_DEBUG "MTD point failed %d\n", ret)); | ||
110 | } | 109 | } |
110 | if (ret && ret != -EOPNOTSUPP) | ||
111 | jffs2_dbg(1, "MTD point failed %d\n", ret); | ||
111 | #endif | 112 | #endif |
112 | if (!flashbuf) { | 113 | if (!flashbuf) { |
113 | /* For NAND it's quicker to read a whole eraseblock at a time, | 114 | /* For NAND it's quicker to read a whole eraseblock at a time, |
@@ -117,15 +118,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
117 | else | 118 | else |
118 | try_size = PAGE_SIZE; | 119 | try_size = PAGE_SIZE; |
119 | 120 | ||
120 | D1(printk(KERN_DEBUG "Trying to allocate readbuf of %zu " | 121 | jffs2_dbg(1, "Trying to allocate readbuf of %zu " |
121 | "bytes\n", try_size)); | 122 | "bytes\n", try_size); |
122 | 123 | ||
123 | flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); | 124 | flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size); |
124 | if (!flashbuf) | 125 | if (!flashbuf) |
125 | return -ENOMEM; | 126 | return -ENOMEM; |
126 | 127 | ||
127 | D1(printk(KERN_DEBUG "Allocated readbuf of %zu bytes\n", | 128 | jffs2_dbg(1, "Allocated readbuf of %zu bytes\n", |
128 | try_size)); | 129 | try_size); |
129 | 130 | ||
130 | buf_size = (uint32_t)try_size; | 131 | buf_size = (uint32_t)try_size; |
131 | } | 132 | } |
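Editor's note: jffs2_scan_medium() no longer tests c->mtd->point before calling; it calls mtd_point() unconditionally and treats -EOPNOTSUPP as "no XIP mapping available", falling back to the read buffer. That works because the accessor itself performs the capability check, along these lines (a sketch, not the exact kernel definition):

/* Sketch: the accessor hides the "does the driver implement point()?"
 * test, so callers only need to handle the -EOPNOTSUPP return code. */
static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len,
                            size_t *retlen, void **virt, resource_size_t *phys)
{
        *retlen = 0;
        if (!mtd->point)
                return -EOPNOTSUPP;
        return mtd->point(mtd, from, len, retlen, virt, phys);
}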
@@ -178,7 +179,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
178 | c->nr_free_blocks++; | 179 | c->nr_free_blocks++; |
179 | } else { | 180 | } else { |
180 | /* Dirt */ | 181 | /* Dirt */ |
181 | D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset)); | 182 | jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n", |
183 | jeb->offset); | ||
182 | list_add(&jeb->list, &c->erase_pending_list); | 184 | list_add(&jeb->list, &c->erase_pending_list); |
183 | c->nr_erasing_blocks++; | 185 | c->nr_erasing_blocks++; |
184 | } | 186 | } |
@@ -205,7 +207,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
205 | } | 207 | } |
206 | /* update collected summary information for the current nextblock */ | 208 | /* update collected summary information for the current nextblock */ |
207 | jffs2_sum_move_collected(c, s); | 209 | jffs2_sum_move_collected(c, s); |
208 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): new nextblock = 0x%08x\n", jeb->offset)); | 210 | jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n", |
211 | __func__, jeb->offset); | ||
209 | c->nextblock = jeb; | 212 | c->nextblock = jeb; |
210 | } else { | 213 | } else { |
211 | ret = file_dirty(c, jeb); | 214 | ret = file_dirty(c, jeb); |
@@ -217,20 +220,21 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
217 | case BLK_STATE_ALLDIRTY: | 220 | case BLK_STATE_ALLDIRTY: |
218 | /* Nothing valid - not even a clean marker. Needs erasing. */ | 221 | /* Nothing valid - not even a clean marker. Needs erasing. */ |
219 | /* For now we just put it on the erasing list. We'll start the erases later */ | 222 | /* For now we just put it on the erasing list. We'll start the erases later */ |
220 | D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset)); | 223 | jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n", |
224 | jeb->offset); | ||
221 | list_add(&jeb->list, &c->erase_pending_list); | 225 | list_add(&jeb->list, &c->erase_pending_list); |
222 | c->nr_erasing_blocks++; | 226 | c->nr_erasing_blocks++; |
223 | break; | 227 | break; |
224 | 228 | ||
225 | case BLK_STATE_BADBLOCK: | 229 | case BLK_STATE_BADBLOCK: |
226 | D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset)); | 230 | jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset); |
227 | list_add(&jeb->list, &c->bad_list); | 231 | list_add(&jeb->list, &c->bad_list); |
228 | c->bad_size += c->sector_size; | 232 | c->bad_size += c->sector_size; |
229 | c->free_size -= c->sector_size; | 233 | c->free_size -= c->sector_size; |
230 | bad_blocks++; | 234 | bad_blocks++; |
231 | break; | 235 | break; |
232 | default: | 236 | default: |
233 | printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n"); | 237 | pr_warn("%s(): unknown block state\n", __func__); |
234 | BUG(); | 238 | BUG(); |
235 | } | 239 | } |
236 | } | 240 | } |
@@ -250,16 +254,17 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) | |||
250 | 254 | ||
251 | uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; | 255 | uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize; |
252 | 256 | ||
253 | D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n", | 257 | jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n", |
254 | skip)); | 258 | __func__, skip); |
255 | jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); | 259 | jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); |
256 | jffs2_scan_dirty_space(c, c->nextblock, skip); | 260 | jffs2_scan_dirty_space(c, c->nextblock, skip); |
257 | } | 261 | } |
258 | #endif | 262 | #endif |
259 | if (c->nr_erasing_blocks) { | 263 | if (c->nr_erasing_blocks) { |
260 | if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { | 264 | if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { |
261 | printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); | 265 | pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); |
262 | printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",empty_blocks,bad_blocks,c->nr_blocks); | 266 | pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n", |
267 | empty_blocks, bad_blocks, c->nr_blocks); | ||
263 | ret = -EIO; | 268 | ret = -EIO; |
264 | goto out; | 269 | goto out; |
265 | } | 270 | } |
@@ -287,11 +292,13 @@ static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf, | |||
287 | 292 | ||
288 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); | 293 | ret = jffs2_flash_read(c, ofs, len, &retlen, buf); |
289 | if (ret) { | 294 | if (ret) { |
290 | D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret)); | 295 | jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n", |
296 | len, ofs, ret); | ||
291 | return ret; | 297 | return ret; |
292 | } | 298 | } |
293 | if (retlen < len) { | 299 | if (retlen < len) { |
294 | D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen)); | 300 | jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n", |
301 | ofs, retlen); | ||
295 | return -EIO; | 302 | return -EIO; |
296 | } | 303 | } |
297 | return 0; | 304 | return 0; |
@@ -368,7 +375,7 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
368 | 375 | ||
369 | if (jffs2_sum_active()) | 376 | if (jffs2_sum_active()) |
370 | jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); | 377 | jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset); |
371 | dbg_xattr("scaning xdatum at %#08x (xid=%u, version=%u)\n", | 378 | dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n", |
372 | ofs, xd->xid, xd->version); | 379 | ofs, xd->xid, xd->version); |
373 | return 0; | 380 | return 0; |
374 | } | 381 | } |
@@ -449,7 +456,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
449 | ofs = jeb->offset; | 456 | ofs = jeb->offset; |
450 | prevofs = jeb->offset - 1; | 457 | prevofs = jeb->offset - 1; |
451 | 458 | ||
452 | D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs)); | 459 | jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs); |
453 | 460 | ||
454 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 461 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
455 | if (jffs2_cleanmarker_oob(c)) { | 462 | if (jffs2_cleanmarker_oob(c)) { |
@@ -459,7 +466,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
459 | return BLK_STATE_BADBLOCK; | 466 | return BLK_STATE_BADBLOCK; |
460 | 467 | ||
461 | ret = jffs2_check_nand_cleanmarker(c, jeb); | 468 | ret = jffs2_check_nand_cleanmarker(c, jeb); |
462 | D2(printk(KERN_NOTICE "jffs_check_nand_cleanmarker returned %d\n",ret)); | 469 | jffs2_dbg(2, "jffs_check_nand_cleanmarker returned %d\n", ret); |
463 | 470 | ||
464 | /* Even if it's not found, we still scan to see | 471 | /* Even if it's not found, we still scan to see |
465 | if the block is empty. We use this information | 472 | if the block is empty. We use this information |
@@ -561,7 +568,8 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
561 | if (jffs2_cleanmarker_oob(c)) { | 568 | if (jffs2_cleanmarker_oob(c)) { |
562 | /* scan oob, take care of cleanmarker */ | 569 | /* scan oob, take care of cleanmarker */ |
563 | int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); | 570 | int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound); |
564 | D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n",ret)); | 571 | jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n", |
572 | ret); | ||
565 | switch (ret) { | 573 | switch (ret) { |
566 | case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF; | 574 | case 0: return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF; |
567 | case 1: return BLK_STATE_ALLDIRTY; | 575 | case 1: return BLK_STATE_ALLDIRTY; |
@@ -569,15 +577,16 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
569 | } | 577 | } |
570 | } | 578 | } |
571 | #endif | 579 | #endif |
572 | D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset)); | 580 | jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n", |
581 | jeb->offset); | ||
573 | if (c->cleanmarker_size == 0) | 582 | if (c->cleanmarker_size == 0) |
574 | return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */ | 583 | return BLK_STATE_CLEANMARKER; /* don't bother with re-erase */ |
575 | else | 584 | else |
576 | return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */ | 585 | return BLK_STATE_ALLFF; /* OK to erase if all blocks are like this */ |
577 | } | 586 | } |
578 | if (ofs) { | 587 | if (ofs) { |
579 | D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset, | 588 | jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset, |
580 | jeb->offset + ofs)); | 589 | jeb->offset + ofs); |
581 | if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) | 590 | if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1))) |
582 | return err; | 591 | return err; |
583 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) | 592 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs))) |
@@ -604,12 +613,13 @@ scan_more: | |||
604 | cond_resched(); | 613 | cond_resched(); |
605 | 614 | ||
606 | if (ofs & 3) { | 615 | if (ofs & 3) { |
607 | printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs); | 616 | pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs); |
608 | ofs = PAD(ofs); | 617 | ofs = PAD(ofs); |
609 | continue; | 618 | continue; |
610 | } | 619 | } |
611 | if (ofs == prevofs) { | 620 | if (ofs == prevofs) { |
612 | printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs); | 621 | pr_warn("ofs 0x%08x has already been seen. Skipping\n", |
622 | ofs); | ||
613 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) | 623 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
614 | return err; | 624 | return err; |
615 | ofs += 4; | 625 | ofs += 4; |
@@ -618,8 +628,10 @@ scan_more: | |||
618 | prevofs = ofs; | 628 | prevofs = ofs; |
619 | 629 | ||
620 | if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { | 630 | if (jeb->offset + c->sector_size < ofs + sizeof(*node)) { |
621 | D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node), | 631 | jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", |
622 | jeb->offset, c->sector_size, ofs, sizeof(*node))); | 632 | sizeof(struct jffs2_unknown_node), |
633 | jeb->offset, c->sector_size, ofs, | ||
634 | sizeof(*node)); | ||
623 | if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) | 635 | if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs))) |
624 | return err; | 636 | return err; |
625 | break; | 637 | break; |
@@ -627,8 +639,9 @@ scan_more: | |||
627 | 639 | ||
628 | if (buf_ofs + buf_len < ofs + sizeof(*node)) { | 640 | if (buf_ofs + buf_len < ofs + sizeof(*node)) { |
629 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 641 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
630 | D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", | 642 | jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n", |
631 | sizeof(struct jffs2_unknown_node), buf_len, ofs)); | 643 | sizeof(struct jffs2_unknown_node), |
644 | buf_len, ofs); | ||
632 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 645 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
633 | if (err) | 646 | if (err) |
634 | return err; | 647 | return err; |
@@ -645,13 +658,13 @@ scan_more: | |||
645 | ofs += 4; | 658 | ofs += 4; |
646 | scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); | 659 | scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len); |
647 | 660 | ||
648 | D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs)); | 661 | jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs); |
649 | more_empty: | 662 | more_empty: |
650 | inbuf_ofs = ofs - buf_ofs; | 663 | inbuf_ofs = ofs - buf_ofs; |
651 | while (inbuf_ofs < scan_end) { | 664 | while (inbuf_ofs < scan_end) { |
652 | if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) { | 665 | if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) { |
653 | printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", | 666 | pr_warn("Empty flash at 0x%08x ends at 0x%08x\n", |
654 | empty_start, ofs); | 667 | empty_start, ofs); |
655 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start))) | 668 | if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start))) |
656 | return err; | 669 | return err; |
657 | goto scan_more; | 670 | goto scan_more; |
@@ -661,13 +674,15 @@ scan_more: | |||
661 | ofs += 4; | 674 | ofs += 4; |
662 | } | 675 | } |
663 | /* Ran off end. */ | 676 | /* Ran off end. */ |
664 | D1(printk(KERN_DEBUG "Empty flash to end of buffer at 0x%08x\n", ofs)); | 677 | jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n", |
678 | ofs); | ||
665 | 679 | ||
666 | /* If we're only checking the beginning of a block with a cleanmarker, | 680 | /* If we're only checking the beginning of a block with a cleanmarker, |
667 | bail now */ | 681 | bail now */ |
668 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && | 682 | if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) && |
669 | c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { | 683 | c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) { |
670 | D1(printk(KERN_DEBUG "%d bytes at start of block seems clean... assuming all clean\n", EMPTY_SCAN_SIZE(c->sector_size))); | 684 | jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n", |
685 | EMPTY_SCAN_SIZE(c->sector_size)); | ||
671 | return BLK_STATE_CLEANMARKER; | 686 | return BLK_STATE_CLEANMARKER; |
672 | } | 687 | } |
673 | if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */ | 688 | if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */ |
@@ -680,13 +695,14 @@ scan_more: | |||
680 | if (!buf_len) { | 695 | if (!buf_len) { |
681 | /* No more to read. Break out of main loop without marking | 696 | /* No more to read. Break out of main loop without marking |
682 | this range of empty space as dirty (because it's not) */ | 697 | this range of empty space as dirty (because it's not) */ |
683 | D1(printk(KERN_DEBUG "Empty flash at %08x runs to end of block. Treating as free_space\n", | 698 | jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n", |
684 | empty_start)); | 699 | empty_start); |
685 | break; | 700 | break; |
686 | } | 701 | } |
687 | /* point never reaches here */ | 702 | /* point never reaches here */ |
688 | scan_end = buf_len; | 703 | scan_end = buf_len; |
689 | D1(printk(KERN_DEBUG "Reading another 0x%x at 0x%08x\n", buf_len, ofs)); | 704 | jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n", |
705 | buf_len, ofs); | ||
690 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 706 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
691 | if (err) | 707 | if (err) |
692 | return err; | 708 | return err; |
@@ -695,22 +711,23 @@ scan_more: | |||
695 | } | 711 | } |
696 | 712 | ||
697 | if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { | 713 | if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) { |
698 | printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs); | 714 | pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", |
715 | ofs); | ||
699 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) | 716 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
700 | return err; | 717 | return err; |
701 | ofs += 4; | 718 | ofs += 4; |
702 | continue; | 719 | continue; |
703 | } | 720 | } |
704 | if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { | 721 | if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) { |
705 | D1(printk(KERN_DEBUG "Dirty bitmask at 0x%08x\n", ofs)); | 722 | jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs); |
706 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) | 723 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
707 | return err; | 724 | return err; |
708 | ofs += 4; | 725 | ofs += 4; |
709 | continue; | 726 | continue; |
710 | } | 727 | } |
711 | if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { | 728 | if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) { |
712 | printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs); | 729 | pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs); |
713 | printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n"); | 730 | pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n"); |
714 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) | 731 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
715 | return err; | 732 | return err; |
716 | ofs += 4; | 733 | ofs += 4; |
@@ -718,7 +735,8 @@ scan_more: | |||
718 | } | 735 | } |
719 | if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { | 736 | if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) { |
720 | /* OK. We're out of possibilities. Whinge and move on */ | 737 | /* OK. We're out of possibilities. Whinge and move on */ |
721 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", | 738 | noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n", |
739 | __func__, | ||
722 | JFFS2_MAGIC_BITMASK, ofs, | 740 | JFFS2_MAGIC_BITMASK, ofs, |
723 | je16_to_cpu(node->magic)); | 741 | je16_to_cpu(node->magic)); |
724 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) | 742 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
@@ -733,7 +751,8 @@ scan_more: | |||
733 | hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4); | 751 | hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4); |
734 | 752 | ||
735 | if (hdr_crc != je32_to_cpu(node->hdr_crc)) { | 753 | if (hdr_crc != je32_to_cpu(node->hdr_crc)) { |
736 | noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", | 754 | noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n", |
755 | __func__, | ||
737 | ofs, je16_to_cpu(node->magic), | 756 | ofs, je16_to_cpu(node->magic), |
738 | je16_to_cpu(node->nodetype), | 757 | je16_to_cpu(node->nodetype), |
739 | je32_to_cpu(node->totlen), | 758 | je32_to_cpu(node->totlen), |
@@ -747,9 +766,9 @@ scan_more: | |||
747 | 766 | ||
748 | if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) { | 767 | if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) { |
749 | /* Eep. Node goes over the end of the erase block. */ | 768 | /* Eep. Node goes over the end of the erase block. */ |
750 | printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", | 769 | pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n", |
751 | ofs, je32_to_cpu(node->totlen)); | 770 | ofs, je32_to_cpu(node->totlen)); |
752 | printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n"); | 771 | pr_warn("Perhaps the file system was created with the wrong erase size?\n"); |
753 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) | 772 | if ((err = jffs2_scan_dirty_space(c, jeb, 4))) |
754 | return err; | 773 | return err; |
755 | ofs += 4; | 774 | ofs += 4; |
@@ -758,7 +777,8 @@ scan_more: | |||
758 | 777 | ||
759 | if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { | 778 | if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) { |
760 | /* Wheee. This is an obsoleted node */ | 779 | /* Wheee. This is an obsoleted node */ |
761 | D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs)); | 780 | jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n", |
781 | ofs); | ||
762 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) | 782 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
763 | return err; | 783 | return err; |
764 | ofs += PAD(je32_to_cpu(node->totlen)); | 784 | ofs += PAD(je32_to_cpu(node->totlen)); |
@@ -769,8 +789,9 @@ scan_more: | |||
769 | case JFFS2_NODETYPE_INODE: | 789 | case JFFS2_NODETYPE_INODE: |
770 | if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) { | 790 | if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) { |
771 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 791 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
772 | D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", | 792 | jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n", |
773 | sizeof(struct jffs2_raw_inode), buf_len, ofs)); | 793 | sizeof(struct jffs2_raw_inode), |
794 | buf_len, ofs); | ||
774 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 795 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
775 | if (err) | 796 | if (err) |
776 | return err; | 797 | return err; |
@@ -785,8 +806,9 @@ scan_more: | |||
785 | case JFFS2_NODETYPE_DIRENT: | 806 | case JFFS2_NODETYPE_DIRENT: |
786 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 807 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
787 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 808 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
788 | D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", | 809 | jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n", |
789 | je32_to_cpu(node->totlen), buf_len, ofs)); | 810 | je32_to_cpu(node->totlen), buf_len, |
811 | ofs); | ||
790 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 812 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
791 | if (err) | 813 | if (err) |
792 | return err; | 814 | return err; |
@@ -802,9 +824,9 @@ scan_more: | |||
802 | case JFFS2_NODETYPE_XATTR: | 824 | case JFFS2_NODETYPE_XATTR: |
803 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 825 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
804 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 826 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
805 | D1(printk(KERN_DEBUG "Fewer than %d bytes (xattr node)" | 827 | jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n", |
806 | " left to end of buf. Reading 0x%x at 0x%08x\n", | 828 | je32_to_cpu(node->totlen), buf_len, |
807 | je32_to_cpu(node->totlen), buf_len, ofs)); | 829 | ofs); |
808 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 830 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
809 | if (err) | 831 | if (err) |
810 | return err; | 832 | return err; |
@@ -819,9 +841,9 @@ scan_more: | |||
819 | case JFFS2_NODETYPE_XREF: | 841 | case JFFS2_NODETYPE_XREF: |
820 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { | 842 | if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) { |
821 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); | 843 | buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs); |
822 | D1(printk(KERN_DEBUG "Fewer than %d bytes (xref node)" | 844 | jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n", |
823 | " left to end of buf. Reading 0x%x at 0x%08x\n", | 845 | je32_to_cpu(node->totlen), buf_len, |
824 | je32_to_cpu(node->totlen), buf_len, ofs)); | 846 | ofs); |
825 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); | 847 | err = jffs2_fill_scan_buf(c, buf, ofs, buf_len); |
826 | if (err) | 848 | if (err) |
827 | return err; | 849 | return err; |
@@ -836,15 +858,17 @@ scan_more: | |||
836 | #endif /* CONFIG_JFFS2_FS_XATTR */ | 858 | #endif /* CONFIG_JFFS2_FS_XATTR */ |
837 | 859 | ||
838 | case JFFS2_NODETYPE_CLEANMARKER: | 860 | case JFFS2_NODETYPE_CLEANMARKER: |
839 | D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs)); | 861 | jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs); |
840 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { | 862 | if (je32_to_cpu(node->totlen) != c->cleanmarker_size) { |
841 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", | 863 | pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n", |
842 | ofs, je32_to_cpu(node->totlen), c->cleanmarker_size); | 864 | ofs, je32_to_cpu(node->totlen), |
865 | c->cleanmarker_size); | ||
843 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) | 866 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) |
844 | return err; | 867 | return err; |
845 | ofs += PAD(sizeof(struct jffs2_unknown_node)); | 868 | ofs += PAD(sizeof(struct jffs2_unknown_node)); |
846 | } else if (jeb->first_node) { | 869 | } else if (jeb->first_node) { |
847 | printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset); | 870 | pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", |
871 | ofs, jeb->offset); | ||
848 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) | 872 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node))))) |
849 | return err; | 873 | return err; |
850 | ofs += PAD(sizeof(struct jffs2_unknown_node)); | 874 | ofs += PAD(sizeof(struct jffs2_unknown_node)); |
@@ -866,7 +890,8 @@ scan_more: | |||
866 | default: | 890 | default: |
867 | switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) { | 891 | switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) { |
868 | case JFFS2_FEATURE_ROCOMPAT: | 892 | case JFFS2_FEATURE_ROCOMPAT: |
869 | printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); | 893 | pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", |
894 | je16_to_cpu(node->nodetype), ofs); | ||
870 | c->flags |= JFFS2_SB_FLAG_RO; | 895 | c->flags |= JFFS2_SB_FLAG_RO; |
871 | if (!(jffs2_is_readonly(c))) | 896 | if (!(jffs2_is_readonly(c))) |
872 | return -EROFS; | 897 | return -EROFS; |
@@ -876,18 +901,21 @@ scan_more: | |||
876 | break; | 901 | break; |
877 | 902 | ||
878 | case JFFS2_FEATURE_INCOMPAT: | 903 | case JFFS2_FEATURE_INCOMPAT: |
879 | printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs); | 904 | pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n", |
905 | je16_to_cpu(node->nodetype), ofs); | ||
880 | return -EINVAL; | 906 | return -EINVAL; |
881 | 907 | ||
882 | case JFFS2_FEATURE_RWCOMPAT_DELETE: | 908 | case JFFS2_FEATURE_RWCOMPAT_DELETE: |
883 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); | 909 | jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", |
910 | je16_to_cpu(node->nodetype), ofs); | ||
884 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) | 911 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen))))) |
885 | return err; | 912 | return err; |
886 | ofs += PAD(je32_to_cpu(node->totlen)); | 913 | ofs += PAD(je32_to_cpu(node->totlen)); |
887 | break; | 914 | break; |
888 | 915 | ||
889 | case JFFS2_FEATURE_RWCOMPAT_COPY: { | 916 | case JFFS2_FEATURE_RWCOMPAT_COPY: { |
890 | D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs)); | 917 | jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", |
918 | je16_to_cpu(node->nodetype), ofs); | ||
891 | 919 | ||
892 | jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); | 920 | jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL); |
893 | 921 | ||
@@ -908,8 +936,9 @@ scan_more: | |||
908 | } | 936 | } |
909 | } | 937 | } |
910 | 938 | ||
911 | D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", | 939 | jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n", |
912 | jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size)); | 940 | jeb->offset, jeb->free_size, jeb->dirty_size, |
941 | jeb->unchecked_size, jeb->used_size, jeb->wasted_size); | ||
913 | 942 | ||
914 | /* mark_node_obsolete can add to wasted !! */ | 943 | /* mark_node_obsolete can add to wasted !! */ |
915 | if (jeb->wasted_size) { | 944 | if (jeb->wasted_size) { |
@@ -935,7 +964,7 @@ struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uin | |||
935 | 964 | ||
936 | ic = jffs2_alloc_inode_cache(); | 965 | ic = jffs2_alloc_inode_cache(); |
937 | if (!ic) { | 966 | if (!ic) { |
938 | printk(KERN_NOTICE "jffs2_scan_make_inode_cache(): allocation of inode cache failed\n"); | 967 | pr_notice("%s(): allocation of inode cache failed\n", __func__); |
939 | return NULL; | 968 | return NULL; |
940 | } | 969 | } |
941 | memset(ic, 0, sizeof(*ic)); | 970 | memset(ic, 0, sizeof(*ic)); |
@@ -954,7 +983,7 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
954 | struct jffs2_inode_cache *ic; | 983 | struct jffs2_inode_cache *ic; |
955 | uint32_t crc, ino = je32_to_cpu(ri->ino); | 984 | uint32_t crc, ino = je32_to_cpu(ri->ino); |
956 | 985 | ||
957 | D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs)); | 986 | jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); |
958 | 987 | ||
959 | /* We do very little here now. Just check the ino# to which we should attribute | 988 | /* We do very little here now. Just check the ino# to which we should attribute |
960 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- | 989 | this node; we can do all the CRC checking etc. later. There's a tradeoff here -- |
@@ -968,9 +997,8 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
968 | /* Check the node CRC in any case. */ | 997 | /* Check the node CRC in any case. */ |
969 | crc = crc32(0, ri, sizeof(*ri)-8); | 998 | crc = crc32(0, ri, sizeof(*ri)-8); |
970 | if (crc != je32_to_cpu(ri->node_crc)) { | 999 | if (crc != je32_to_cpu(ri->node_crc)) { |
971 | printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on " | 1000 | pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
972 | "node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 1001 | __func__, ofs, je32_to_cpu(ri->node_crc), crc); |
973 | ofs, je32_to_cpu(ri->node_crc), crc); | ||
974 | /* | 1002 | /* |
975 | * We believe totlen because the CRC on the node | 1003 | * We believe totlen because the CRC on the node |
976 | * _header_ was OK, just the node itself failed. | 1004 | * _header_ was OK, just the node itself failed. |
@@ -989,10 +1017,10 @@ static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_erasebloc | |||
989 | /* Wheee. It worked */ | 1017 | /* Wheee. It worked */ |
990 | jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); | 1018 | jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); |
991 | 1019 | ||
992 | D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n", | 1020 | jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n", |
993 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), | 1021 | je32_to_cpu(ri->ino), je32_to_cpu(ri->version), |
994 | je32_to_cpu(ri->offset), | 1022 | je32_to_cpu(ri->offset), |
995 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize))); | 1023 | je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)); |
996 | 1024 | ||
997 | pseudo_random += je32_to_cpu(ri->version); | 1025 | pseudo_random += je32_to_cpu(ri->version); |
998 | 1026 | ||
@@ -1012,15 +1040,15 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
1012 | uint32_t crc; | 1040 | uint32_t crc; |
1013 | int err; | 1041 | int err; |
1014 | 1042 | ||
1015 | D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs)); | 1043 | jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs); |
1016 | 1044 | ||
1017 | /* We don't get here unless the node is still valid, so we don't have to | 1045 | /* We don't get here unless the node is still valid, so we don't have to |
1018 | mask in the ACCURATE bit any more. */ | 1046 | mask in the ACCURATE bit any more. */ |
1019 | crc = crc32(0, rd, sizeof(*rd)-8); | 1047 | crc = crc32(0, rd, sizeof(*rd)-8); |
1020 | 1048 | ||
1021 | if (crc != je32_to_cpu(rd->node_crc)) { | 1049 | if (crc != je32_to_cpu(rd->node_crc)) { |
1022 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 1050 | pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
1023 | ofs, je32_to_cpu(rd->node_crc), crc); | 1051 | __func__, ofs, je32_to_cpu(rd->node_crc), crc); |
1024 | /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ | 1052 | /* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */ |
1025 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) | 1053 | if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen))))) |
1026 | return err; | 1054 | return err; |
@@ -1032,7 +1060,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
1032 | /* Should never happen. Did. (OLPC trac #4184)*/ | 1060 | /* Should never happen. Did. (OLPC trac #4184)*/ |
1033 | checkedlen = strnlen(rd->name, rd->nsize); | 1061 | checkedlen = strnlen(rd->name, rd->nsize); |
1034 | if (checkedlen < rd->nsize) { | 1062 | if (checkedlen < rd->nsize) { |
1035 | printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n", | 1063 | pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n", |
1036 | ofs, checkedlen); | 1064 | ofs, checkedlen); |
1037 | } | 1065 | } |
1038 | fd = jffs2_alloc_full_dirent(checkedlen+1); | 1066 | fd = jffs2_alloc_full_dirent(checkedlen+1); |
@@ -1044,9 +1072,10 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo | |||
1044 | 1072 | ||
1045 | crc = crc32(0, fd->name, rd->nsize); | 1073 | crc = crc32(0, fd->name, rd->nsize); |
1046 | if (crc != je32_to_cpu(rd->name_crc)) { | 1074 | if (crc != je32_to_cpu(rd->name_crc)) { |
1047 | printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", | 1075 | pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n", |
1048 | ofs, je32_to_cpu(rd->name_crc), crc); | 1076 | __func__, ofs, je32_to_cpu(rd->name_crc), crc); |
1049 | D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino))); | 1077 | jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n", |
1078 | fd->name, je32_to_cpu(rd->ino)); | ||
1050 | jffs2_free_full_dirent(fd); | 1079 | jffs2_free_full_dirent(fd); |
1051 | /* FIXME: Why do we believe totlen? */ | 1080 | /* FIXME: Why do we believe totlen? */ |
1052 | /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ | 1081 | /* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */ |
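The scan.c hunks above replace the old D1()/D2() printk wrappers with jffs2_dbg(1, ...) and jffs2_dbg(2, ...). For orientation only, a level-gated debug macro of this shape can be sketched roughly as below; the real definition lives in fs/jffs2/debug.h and may differ in detail, so treat this as an illustrative approximation rather than the patch's actual helper:

	/* Illustrative sketch only -- not the exact fs/jffs2/debug.h definition. */
	#define jffs2_dbg(level, fmt, ...)				\
	do {								\
		if (level <= CONFIG_JFFS2_FS_DEBUG)			\
			pr_debug(fmt, ##__VA_ARGS__);			\
	} while (0)

	/*
	 * Usage, matching the converted call sites above:
	 *   jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
	 *   jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n", ofs);
	 */

The net effect is the same compile-time gating the D1()/D2() macros provided, but with a single call style that checkpatch and the pr_* helpers understand.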
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c index 0f20208df602..aca97f35b292 100644 --- a/fs/jffs2/security.c +++ b/fs/jffs2/security.c | |||
@@ -23,8 +23,8 @@ | |||
23 | #include "nodelist.h" | 23 | #include "nodelist.h" |
24 | 24 | ||
25 | /* ---- Initial Security Label(s) Attachment callback --- */ | 25 | /* ---- Initial Security Label(s) Attachment callback --- */ |
26 | int jffs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, | 26 | static int jffs2_initxattrs(struct inode *inode, |
27 | void *fs_info) | 27 | const struct xattr *xattr_array, void *fs_info) |
28 | { | 28 | { |
29 | const struct xattr *xattr; | 29 | const struct xattr *xattr; |
30 | int err = 0; | 30 | int err = 0; |
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c index e537fb0e0184..c522d098bb4f 100644 --- a/fs/jffs2/summary.c +++ b/fs/jffs2/summary.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
15 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
16 | #include <linux/mtd/mtd.h> | 18 | #include <linux/mtd/mtd.h> |
@@ -442,13 +444,16 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras | |||
442 | /* This should never happen, but https://dev.laptop.org/ticket/4184 */ | 444 | /* This should never happen, but https://dev.laptop.org/ticket/4184 */ |
443 | checkedlen = strnlen(spd->name, spd->nsize); | 445 | checkedlen = strnlen(spd->name, spd->nsize); |
444 | if (!checkedlen) { | 446 | if (!checkedlen) { |
445 | printk(KERN_ERR "Dirent at %08x has zero at start of name. Aborting mount.\n", | 447 | pr_err("Dirent at %08x has zero at start of name. Aborting mount.\n", |
446 | jeb->offset + je32_to_cpu(spd->offset)); | 448 | jeb->offset + |
449 | je32_to_cpu(spd->offset)); | ||
447 | return -EIO; | 450 | return -EIO; |
448 | } | 451 | } |
449 | if (checkedlen < spd->nsize) { | 452 | if (checkedlen < spd->nsize) { |
450 | printk(KERN_ERR "Dirent at %08x has zeroes in name. Truncating to %d chars\n", | 453 | pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n", |
451 | jeb->offset + je32_to_cpu(spd->offset), checkedlen); | 454 | jeb->offset + |
455 | je32_to_cpu(spd->offset), | ||
456 | checkedlen); | ||
452 | } | 457 | } |
453 | 458 | ||
454 | 459 | ||
@@ -808,8 +813,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
808 | 813 | ||
809 | sum_ofs = jeb->offset + c->sector_size - jeb->free_size; | 814 | sum_ofs = jeb->offset + c->sector_size - jeb->free_size; |
810 | 815 | ||
811 | dbg_summary("JFFS2: writing out data to flash to pos : 0x%08x\n", | 816 | dbg_summary("writing out data to flash to pos : 0x%08x\n", sum_ofs); |
812 | sum_ofs); | ||
813 | 817 | ||
814 | ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0); | 818 | ret = jffs2_flash_writev(c, vecs, 2, sum_ofs, &retlen, 0); |
815 | 819 | ||
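Several files in this series (summary.c above, and super.c, symlink.c, wbuf.c and write.c below) gain a pr_fmt() definition before their includes. That is what lets the literal "JFFS2"/"jffs2:" prefixes be dropped from individual messages: the pr_* helpers prepend pr_fmt() to every format string. A simplified sketch of the mechanism follows; the real macros are in <linux/printk.h> and carry more detail:

	/* Simplified sketch of the <linux/printk.h> plumbing, for illustration. */
	#ifndef pr_fmt
	#define pr_fmt(fmt) fmt			/* default: no prefix */
	#endif

	#define pr_warn(fmt, ...) \
		printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

	/*
	 * With "#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt" at the top of a
	 * jffs2 source file, a call such as
	 *
	 *	pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
	 *
	 * expands to printk(KERN_WARNING "jffs2: flash size adjusted to ...\n", ...),
	 * so the hand-written "JFFS2" prefixes in the old strings become redundant.
	 */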
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index f2d96b5e64f6..f9916f312bd8 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/module.h> | 15 | #include <linux/module.h> |
14 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
@@ -69,7 +71,7 @@ static void jffs2_write_super(struct super_block *sb) | |||
69 | sb->s_dirt = 0; | 71 | sb->s_dirt = 0; |
70 | 72 | ||
71 | if (!(sb->s_flags & MS_RDONLY)) { | 73 | if (!(sb->s_flags & MS_RDONLY)) { |
72 | D1(printk(KERN_DEBUG "jffs2_write_super()\n")); | 74 | jffs2_dbg(1, "%s()\n", __func__); |
73 | jffs2_flush_wbuf_gc(c, 0); | 75 | jffs2_flush_wbuf_gc(c, 0); |
74 | } | 76 | } |
75 | 77 | ||
@@ -214,8 +216,8 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data) | |||
214 | JFFS2_COMPR_MODE_FORCEZLIB; | 216 | JFFS2_COMPR_MODE_FORCEZLIB; |
215 | #endif | 217 | #endif |
216 | else { | 218 | else { |
217 | printk(KERN_ERR "JFFS2 Error: unknown compressor \"%s\"", | 219 | pr_err("Error: unknown compressor \"%s\"\n", |
218 | name); | 220 | name); |
219 | kfree(name); | 221 | kfree(name); |
220 | return -EINVAL; | 222 | return -EINVAL; |
221 | } | 223 | } |
@@ -223,8 +225,8 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data) | |||
223 | c->mount_opts.override_compr = true; | 225 | c->mount_opts.override_compr = true; |
224 | break; | 226 | break; |
225 | default: | 227 | default: |
226 | printk(KERN_ERR "JFFS2 Error: unrecognized mount option '%s' or missing value\n", | 228 | pr_err("Error: unrecognized mount option '%s' or missing value\n", |
227 | p); | 229 | p); |
228 | return -EINVAL; | 230 | return -EINVAL; |
229 | } | 231 | } |
230 | } | 232 | } |
@@ -266,9 +268,9 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent) | |||
266 | struct jffs2_sb_info *c; | 268 | struct jffs2_sb_info *c; |
267 | int ret; | 269 | int ret; |
268 | 270 | ||
269 | D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():" | 271 | jffs2_dbg(1, "jffs2_get_sb_mtd():" |
270 | " New superblock for device %d (\"%s\")\n", | 272 | " New superblock for device %d (\"%s\")\n", |
271 | sb->s_mtd->index, sb->s_mtd->name)); | 273 | sb->s_mtd->index, sb->s_mtd->name); |
272 | 274 | ||
273 | c = kzalloc(sizeof(*c), GFP_KERNEL); | 275 | c = kzalloc(sizeof(*c), GFP_KERNEL); |
274 | if (!c) | 276 | if (!c) |
@@ -315,7 +317,7 @@ static void jffs2_put_super (struct super_block *sb) | |||
315 | { | 317 | { |
316 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); | 318 | struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); |
317 | 319 | ||
318 | D2(printk(KERN_DEBUG "jffs2: jffs2_put_super()\n")); | 320 | jffs2_dbg(2, "%s()\n", __func__); |
319 | 321 | ||
320 | if (sb->s_dirt) | 322 | if (sb->s_dirt) |
321 | jffs2_write_super(sb); | 323 | jffs2_write_super(sb); |
@@ -336,7 +338,7 @@ static void jffs2_put_super (struct super_block *sb) | |||
336 | kfree(c->inocache_list); | 338 | kfree(c->inocache_list); |
337 | jffs2_clear_xattr_subsystem(c); | 339 | jffs2_clear_xattr_subsystem(c); |
338 | mtd_sync(c->mtd); | 340 | mtd_sync(c->mtd); |
339 | D1(printk(KERN_DEBUG "jffs2_put_super returning\n")); | 341 | jffs2_dbg(1, "%s(): returning\n", __func__); |
340 | } | 342 | } |
341 | 343 | ||
342 | static void jffs2_kill_sb(struct super_block *sb) | 344 | static void jffs2_kill_sb(struct super_block *sb) |
@@ -371,7 +373,7 @@ static int __init init_jffs2_fs(void) | |||
371 | BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68); | 373 | BUILD_BUG_ON(sizeof(struct jffs2_raw_inode) != 68); |
372 | BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32); | 374 | BUILD_BUG_ON(sizeof(struct jffs2_raw_summary) != 32); |
373 | 375 | ||
374 | printk(KERN_INFO "JFFS2 version 2.2." | 376 | pr_info("version 2.2." |
375 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER | 377 | #ifdef CONFIG_JFFS2_FS_WRITEBUFFER |
376 | " (NAND)" | 378 | " (NAND)" |
377 | #endif | 379 | #endif |
@@ -386,22 +388,22 @@ static int __init init_jffs2_fs(void) | |||
386 | SLAB_MEM_SPREAD), | 388 | SLAB_MEM_SPREAD), |
387 | jffs2_i_init_once); | 389 | jffs2_i_init_once); |
388 | if (!jffs2_inode_cachep) { | 390 | if (!jffs2_inode_cachep) { |
389 | printk(KERN_ERR "JFFS2 error: Failed to initialise inode cache\n"); | 391 | pr_err("error: Failed to initialise inode cache\n"); |
390 | return -ENOMEM; | 392 | return -ENOMEM; |
391 | } | 393 | } |
392 | ret = jffs2_compressors_init(); | 394 | ret = jffs2_compressors_init(); |
393 | if (ret) { | 395 | if (ret) { |
394 | printk(KERN_ERR "JFFS2 error: Failed to initialise compressors\n"); | 396 | pr_err("error: Failed to initialise compressors\n"); |
395 | goto out; | 397 | goto out; |
396 | } | 398 | } |
397 | ret = jffs2_create_slab_caches(); | 399 | ret = jffs2_create_slab_caches(); |
398 | if (ret) { | 400 | if (ret) { |
399 | printk(KERN_ERR "JFFS2 error: Failed to initialise slab caches\n"); | 401 | pr_err("error: Failed to initialise slab caches\n"); |
400 | goto out_compressors; | 402 | goto out_compressors; |
401 | } | 403 | } |
402 | ret = register_filesystem(&jffs2_fs_type); | 404 | ret = register_filesystem(&jffs2_fs_type); |
403 | if (ret) { | 405 | if (ret) { |
404 | printk(KERN_ERR "JFFS2 error: Failed to register filesystem\n"); | 406 | pr_err("error: Failed to register filesystem\n"); |
405 | goto out_slab; | 407 | goto out_slab; |
406 | } | 408 | } |
407 | return 0; | 409 | return 0; |
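A side effect visible in super.c, and again in the wbuf.c hunks below, is that user-visible messages previously split across several string literals are rejoined into a single literal even where that exceeds 80 columns, which keeps them greppable. The before/after below is lifted from one of the wbuf.c hunks further down; only the argument lines wrap, never the format string:

	/* Before: format string split to satisfy the line-length limit. */
	printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd"
	       " bytes, read %zd bytes, error %d\n",
	       jeb->offset, ops.ooblen, ops.oobretlen, ret);

	/* After: one literal, with the arguments wrapped instead. */
	pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
	       jeb->offset, ops.ooblen, ops.oobretlen, ret);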
diff --git a/fs/jffs2/symlink.c b/fs/jffs2/symlink.c index e3035afb1814..6e563332bb24 100644 --- a/fs/jffs2/symlink.c +++ b/fs/jffs2/symlink.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
14 | #include <linux/namei.h> | 16 | #include <linux/namei.h> |
@@ -47,10 +49,11 @@ static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
47 | */ | 49 | */ |
48 | 50 | ||
49 | if (!p) { | 51 | if (!p) { |
50 | printk(KERN_ERR "jffs2_follow_link(): can't find symlink target\n"); | 52 | pr_err("%s(): can't find symlink target\n", __func__); |
51 | p = ERR_PTR(-EIO); | 53 | p = ERR_PTR(-EIO); |
52 | } | 54 | } |
53 | D1(printk(KERN_DEBUG "jffs2_follow_link(): target path is '%s'\n", (char *) f->target)); | 55 | jffs2_dbg(1, "%s(): target path is '%s'\n", |
56 | __func__, (char *)f->target); | ||
54 | 57 | ||
55 | nd_set_link(nd, p); | 58 | nd_set_link(nd, p); |
56 | 59 | ||
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index 30e8f47e8a23..74d9be19df3f 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c | |||
@@ -11,6 +11,8 @@ | |||
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
14 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
15 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
16 | #include <linux/mtd/mtd.h> | 18 | #include <linux/mtd/mtd.h> |
@@ -91,7 +93,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) | |||
91 | 93 | ||
92 | new = kmalloc(sizeof(*new), GFP_KERNEL); | 94 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
93 | if (!new) { | 95 | if (!new) { |
94 | D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n")); | 96 | jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n"); |
95 | jffs2_clear_wbuf_ino_list(c); | 97 | jffs2_clear_wbuf_ino_list(c); |
96 | c->wbuf_inodes = &inodirty_nomem; | 98 | c->wbuf_inodes = &inodirty_nomem; |
97 | return; | 99 | return; |
@@ -113,19 +115,20 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) | |||
113 | list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { | 115 | list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { |
114 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); | 116 | struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); |
115 | 117 | ||
116 | D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset)); | 118 | jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", |
119 | jeb->offset); | ||
117 | list_del(this); | 120 | list_del(this); |
118 | if ((jiffies + (n++)) & 127) { | 121 | if ((jiffies + (n++)) & 127) { |
119 | /* Most of the time, we just erase it immediately. Otherwise we | 122 | /* Most of the time, we just erase it immediately. Otherwise we |
120 | spend ages scanning it on mount, etc. */ | 123 | spend ages scanning it on mount, etc. */ |
121 | D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n")); | 124 | jffs2_dbg(1, "...and adding to erase_pending_list\n"); |
122 | list_add_tail(&jeb->list, &c->erase_pending_list); | 125 | list_add_tail(&jeb->list, &c->erase_pending_list); |
123 | c->nr_erasing_blocks++; | 126 | c->nr_erasing_blocks++; |
124 | jffs2_garbage_collect_trigger(c); | 127 | jffs2_garbage_collect_trigger(c); |
125 | } else { | 128 | } else { |
126 | /* Sometimes, however, we leave it elsewhere so it doesn't get | 129 | /* Sometimes, however, we leave it elsewhere so it doesn't get |
127 | immediately reused, and we spread the load a bit. */ | 130 | immediately reused, and we spread the load a bit. */ |
128 | D1(printk(KERN_DEBUG "...and adding to erasable_list\n")); | 131 | jffs2_dbg(1, "...and adding to erasable_list\n"); |
129 | list_add_tail(&jeb->list, &c->erasable_list); | 132 | list_add_tail(&jeb->list, &c->erasable_list); |
130 | } | 133 | } |
131 | } | 134 | } |
@@ -136,7 +139,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) | |||
136 | 139 | ||
137 | static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) | 140 | static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) |
138 | { | 141 | { |
139 | D1(printk("About to refile bad block at %08x\n", jeb->offset)); | 142 | jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset); |
140 | 143 | ||
141 | /* File the existing block on the bad_used_list.... */ | 144 | /* File the existing block on the bad_used_list.... */ |
142 | if (c->nextblock == jeb) | 145 | if (c->nextblock == jeb) |
@@ -144,12 +147,14 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock | |||
144 | else /* Not sure this should ever happen... need more coffee */ | 147 | else /* Not sure this should ever happen... need more coffee */ |
145 | list_del(&jeb->list); | 148 | list_del(&jeb->list); |
146 | if (jeb->first_node) { | 149 | if (jeb->first_node) { |
147 | D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset)); | 150 | jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n", |
151 | jeb->offset); | ||
148 | list_add(&jeb->list, &c->bad_used_list); | 152 | list_add(&jeb->list, &c->bad_used_list); |
149 | } else { | 153 | } else { |
150 | BUG_ON(allow_empty == REFILE_NOTEMPTY); | 154 | BUG_ON(allow_empty == REFILE_NOTEMPTY); |
151 | /* It has to have had some nodes or we couldn't be here */ | 155 | /* It has to have had some nodes or we couldn't be here */ |
152 | D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset)); | 156 | jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n", |
157 | jeb->offset); | ||
153 | list_add(&jeb->list, &c->erase_pending_list); | 158 | list_add(&jeb->list, &c->erase_pending_list); |
154 | c->nr_erasing_blocks++; | 159 | c->nr_erasing_blocks++; |
155 | jffs2_garbage_collect_trigger(c); | 160 | jffs2_garbage_collect_trigger(c); |
@@ -230,10 +235,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, | |||
230 | 235 | ||
231 | ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); | 236 | ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); |
232 | if (ret && ret != -EUCLEAN && ret != -EBADMSG) { | 237 | if (ret && ret != -EUCLEAN && ret != -EBADMSG) { |
233 | printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x failed: %d\n", c->wbuf_ofs, ret); | 238 | pr_warn("%s(): Read back of page at %08x failed: %d\n", |
239 | __func__, c->wbuf_ofs, ret); | ||
234 | return ret; | 240 | return ret; |
235 | } else if (retlen != c->wbuf_pagesize) { | 241 | } else if (retlen != c->wbuf_pagesize) { |
236 | printk(KERN_WARNING "jffs2_verify_write(): Read back of page at %08x gave short read: %zd not %d.\n", ofs, retlen, c->wbuf_pagesize); | 242 | pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n", |
243 | __func__, ofs, retlen, c->wbuf_pagesize); | ||
237 | return -EIO; | 244 | return -EIO; |
238 | } | 245 | } |
239 | if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) | 246 | if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) |
@@ -246,12 +253,12 @@ static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, | |||
246 | else | 253 | else |
247 | eccstr = "OK or unused"; | 254 | eccstr = "OK or unused"; |
248 | 255 | ||
249 | printk(KERN_WARNING "Write verify error (ECC %s) at %08x. Wrote:\n", | 256 | pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n", |
250 | eccstr, c->wbuf_ofs); | 257 | eccstr, c->wbuf_ofs); |
251 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, | 258 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, |
252 | c->wbuf, c->wbuf_pagesize, 0); | 259 | c->wbuf, c->wbuf_pagesize, 0); |
253 | 260 | ||
254 | printk(KERN_WARNING "Read back:\n"); | 261 | pr_warn("Read back:\n"); |
255 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, | 262 | print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, |
256 | c->wbuf_verify, c->wbuf_pagesize, 0); | 263 | c->wbuf_verify, c->wbuf_pagesize, 0); |
257 | 264 | ||
@@ -308,7 +315,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
308 | 315 | ||
309 | if (!first_raw) { | 316 | if (!first_raw) { |
310 | /* All nodes were obsolete. Nothing to recover. */ | 317 | /* All nodes were obsolete. Nothing to recover. */ |
311 | D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n")); | 318 | jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n"); |
312 | c->wbuf_len = 0; | 319 | c->wbuf_len = 0; |
313 | return; | 320 | return; |
314 | } | 321 | } |
@@ -331,7 +338,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
331 | 338 | ||
332 | buf = kmalloc(end - start, GFP_KERNEL); | 339 | buf = kmalloc(end - start, GFP_KERNEL); |
333 | if (!buf) { | 340 | if (!buf) { |
334 | printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n"); | 341 | pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n"); |
335 | 342 | ||
336 | goto read_failed; | 343 | goto read_failed; |
337 | } | 344 | } |
@@ -346,7 +353,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
346 | ret = 0; | 353 | ret = 0; |
347 | 354 | ||
348 | if (ret || retlen != c->wbuf_ofs - start) { | 355 | if (ret || retlen != c->wbuf_ofs - start) { |
349 | printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n"); | 356 | pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n"); |
350 | 357 | ||
351 | kfree(buf); | 358 | kfree(buf); |
352 | buf = NULL; | 359 | buf = NULL; |
@@ -380,7 +387,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
380 | /* ... and get an allocation of space from a shiny new block instead */ | 387 | /* ... and get an allocation of space from a shiny new block instead */ |
381 | ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); | 388 | ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); |
382 | if (ret) { | 389 | if (ret) { |
383 | printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n"); | 390 | pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n"); |
384 | kfree(buf); | 391 | kfree(buf); |
385 | return; | 392 | return; |
386 | } | 393 | } |
@@ -390,7 +397,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
390 | 397 | ||
391 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); | 398 | ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); |
392 | if (ret) { | 399 | if (ret) { |
393 | printk(KERN_WARNING "Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); | 400 | pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); |
394 | kfree(buf); | 401 | kfree(buf); |
395 | return; | 402 | return; |
396 | } | 403 | } |
@@ -406,13 +413,13 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
406 | unsigned char *rewrite_buf = buf?:c->wbuf; | 413 | unsigned char *rewrite_buf = buf?:c->wbuf; |
407 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); | 414 | uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); |
408 | 415 | ||
409 | D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n", | 416 | jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n", |
410 | towrite, ofs)); | 417 | towrite, ofs); |
411 | 418 | ||
412 | #ifdef BREAKMEHEADER | 419 | #ifdef BREAKMEHEADER |
413 | static int breakme; | 420 | static int breakme; |
414 | if (breakme++ == 20) { | 421 | if (breakme++ == 20) { |
415 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs); | 422 | pr_notice("Faking write error at 0x%08x\n", ofs); |
416 | breakme = 0; | 423 | breakme = 0; |
417 | mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); | 424 | mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); |
418 | ret = -EIO; | 425 | ret = -EIO; |
@@ -423,7 +430,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
423 | 430 | ||
424 | if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { | 431 | if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { |
425 | /* Argh. We tried. Really we did. */ | 432 | /* Argh. We tried. Really we did. */ |
426 | printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n"); | 433 | pr_crit("Recovery of wbuf failed due to a second write error\n"); |
427 | kfree(buf); | 434 | kfree(buf); |
428 | 435 | ||
429 | if (retlen) | 436 | if (retlen) |
@@ -431,7 +438,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
431 | 438 | ||
432 | return; | 439 | return; |
433 | } | 440 | } |
434 | printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs); | 441 | pr_notice("Recovery of wbuf succeeded to %08x\n", ofs); |
435 | 442 | ||
436 | c->wbuf_len = (end - start) - towrite; | 443 | c->wbuf_len = (end - start) - towrite; |
437 | c->wbuf_ofs = ofs + towrite; | 444 | c->wbuf_ofs = ofs + towrite; |
@@ -459,8 +466,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
459 | struct jffs2_raw_node_ref **adjust_ref = NULL; | 466 | struct jffs2_raw_node_ref **adjust_ref = NULL; |
460 | struct jffs2_inode_info *f = NULL; | 467 | struct jffs2_inode_info *f = NULL; |
461 | 468 | ||
462 | D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n", | 469 | jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n", |
463 | rawlen, ref_offset(raw), ref_flags(raw), ofs)); | 470 | rawlen, ref_offset(raw), ref_flags(raw), ofs); |
464 | 471 | ||
465 | ic = jffs2_raw_ref_to_ic(raw); | 472 | ic = jffs2_raw_ref_to_ic(raw); |
466 | 473 | ||
@@ -540,7 +547,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
540 | 547 | ||
541 | /* Fix up the original jeb now it's on the bad_list */ | 548 | /* Fix up the original jeb now it's on the bad_list */ |
542 | if (first_raw == jeb->first_node) { | 549 | if (first_raw == jeb->first_node) { |
543 | D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset)); | 550 | jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n", |
551 | jeb->offset); | ||
544 | list_move(&jeb->list, &c->erase_pending_list); | 552 | list_move(&jeb->list, &c->erase_pending_list); |
545 | c->nr_erasing_blocks++; | 553 | c->nr_erasing_blocks++; |
546 | jffs2_garbage_collect_trigger(c); | 554 | jffs2_garbage_collect_trigger(c); |
@@ -554,7 +562,8 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) | |||
554 | 562 | ||
555 | spin_unlock(&c->erase_completion_lock); | 563 | spin_unlock(&c->erase_completion_lock); |
556 | 564 | ||
557 | D1(printk(KERN_DEBUG "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len)); | 565 | jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", |
566 | c->wbuf_ofs, c->wbuf_len); | ||
558 | 567 | ||
559 | } | 568 | } |
560 | 569 | ||
@@ -579,7 +588,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
579 | return 0; | 588 | return 0; |
580 | 589 | ||
581 | if (!mutex_is_locked(&c->alloc_sem)) { | 590 | if (!mutex_is_locked(&c->alloc_sem)) { |
582 | printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n"); | 591 | pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n"); |
583 | BUG(); | 592 | BUG(); |
584 | } | 593 | } |
585 | 594 | ||
@@ -617,7 +626,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
617 | #ifdef BREAKME | 626 | #ifdef BREAKME |
618 | static int breakme; | 627 | static int breakme; |
619 | if (breakme++ == 20) { | 628 | if (breakme++ == 20) { |
620 | printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs); | 629 | pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs); |
621 | breakme = 0; | 630 | breakme = 0; |
622 | mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, | 631 | mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, |
623 | brokenbuf); | 632 | brokenbuf); |
@@ -629,11 +638,11 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
629 | &retlen, c->wbuf); | 638 | &retlen, c->wbuf); |
630 | 639 | ||
631 | if (ret) { | 640 | if (ret) { |
632 | printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n", ret); | 641 | pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret); |
633 | goto wfail; | 642 | goto wfail; |
634 | } else if (retlen != c->wbuf_pagesize) { | 643 | } else if (retlen != c->wbuf_pagesize) { |
635 | printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", | 644 | pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", |
636 | retlen, c->wbuf_pagesize); | 645 | retlen, c->wbuf_pagesize); |
637 | ret = -EIO; | 646 | ret = -EIO; |
638 | goto wfail; | 647 | goto wfail; |
639 | } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { | 648 | } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { |
@@ -647,17 +656,18 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) | |||
647 | if (pad) { | 656 | if (pad) { |
648 | uint32_t waste = c->wbuf_pagesize - c->wbuf_len; | 657 | uint32_t waste = c->wbuf_pagesize - c->wbuf_len; |
649 | 658 | ||
650 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", | 659 | jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", |
651 | (wbuf_jeb==c->nextblock)?"next":"", wbuf_jeb->offset)); | 660 | (wbuf_jeb == c->nextblock) ? "next" : "", |
661 | wbuf_jeb->offset); | ||
652 | 662 | ||
653 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be | 663 | /* wbuf_pagesize - wbuf_len is the amount of space that's to be |
654 | padded. If there is less free space in the block than that, | 664 | padded. If there is less free space in the block than that, |
655 | something screwed up */ | 665 | something screwed up */ |
656 | if (wbuf_jeb->free_size < waste) { | 666 | if (wbuf_jeb->free_size < waste) { |
657 | printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", | 667 | pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", |
658 | c->wbuf_ofs, c->wbuf_len, waste); | 668 | c->wbuf_ofs, c->wbuf_len, waste); |
659 | printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", | 669 | pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", |
660 | wbuf_jeb->offset, wbuf_jeb->free_size); | 670 | wbuf_jeb->offset, wbuf_jeb->free_size); |
661 | BUG(); | 671 | BUG(); |
662 | } | 672 | } |
663 | 673 | ||
@@ -694,14 +704,14 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
694 | uint32_t old_wbuf_len; | 704 | uint32_t old_wbuf_len; |
695 | int ret = 0; | 705 | int ret = 0; |
696 | 706 | ||
697 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino)); | 707 | jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino); |
698 | 708 | ||
699 | if (!c->wbuf) | 709 | if (!c->wbuf) |
700 | return 0; | 710 | return 0; |
701 | 711 | ||
702 | mutex_lock(&c->alloc_sem); | 712 | mutex_lock(&c->alloc_sem); |
703 | if (!jffs2_wbuf_pending_for_ino(c, ino)) { | 713 | if (!jffs2_wbuf_pending_for_ino(c, ino)) { |
704 | D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino)); | 714 | jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino); |
705 | mutex_unlock(&c->alloc_sem); | 715 | mutex_unlock(&c->alloc_sem); |
706 | return 0; | 716 | return 0; |
707 | } | 717 | } |
@@ -711,7 +721,8 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
711 | 721 | ||
712 | if (c->unchecked_size) { | 722 | if (c->unchecked_size) { |
713 | /* GC won't make any progress for a while */ | 723 | /* GC won't make any progress for a while */ |
714 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n")); | 724 | jffs2_dbg(1, "%s(): padding. Not finished checking\n", |
725 | __func__); | ||
715 | down_write(&c->wbuf_sem); | 726 | down_write(&c->wbuf_sem); |
716 | ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); | 727 | ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); |
717 | /* retry flushing wbuf in case jffs2_wbuf_recover | 728 | /* retry flushing wbuf in case jffs2_wbuf_recover |
@@ -724,7 +735,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
724 | 735 | ||
725 | mutex_unlock(&c->alloc_sem); | 736 | mutex_unlock(&c->alloc_sem); |
726 | 737 | ||
727 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n")); | 738 | jffs2_dbg(1, "%s(): calls gc pass\n", __func__); |
728 | 739 | ||
729 | ret = jffs2_garbage_collect_pass(c); | 740 | ret = jffs2_garbage_collect_pass(c); |
730 | if (ret) { | 741 | if (ret) { |
@@ -742,7 +753,7 @@ int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) | |||
742 | mutex_lock(&c->alloc_sem); | 753 | mutex_lock(&c->alloc_sem); |
743 | } | 754 | } |
744 | 755 | ||
745 | D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n")); | 756 | jffs2_dbg(1, "%s(): ends...\n", __func__); |
746 | 757 | ||
747 | mutex_unlock(&c->alloc_sem); | 758 | mutex_unlock(&c->alloc_sem); |
748 | return ret; | 759 | return ret; |
@@ -811,9 +822,8 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
811 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { | 822 | if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { |
812 | /* It's a write to a new block */ | 823 | /* It's a write to a new block */ |
813 | if (c->wbuf_len) { | 824 | if (c->wbuf_len) { |
814 | D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx " | 825 | jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n", |
815 | "causes flush of wbuf at 0x%08x\n", | 826 | __func__, (unsigned long)to, c->wbuf_ofs); |
816 | (unsigned long)to, c->wbuf_ofs)); | ||
817 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); | 827 | ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); |
818 | if (ret) | 828 | if (ret) |
819 | goto outerr; | 829 | goto outerr; |
@@ -825,11 +835,11 @@ int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, | |||
825 | 835 | ||
826 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { | 836 | if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { |
827 | /* We're not writing immediately after the writebuffer. Bad. */ | 837 | /* We're not writing immediately after the writebuffer. Bad. */ |
828 | printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write " | 838 | pr_crit("%s(): Non-contiguous write to %08lx\n", |
829 | "to %08lx\n", (unsigned long)to); | 839 | __func__, (unsigned long)to); |
830 | if (c->wbuf_len) | 840 | if (c->wbuf_len) |
831 | printk(KERN_CRIT "wbuf was previously %08x-%08x\n", | 841 | pr_crit("wbuf was previously %08x-%08x\n", |
832 | c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len); | 842 | c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len); |
833 | BUG(); | 843 | BUG(); |
834 | } | 844 | } |
835 | 845 | ||
@@ -957,8 +967,8 @@ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *re | |||
957 | 967 | ||
958 | if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { | 968 | if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { |
959 | if (ret == -EBADMSG) | 969 | if (ret == -EBADMSG) |
960 | printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx)" | 970 | pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", |
961 | " returned ECC error\n", len, ofs); | 971 | len, ofs); |
962 | /* | 972 | /* |
963 | * We have the raw data without ECC correction in the buffer, | 973 | * We have the raw data without ECC correction in the buffer, |
964 | * maybe we are lucky and all data or parts are correct. We | 974 | * maybe we are lucky and all data or parts are correct. We |
@@ -1034,9 +1044,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c, | |||
1034 | 1044 | ||
1035 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); | 1045 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); |
1036 | if (ret || ops.oobretlen != ops.ooblen) { | 1046 | if (ret || ops.oobretlen != ops.ooblen) { |
1037 | printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" | 1047 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", |
1038 | " bytes, read %zd bytes, error %d\n", | 1048 | jeb->offset, ops.ooblen, ops.oobretlen, ret); |
1039 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | ||
1040 | if (!ret) | 1049 | if (!ret) |
1041 | ret = -EIO; | 1050 | ret = -EIO; |
1042 | return ret; | 1051 | return ret; |
@@ -1048,8 +1057,8 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c, | |||
1048 | continue; | 1057 | continue; |
1049 | 1058 | ||
1050 | if (ops.oobbuf[i] != 0xFF) { | 1059 | if (ops.oobbuf[i] != 0xFF) { |
1051 | D2(printk(KERN_DEBUG "Found %02x at %x in OOB for " | 1060 | jffs2_dbg(2, "Found %02x at %x in OOB for " |
1052 | "%08x\n", ops.oobbuf[i], i, jeb->offset)); | 1061 | "%08x\n", ops.oobbuf[i], i, jeb->offset); |
1053 | return 1; | 1062 | return 1; |
1054 | } | 1063 | } |
1055 | } | 1064 | } |
@@ -1077,9 +1086,8 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, | |||
1077 | 1086 | ||
1078 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); | 1087 | ret = mtd_read_oob(c->mtd, jeb->offset, &ops); |
1079 | if (ret || ops.oobretlen != ops.ooblen) { | 1088 | if (ret || ops.oobretlen != ops.ooblen) { |
1080 | printk(KERN_ERR "cannot read OOB for EB at %08x, requested %zd" | 1089 | pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", |
1081 | " bytes, read %zd bytes, error %d\n", | 1090 | jeb->offset, ops.ooblen, ops.oobretlen, ret); |
1082 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | ||
1083 | if (!ret) | 1091 | if (!ret) |
1084 | ret = -EIO; | 1092 | ret = -EIO; |
1085 | return ret; | 1093 | return ret; |
@@ -1103,9 +1111,8 @@ int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, | |||
1103 | 1111 | ||
1104 | ret = mtd_write_oob(c->mtd, jeb->offset, &ops); | 1112 | ret = mtd_write_oob(c->mtd, jeb->offset, &ops); |
1105 | if (ret || ops.oobretlen != ops.ooblen) { | 1113 | if (ret || ops.oobretlen != ops.ooblen) { |
1106 | printk(KERN_ERR "cannot write OOB for EB at %08x, requested %zd" | 1114 | pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n", |
1107 | " bytes, read %zd bytes, error %d\n", | 1115 | jeb->offset, ops.ooblen, ops.oobretlen, ret); |
1108 | jeb->offset, ops.ooblen, ops.oobretlen, ret); | ||
1109 | if (!ret) | 1116 | if (!ret) |
1110 | ret = -EIO; | 1117 | ret = -EIO; |
1111 | return ret; | 1118 | return ret; |
@@ -1130,11 +1137,12 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock * | |||
1130 | if( ++jeb->bad_count < MAX_ERASE_FAILURES) | 1137 | if( ++jeb->bad_count < MAX_ERASE_FAILURES) |
1131 | return 0; | 1138 | return 0; |
1132 | 1139 | ||
1133 | printk(KERN_WARNING "JFFS2: marking eraseblock at %08x\n as bad", bad_offset); | 1140 | pr_warn("marking eraseblock at %08x as bad\n", bad_offset); |
1134 | ret = mtd_block_markbad(c->mtd, bad_offset); | 1141 | ret = mtd_block_markbad(c->mtd, bad_offset); |
1135 | 1142 | ||
1136 | if (ret) { | 1143 | if (ret) { |
1137 | D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret)); | 1144 | jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n", |
1145 | __func__, jeb->offset, ret); | ||
1138 | return ret; | 1146 | return ret; |
1139 | } | 1147 | } |
1140 | return 1; | 1148 | return 1; |
@@ -1151,11 +1159,11 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) | |||
1151 | c->cleanmarker_size = 0; | 1159 | c->cleanmarker_size = 0; |
1152 | 1160 | ||
1153 | if (!oinfo || oinfo->oobavail == 0) { | 1161 | if (!oinfo || oinfo->oobavail == 0) { |
1154 | printk(KERN_ERR "inconsistent device description\n"); | 1162 | pr_err("inconsistent device description\n"); |
1155 | return -EINVAL; | 1163 | return -EINVAL; |
1156 | } | 1164 | } |
1157 | 1165 | ||
1158 | D1(printk(KERN_DEBUG "JFFS2 using OOB on NAND\n")); | 1166 | jffs2_dbg(1, "using OOB on NAND\n"); |
1159 | 1167 | ||
1160 | c->oobavail = oinfo->oobavail; | 1168 | c->oobavail = oinfo->oobavail; |
1161 | 1169 | ||
@@ -1222,7 +1230,7 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | |||
1222 | 1230 | ||
1223 | if ((c->flash_size % c->sector_size) != 0) { | 1231 | if ((c->flash_size % c->sector_size) != 0) { |
1224 | c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; | 1232 | c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; |
1225 | printk(KERN_WARNING "JFFS2 flash size adjusted to %dKiB\n", c->flash_size); | 1233 | pr_warn("flash size adjusted to %dKiB\n", c->flash_size); |
1226 | }; | 1234 | }; |
1227 | 1235 | ||
1228 | c->wbuf_ofs = 0xFFFFFFFF; | 1236 | c->wbuf_ofs = 0xFFFFFFFF; |
@@ -1239,7 +1247,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) { | |||
1239 | } | 1247 | } |
1240 | #endif | 1248 | #endif |
1241 | 1249 | ||
1242 | printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); | 1250 | pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", |
1251 | c->wbuf_pagesize, c->sector_size); | ||
1243 | 1252 | ||
1244 | return 0; | 1253 | return 0; |
1245 | } | 1254 | } |
@@ -1297,7 +1306,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) { | |||
1297 | if (!c->wbuf) | 1306 | if (!c->wbuf) |
1298 | return -ENOMEM; | 1307 | return -ENOMEM; |
1299 | 1308 | ||
1300 | printk(KERN_INFO "JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n", c->wbuf_pagesize, c->sector_size); | 1309 | pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n", |
1310 | c->wbuf_pagesize, c->sector_size); | ||
1301 | 1311 | ||
1302 | return 0; | 1312 | return 0; |
1303 | } | 1313 | } |
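Another recurring substitution in the wbuf.c and scan.c hunks is replacing hard-coded function names inside message strings with a "%s(): " prefix fed from __func__, so the text cannot go stale if a function is later renamed. A minimal illustration of the pattern follows; the function and its arguments are hypothetical and not taken from the patch:

	/* Hypothetical example of the "%s(): " + __func__ convention. */
	static int example_flush(uint32_t ofs, int ret)
	{
		if (ret) {
			pr_warn("%s(): Write failed with %d at 0x%08x\n",
				__func__, ret, ofs);
			return ret;
		}
		jffs2_dbg(1, "%s(): ends...\n", __func__);
		return 0;
	}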
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index 30d175b6d290..b634de4c8101 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
14 | #include <linux/crc32.h> | 16 | #include <linux/crc32.h> |
@@ -36,7 +38,7 @@ int jffs2_do_new_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
36 | f->inocache->state = INO_STATE_PRESENT; | 38 | f->inocache->state = INO_STATE_PRESENT; |
37 | 39 | ||
38 | jffs2_add_ino_cache(c, f->inocache); | 40 | jffs2_add_ino_cache(c, f->inocache); |
39 | D1(printk(KERN_DEBUG "jffs2_do_new_inode(): Assigned ino# %d\n", f->inocache->ino)); | 41 | jffs2_dbg(1, "%s(): Assigned ino# %d\n", __func__, f->inocache->ino); |
40 | ri->ino = cpu_to_je32(f->inocache->ino); | 42 | ri->ino = cpu_to_je32(f->inocache->ino); |
41 | 43 | ||
42 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); | 44 | ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); |
@@ -68,7 +70,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
68 | unsigned long cnt = 2; | 70 | unsigned long cnt = 2; |
69 | 71 | ||
70 | D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) { | 72 | D1(if(je32_to_cpu(ri->hdr_crc) != crc32(0, ri, sizeof(struct jffs2_unknown_node)-4)) { |
71 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dnode()\n"); | 73 | pr_crit("Eep. CRC not correct in jffs2_write_dnode()\n"); |
72 | BUG(); | 74 | BUG(); |
73 | } | 75 | } |
74 | ); | 76 | ); |
@@ -78,7 +80,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
78 | vecs[1].iov_len = datalen; | 80 | vecs[1].iov_len = datalen; |
79 | 81 | ||
80 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { | 82 | if (je32_to_cpu(ri->totlen) != sizeof(*ri) + datalen) { |
81 | printk(KERN_WARNING "jffs2_write_dnode: ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", je32_to_cpu(ri->totlen), sizeof(*ri), datalen); | 83 | pr_warn("%s(): ri->totlen (0x%08x) != sizeof(*ri) (0x%08zx) + datalen (0x%08x)\n", |
84 | __func__, je32_to_cpu(ri->totlen), | ||
85 | sizeof(*ri), datalen); | ||
82 | } | 86 | } |
83 | 87 | ||
84 | fn = jffs2_alloc_full_dnode(); | 88 | fn = jffs2_alloc_full_dnode(); |
@@ -95,9 +99,9 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
95 | 99 | ||
96 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { | 100 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(ri->version) < f->highest_version)) { |
97 | BUG_ON(!retried); | 101 | BUG_ON(!retried); |
98 | D1(printk(KERN_DEBUG "jffs2_write_dnode : dnode_version %d, " | 102 | jffs2_dbg(1, "%s(): dnode_version %d, highest version %d -> updating dnode\n", |
99 | "highest version %d -> updating dnode\n", | 103 | __func__, |
100 | je32_to_cpu(ri->version), f->highest_version)); | 104 | je32_to_cpu(ri->version), f->highest_version); |
101 | ri->version = cpu_to_je32(++f->highest_version); | 105 | ri->version = cpu_to_je32(++f->highest_version); |
102 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); | 106 | ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8)); |
103 | } | 107 | } |
@@ -106,8 +110,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
106 | (alloc_mode==ALLOC_GC)?0:f->inocache->ino); | 110 | (alloc_mode==ALLOC_GC)?0:f->inocache->ino); |
107 | 111 | ||
108 | if (ret || (retlen != sizeof(*ri) + datalen)) { | 112 | if (ret || (retlen != sizeof(*ri) + datalen)) { |
109 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | 113 | pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", |
110 | sizeof(*ri)+datalen, flash_ofs, ret, retlen); | 114 | sizeof(*ri) + datalen, flash_ofs, ret, retlen); |
111 | 115 | ||
112 | /* Mark the space as dirtied */ | 116 | /* Mark the space as dirtied */ |
113 | if (retlen) { | 117 | if (retlen) { |
@@ -118,7 +122,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
118 | this node */ | 122 | this node */ |
119 | jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL); | 123 | jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*ri)+datalen), NULL); |
120 | } else { | 124 | } else { |
121 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); | 125 | pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", |
126 | flash_ofs); | ||
122 | } | 127 | } |
123 | if (!retried && alloc_mode != ALLOC_NORETRY) { | 128 | if (!retried && alloc_mode != ALLOC_NORETRY) { |
124 | /* Try to reallocate space and retry */ | 129 | /* Try to reallocate space and retry */ |
@@ -127,7 +132,7 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
127 | 132 | ||
128 | retried = 1; | 133 | retried = 1; |
129 | 134 | ||
130 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 135 | jffs2_dbg(1, "Retrying failed write.\n"); |
131 | 136 | ||
132 | jffs2_dbg_acct_sanity_check(c,jeb); | 137 | jffs2_dbg_acct_sanity_check(c,jeb); |
133 | jffs2_dbg_acct_paranoia_check(c, jeb); | 138 | jffs2_dbg_acct_paranoia_check(c, jeb); |
@@ -147,14 +152,16 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
147 | 152 | ||
148 | if (!ret) { | 153 | if (!ret) { |
149 | flash_ofs = write_ofs(c); | 154 | flash_ofs = write_ofs(c); |
150 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 155 | jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n", |
156 | flash_ofs); | ||
151 | 157 | ||
152 | jffs2_dbg_acct_sanity_check(c,jeb); | 158 | jffs2_dbg_acct_sanity_check(c,jeb); |
153 | jffs2_dbg_acct_paranoia_check(c, jeb); | 159 | jffs2_dbg_acct_paranoia_check(c, jeb); |
154 | 160 | ||
155 | goto retry; | 161 | goto retry; |
156 | } | 162 | } |
157 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 163 | jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", |
164 | ret); | ||
158 | } | 165 | } |
159 | /* Release the full_dnode which is now useless, and return */ | 166 | /* Release the full_dnode which is now useless, and return */ |
160 | jffs2_free_full_dnode(fn); | 167 | jffs2_free_full_dnode(fn); |
@@ -183,10 +190,10 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 | |||
183 | fn->size = je32_to_cpu(ri->dsize); | 190 | fn->size = je32_to_cpu(ri->dsize); |
184 | fn->frags = 0; | 191 | fn->frags = 0; |
185 | 192 | ||
186 | D1(printk(KERN_DEBUG "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", | 193 | jffs2_dbg(1, "jffs2_write_dnode wrote node at 0x%08x(%d) with dsize 0x%x, csize 0x%x, node_crc 0x%08x, data_crc 0x%08x, totlen 0x%08x\n", |
187 | flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), | 194 | flash_ofs & ~3, flash_ofs & 3, je32_to_cpu(ri->dsize), |
188 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), | 195 | je32_to_cpu(ri->csize), je32_to_cpu(ri->node_crc), |
189 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen))); | 196 | je32_to_cpu(ri->data_crc), je32_to_cpu(ri->totlen)); |
190 | 197 | ||
191 | if (retried) { | 198 | if (retried) { |
192 | jffs2_dbg_acct_sanity_check(c,NULL); | 199 | jffs2_dbg_acct_sanity_check(c,NULL); |
@@ -206,22 +213,23 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
206 | int retried = 0; | 213 | int retried = 0; |
207 | int ret; | 214 | int ret; |
208 | 215 | ||
209 | D1(printk(KERN_DEBUG "jffs2_write_dirent(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", | 216 | jffs2_dbg(1, "%s(ino #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x)\n", |
217 | __func__, | ||
210 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), | 218 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), |
211 | je32_to_cpu(rd->name_crc))); | 219 | je32_to_cpu(rd->name_crc)); |
212 | 220 | ||
213 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { | 221 | D1(if(je32_to_cpu(rd->hdr_crc) != crc32(0, rd, sizeof(struct jffs2_unknown_node)-4)) { |
214 | printk(KERN_CRIT "Eep. CRC not correct in jffs2_write_dirent()\n"); | 222 | pr_crit("Eep. CRC not correct in jffs2_write_dirent()\n"); |
215 | BUG(); | 223 | BUG(); |
216 | }); | 224 | }); |
217 | 225 | ||
218 | if (strnlen(name, namelen) != namelen) { | 226 | if (strnlen(name, namelen) != namelen) { |
219 | /* This should never happen, but seems to have done on at least one | 227 | /* This should never happen, but seems to have done on at least one |
220 | occasion: https://dev.laptop.org/ticket/4184 */ | 228 | occasion: https://dev.laptop.org/ticket/4184 */ |
221 | printk(KERN_CRIT "Error in jffs2_write_dirent() -- name contains zero bytes!\n"); | 229 | pr_crit("Error in jffs2_write_dirent() -- name contains zero bytes!\n"); |
222 | printk(KERN_CRIT "Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n", | 230 | pr_crit("Directory inode #%u, name at *0x%p \"%s\"->ino #%u, name_crc 0x%08x\n", |
223 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), | 231 | je32_to_cpu(rd->pino), name, name, je32_to_cpu(rd->ino), |
224 | je32_to_cpu(rd->name_crc)); | 232 | je32_to_cpu(rd->name_crc)); |
225 | WARN_ON(1); | 233 | WARN_ON(1); |
226 | return ERR_PTR(-EIO); | 234 | return ERR_PTR(-EIO); |
227 | } | 235 | } |
@@ -249,9 +257,9 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
249 | 257 | ||
250 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { | 258 | if ((alloc_mode!=ALLOC_GC) && (je32_to_cpu(rd->version) < f->highest_version)) { |
251 | BUG_ON(!retried); | 259 | BUG_ON(!retried); |
252 | D1(printk(KERN_DEBUG "jffs2_write_dirent : dirent_version %d, " | 260 | jffs2_dbg(1, "%s(): dirent_version %d, highest version %d -> updating dirent\n", |
253 | "highest version %d -> updating dirent\n", | 261 | __func__, |
254 | je32_to_cpu(rd->version), f->highest_version)); | 262 | je32_to_cpu(rd->version), f->highest_version); |
255 | rd->version = cpu_to_je32(++f->highest_version); | 263 | rd->version = cpu_to_je32(++f->highest_version); |
256 | fd->version = je32_to_cpu(rd->version); | 264 | fd->version = je32_to_cpu(rd->version); |
257 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); | 265 | rd->node_crc = cpu_to_je32(crc32(0, rd, sizeof(*rd)-8)); |
@@ -260,13 +268,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
260 | ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, | 268 | ret = jffs2_flash_writev(c, vecs, 2, flash_ofs, &retlen, |
261 | (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); | 269 | (alloc_mode==ALLOC_GC)?0:je32_to_cpu(rd->pino)); |
262 | if (ret || (retlen != sizeof(*rd) + namelen)) { | 270 | if (ret || (retlen != sizeof(*rd) + namelen)) { |
263 | printk(KERN_NOTICE "Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", | 271 | pr_notice("Write of %zd bytes at 0x%08x failed. returned %d, retlen %zd\n", |
264 | sizeof(*rd)+namelen, flash_ofs, ret, retlen); | 272 | sizeof(*rd) + namelen, flash_ofs, ret, retlen); |
265 | /* Mark the space as dirtied */ | 273 | /* Mark the space as dirtied */ |
266 | if (retlen) { | 274 | if (retlen) { |
267 | jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL); | 275 | jffs2_add_physical_node_ref(c, flash_ofs | REF_OBSOLETE, PAD(sizeof(*rd)+namelen), NULL); |
268 | } else { | 276 | } else { |
269 | printk(KERN_NOTICE "Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", flash_ofs); | 277 | pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n", |
278 | flash_ofs); | ||
270 | } | 279 | } |
271 | if (!retried) { | 280 | if (!retried) { |
272 | /* Try to reallocate space and retry */ | 281 | /* Try to reallocate space and retry */ |
@@ -275,7 +284,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
275 | 284 | ||
276 | retried = 1; | 285 | retried = 1; |
277 | 286 | ||
278 | D1(printk(KERN_DEBUG "Retrying failed write.\n")); | 287 | jffs2_dbg(1, "Retrying failed write.\n"); |
279 | 288 | ||
280 | jffs2_dbg_acct_sanity_check(c,jeb); | 289 | jffs2_dbg_acct_sanity_check(c,jeb); |
281 | jffs2_dbg_acct_paranoia_check(c, jeb); | 290 | jffs2_dbg_acct_paranoia_check(c, jeb); |
@@ -295,12 +304,14 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff | |||
295 | 304 | ||
296 | if (!ret) { | 305 | if (!ret) { |
297 | flash_ofs = write_ofs(c); | 306 | flash_ofs = write_ofs(c); |
298 | D1(printk(KERN_DEBUG "Allocated space at 0x%08x to retry failed write.\n", flash_ofs)); | 307 | jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write\n", |
308 | flash_ofs); | ||
299 | jffs2_dbg_acct_sanity_check(c,jeb); | 309 | jffs2_dbg_acct_sanity_check(c,jeb); |
300 | jffs2_dbg_acct_paranoia_check(c, jeb); | 310 | jffs2_dbg_acct_paranoia_check(c, jeb); |
301 | goto retry; | 311 | goto retry; |
302 | } | 312 | } |
303 | D1(printk(KERN_DEBUG "Failed to allocate space to retry failed write: %d!\n", ret)); | 313 | jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n", |
314 | ret); | ||
304 | } | 315 | } |
305 | /* Release the full_dnode which is now useless, and return */ | 316 | /* Release the full_dnode which is now useless, and return */ |
306 | jffs2_free_full_dirent(fd); | 317 | jffs2_free_full_dirent(fd); |
@@ -333,8 +344,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
333 | int ret = 0; | 344 | int ret = 0; |
334 | uint32_t writtenlen = 0; | 345 | uint32_t writtenlen = 0; |
335 | 346 | ||
336 | D1(printk(KERN_DEBUG "jffs2_write_inode_range(): Ino #%u, ofs 0x%x, len 0x%x\n", | 347 | jffs2_dbg(1, "%s(): Ino #%u, ofs 0x%x, len 0x%x\n", |
337 | f->inocache->ino, offset, writelen)); | 348 | __func__, f->inocache->ino, offset, writelen); |
338 | 349 | ||
339 | while(writelen) { | 350 | while(writelen) { |
340 | struct jffs2_full_dnode *fn; | 351 | struct jffs2_full_dnode *fn; |
@@ -345,12 +356,13 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
345 | int retried = 0; | 356 | int retried = 0; |
346 | 357 | ||
347 | retry: | 358 | retry: |
348 | D2(printk(KERN_DEBUG "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", writelen, offset)); | 359 | jffs2_dbg(2, "jffs2_commit_write() loop: 0x%x to write to 0x%x\n", |
360 | writelen, offset); | ||
349 | 361 | ||
350 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, | 362 | ret = jffs2_reserve_space(c, sizeof(*ri) + JFFS2_MIN_DATA_LEN, |
351 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); | 363 | &alloclen, ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); |
352 | if (ret) { | 364 | if (ret) { |
353 | D1(printk(KERN_DEBUG "jffs2_reserve_space returned %d\n", ret)); | 365 | jffs2_dbg(1, "jffs2_reserve_space returned %d\n", ret); |
354 | break; | 366 | break; |
355 | } | 367 | } |
356 | mutex_lock(&f->sem); | 368 | mutex_lock(&f->sem); |
@@ -386,7 +398,7 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
386 | if (!retried) { | 398 | if (!retried) { |
387 | /* Write error to be retried */ | 399 | /* Write error to be retried */ |
388 | retried = 1; | 400 | retried = 1; |
389 | D1(printk(KERN_DEBUG "Retrying node write in jffs2_write_inode_range()\n")); | 401 | jffs2_dbg(1, "Retrying node write in jffs2_write_inode_range()\n"); |
390 | goto retry; | 402 | goto retry; |
391 | } | 403 | } |
392 | break; | 404 | break; |
@@ -399,7 +411,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
399 | } | 411 | } |
400 | if (ret) { | 412 | if (ret) { |
401 | /* Eep */ | 413 | /* Eep */ |
402 | D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", ret)); | 414 | jffs2_dbg(1, "Eep. add_full_dnode_to_inode() failed in commit_write, returned %d\n", |
415 | ret); | ||
403 | jffs2_mark_node_obsolete(c, fn->raw); | 416 | jffs2_mark_node_obsolete(c, fn->raw); |
404 | jffs2_free_full_dnode(fn); | 417 | jffs2_free_full_dnode(fn); |
405 | 418 | ||
@@ -410,11 +423,11 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, | |||
410 | mutex_unlock(&f->sem); | 423 | mutex_unlock(&f->sem); |
411 | jffs2_complete_reservation(c); | 424 | jffs2_complete_reservation(c); |
412 | if (!datalen) { | 425 | if (!datalen) { |
413 | printk(KERN_WARNING "Eep. We didn't actually write any data in jffs2_write_inode_range()\n"); | 426 | pr_warn("Eep. We didn't actually write any data in jffs2_write_inode_range()\n"); |
414 | ret = -EIO; | 427 | ret = -EIO; |
415 | break; | 428 | break; |
416 | } | 429 | } |
417 | D1(printk(KERN_DEBUG "increasing writtenlen by %d\n", datalen)); | 430 | jffs2_dbg(1, "increasing writtenlen by %d\n", datalen); |
418 | writtenlen += datalen; | 431 | writtenlen += datalen; |
419 | offset += datalen; | 432 | offset += datalen; |
420 | writelen -= datalen; | 433 | writelen -= datalen; |
@@ -439,7 +452,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
439 | */ | 452 | */ |
440 | ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, | 453 | ret = jffs2_reserve_space(c, sizeof(*ri), &alloclen, ALLOC_NORMAL, |
441 | JFFS2_SUMMARY_INODE_SIZE); | 454 | JFFS2_SUMMARY_INODE_SIZE); |
442 | D1(printk(KERN_DEBUG "jffs2_do_create(): reserved 0x%x bytes\n", alloclen)); | 455 | jffs2_dbg(1, "%s(): reserved 0x%x bytes\n", __func__, alloclen); |
443 | if (ret) | 456 | if (ret) |
444 | return ret; | 457 | return ret; |
445 | 458 | ||
@@ -450,11 +463,11 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
450 | 463 | ||
451 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); | 464 | fn = jffs2_write_dnode(c, f, ri, NULL, 0, ALLOC_NORMAL); |
452 | 465 | ||
453 | D1(printk(KERN_DEBUG "jffs2_do_create created file with mode 0x%x\n", | 466 | jffs2_dbg(1, "jffs2_do_create created file with mode 0x%x\n", |
454 | jemode_to_cpu(ri->mode))); | 467 | jemode_to_cpu(ri->mode)); |
455 | 468 | ||
456 | if (IS_ERR(fn)) { | 469 | if (IS_ERR(fn)) { |
457 | D1(printk(KERN_DEBUG "jffs2_write_dnode() failed\n")); | 470 | jffs2_dbg(1, "jffs2_write_dnode() failed\n"); |
458 | /* Eeek. Wave bye bye */ | 471 | /* Eeek. Wave bye bye */ |
459 | mutex_unlock(&f->sem); | 472 | mutex_unlock(&f->sem); |
460 | jffs2_complete_reservation(c); | 473 | jffs2_complete_reservation(c); |
@@ -480,7 +493,7 @@ int jffs2_do_create(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
480 | 493 | ||
481 | if (ret) { | 494 | if (ret) { |
482 | /* Eep. */ | 495 | /* Eep. */ |
483 | D1(printk(KERN_DEBUG "jffs2_reserve_space() for dirent failed\n")); | 496 | jffs2_dbg(1, "jffs2_reserve_space() for dirent failed\n"); |
484 | return ret; | 497 | return ret; |
485 | } | 498 | } |
486 | 499 | ||
@@ -597,8 +610,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
597 | !memcmp(fd->name, name, namelen) && | 610 | !memcmp(fd->name, name, namelen) && |
598 | !fd->name[namelen]) { | 611 | !fd->name[namelen]) { |
599 | 612 | ||
600 | D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n", | 613 | jffs2_dbg(1, "Marking old dirent node (ino #%u) @%08x obsolete\n", |
601 | fd->ino, ref_offset(fd->raw))); | 614 | fd->ino, ref_offset(fd->raw)); |
602 | jffs2_mark_node_obsolete(c, fd->raw); | 615 | jffs2_mark_node_obsolete(c, fd->raw); |
603 | /* We don't want to remove it from the list immediately, | 616 | /* We don't want to remove it from the list immediately, |
604 | because that screws up getdents()/seek() semantics even | 617 | because that screws up getdents()/seek() semantics even |
@@ -627,11 +640,13 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f, | |||
627 | dead_f->dents = fd->next; | 640 | dead_f->dents = fd->next; |
628 | 641 | ||
629 | if (fd->ino) { | 642 | if (fd->ino) { |
630 | printk(KERN_WARNING "Deleting inode #%u with active dentry \"%s\"->ino #%u\n", | 643 | pr_warn("Deleting inode #%u with active dentry \"%s\"->ino #%u\n", |
631 | dead_f->inocache->ino, fd->name, fd->ino); | 644 | dead_f->inocache->ino, |
645 | fd->name, fd->ino); | ||
632 | } else { | 646 | } else { |
633 | D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n", | 647 | jffs2_dbg(1, "Removing deletion dirent for \"%s\" from dir ino #%u\n", |
634 | fd->name, dead_f->inocache->ino)); | 648 | fd->name, |
649 | dead_f->inocache->ino); | ||
635 | } | 650 | } |
636 | if (fd->raw) | 651 | if (fd->raw) |
637 | jffs2_mark_node_obsolete(c, fd->raw); | 652 | jffs2_mark_node_obsolete(c, fd->raw); |
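The write.c changes above all follow one pattern: D1(printk(KERN_DEBUG ...)) becomes jffs2_dbg(1, ...), bare printk(KERN_&lt;level&gt; ...) becomes the matching pr_&lt;level&gt;(), and the new pr_fmt() definition prepends the module name to every message. A minimal before/after sketch of that pattern (illustrative, not lines taken from the patch):

	/* before */
	D1(printk(KERN_DEBUG "jffs2_write_dnode(): wrote node at 0x%08x\n", ofs));
	printk(KERN_WARNING "write failed\n");

	/* after: jffs2_dbg() keys off the debug level, pr_warn() picks up pr_fmt() */
	jffs2_dbg(1, "%s(): wrote node at 0x%08x\n", __func__, ofs);
	pr_warn("write failed\n");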
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index 3e93cdd19005..b55b803eddcb 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
13 | |||
12 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
13 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
14 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c index 71e2b4d50a0a..f86f51f99ace 100644 --- a/fs/romfs/storage.c +++ b/fs/romfs/storage.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #endif | 19 | #endif |
20 | 20 | ||
21 | #ifdef CONFIG_ROMFS_ON_MTD | 21 | #ifdef CONFIG_ROMFS_ON_MTD |
22 | #define ROMFS_MTD_READ(sb, ...) ((sb)->s_mtd->read((sb)->s_mtd, ##__VA_ARGS__)) | 22 | #define ROMFS_MTD_READ(sb, ...) mtd_read((sb)->s_mtd, ##__VA_ARGS__) |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * read data from an romfs image on an MTD device | 25 | * read data from an romfs image on an MTD device |
diff --git a/arch/arm/mach-mxs/include/mach/dma.h b/include/linux/fsl/mxs-dma.h index 203d7c4a3e11..203d7c4a3e11 100644 --- a/arch/arm/mach-mxs/include/mach/dma.h +++ b/include/linux/fsl/mxs-dma.h | |||
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index c4eec228eef9..650ef352f045 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h | |||
@@ -112,6 +112,11 @@ struct nand_bbt_descr { | |||
112 | #define NAND_BBT_USE_FLASH 0x00020000 | 112 | #define NAND_BBT_USE_FLASH 0x00020000 |
113 | /* Do not store flash based bad block table in OOB area; store it in-band */ | 113 | /* Do not store flash based bad block table in OOB area; store it in-band */ |
114 | #define NAND_BBT_NO_OOB 0x00040000 | 114 | #define NAND_BBT_NO_OOB 0x00040000 |
115 | /* | ||
116 | * Do not write new bad block markers to OOB; useful, e.g., when ECC covers | ||
117 | * entire spare area. Must be used with NAND_BBT_USE_FLASH. | ||
118 | */ | ||
119 | #define NAND_BBT_NO_OOB_BBM 0x00080000 | ||
115 | 120 | ||
116 | /* | 121 | /* |
117 | * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr | 122 | * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr |
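NAND_BBT_NO_OOB_BBM only makes sense alongside NAND_BBT_USE_FLASH, as the comment says: the bad block marker is kept out of OOB, so the flash-based table has to carry that information instead. In a NAND driver of this era the two flags would typically be ORed into the chip's bbt_options together, roughly (hypothetical probe-time snippet, field name assumed from the NAND core, not shown in this hunk):

	/* ECC owns the whole spare area, so keep BBMs out of OOB */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;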
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index 1bbd9f289245..ed270bd2e4df 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h | |||
@@ -47,6 +47,7 @@ struct mtd_blktrans_dev { | |||
47 | struct request_queue *rq; | 47 | struct request_queue *rq; |
48 | spinlock_t queue_lock; | 48 | spinlock_t queue_lock; |
49 | void *priv; | 49 | void *priv; |
50 | fmode_t file_mode; | ||
50 | }; | 51 | }; |
51 | 52 | ||
52 | struct mtd_blktrans_ops { | 53 | struct mtd_blktrans_ops { |
diff --git a/include/linux/mtd/fsmc.h b/include/linux/mtd/fsmc.h index 6987995ad3cf..b20029221fb1 100644 --- a/include/linux/mtd/fsmc.h +++ b/include/linux/mtd/fsmc.h | |||
@@ -26,95 +26,83 @@ | |||
26 | #define FSMC_NAND_BW8 1 | 26 | #define FSMC_NAND_BW8 1 |
27 | #define FSMC_NAND_BW16 2 | 27 | #define FSMC_NAND_BW16 2 |
28 | 28 | ||
29 | /* | ||
30 | * The placement of the Command Latch Enable (CLE) and | ||
31 | * Address Latch Enable (ALE) is twisted around in the | ||
32 | * SPEAR310 implementation. | ||
33 | */ | ||
34 | #if defined(CONFIG_MACH_SPEAR310) | ||
35 | #define PLAT_NAND_CLE (1 << 17) | ||
36 | #define PLAT_NAND_ALE (1 << 16) | ||
37 | #else | ||
38 | #define PLAT_NAND_CLE (1 << 16) | ||
39 | #define PLAT_NAND_ALE (1 << 17) | ||
40 | #endif | ||
41 | |||
42 | #define FSMC_MAX_NOR_BANKS 4 | 29 | #define FSMC_MAX_NOR_BANKS 4 |
43 | #define FSMC_MAX_NAND_BANKS 4 | 30 | #define FSMC_MAX_NAND_BANKS 4 |
44 | 31 | ||
45 | #define FSMC_FLASH_WIDTH8 1 | 32 | #define FSMC_FLASH_WIDTH8 1 |
46 | #define FSMC_FLASH_WIDTH16 2 | 33 | #define FSMC_FLASH_WIDTH16 2 |
47 | 34 | ||
48 | struct fsmc_nor_bank_regs { | 35 | /* fsmc controller registers for NOR flash */ |
49 | uint32_t ctrl; | 36 | #define CTRL 0x0 |
50 | uint32_t ctrl_tim; | 37 | /* ctrl register definitions */ |
51 | }; | 38 | #define BANK_ENABLE (1 << 0) |
52 | 39 | #define MUXED (1 << 1) | |
53 | /* ctrl register definitions */ | 40 | #define NOR_DEV (2 << 2) |
54 | #define BANK_ENABLE (1 << 0) | 41 | #define WIDTH_8 (0 << 4) |
55 | #define MUXED (1 << 1) | 42 | #define WIDTH_16 (1 << 4) |
56 | #define NOR_DEV (2 << 2) | 43 | #define RSTPWRDWN (1 << 6) |
57 | #define WIDTH_8 (0 << 4) | 44 | #define WPROT (1 << 7) |
58 | #define WIDTH_16 (1 << 4) | 45 | #define WRT_ENABLE (1 << 12) |
59 | #define RSTPWRDWN (1 << 6) | 46 | #define WAIT_ENB (1 << 13) |
60 | #define WPROT (1 << 7) | 47 | |
61 | #define WRT_ENABLE (1 << 12) | 48 | #define CTRL_TIM 0x4 |
62 | #define WAIT_ENB (1 << 13) | 49 | /* ctrl_tim register definitions */ |
63 | 50 | ||
64 | /* ctrl_tim register definitions */ | 51 | #define FSMC_NOR_BANK_SZ 0x8 |
65 | |||
66 | struct fsmc_nand_bank_regs { | ||
67 | uint32_t pc; | ||
68 | uint32_t sts; | ||
69 | uint32_t comm; | ||
70 | uint32_t attrib; | ||
71 | uint32_t ioata; | ||
72 | uint32_t ecc1; | ||
73 | uint32_t ecc2; | ||
74 | uint32_t ecc3; | ||
75 | }; | ||
76 | |||
77 | #define FSMC_NOR_REG_SIZE 0x40 | 52 | #define FSMC_NOR_REG_SIZE 0x40 |
78 | 53 | ||
79 | struct fsmc_regs { | 54 | #define FSMC_NOR_REG(base, bank, reg) (base + \ |
80 | struct fsmc_nor_bank_regs nor_bank_regs[FSMC_MAX_NOR_BANKS]; | 55 | FSMC_NOR_BANK_SZ * (bank) + \ |
81 | uint8_t reserved_1[0x40 - 0x20]; | 56 | reg) |
82 | struct fsmc_nand_bank_regs bank_regs[FSMC_MAX_NAND_BANKS]; | 57 | |
83 | uint8_t reserved_2[0xfe0 - 0xc0]; | 58 | /* fsmc controller registers for NAND flash */ |
84 | uint32_t peripid0; /* 0xfe0 */ | 59 | #define PC 0x00 |
85 | uint32_t peripid1; /* 0xfe4 */ | 60 | /* pc register definitions */ |
86 | uint32_t peripid2; /* 0xfe8 */ | 61 | #define FSMC_RESET (1 << 0) |
87 | uint32_t peripid3; /* 0xfec */ | 62 | #define FSMC_WAITON (1 << 1) |
88 | uint32_t pcellid0; /* 0xff0 */ | 63 | #define FSMC_ENABLE (1 << 2) |
89 | uint32_t pcellid1; /* 0xff4 */ | 64 | #define FSMC_DEVTYPE_NAND (1 << 3) |
90 | uint32_t pcellid2; /* 0xff8 */ | 65 | #define FSMC_DEVWID_8 (0 << 4) |
91 | uint32_t pcellid3; /* 0xffc */ | 66 | #define FSMC_DEVWID_16 (1 << 4) |
92 | }; | 67 | #define FSMC_ECCEN (1 << 6) |
68 | #define FSMC_ECCPLEN_512 (0 << 7) | ||
69 | #define FSMC_ECCPLEN_256 (1 << 7) | ||
70 | #define FSMC_TCLR_1 (1) | ||
71 | #define FSMC_TCLR_SHIFT (9) | ||
72 | #define FSMC_TCLR_MASK (0xF) | ||
73 | #define FSMC_TAR_1 (1) | ||
74 | #define FSMC_TAR_SHIFT (13) | ||
75 | #define FSMC_TAR_MASK (0xF) | ||
76 | #define STS 0x04 | ||
77 | /* sts register definitions */ | ||
78 | #define FSMC_CODE_RDY (1 << 15) | ||
79 | #define COMM 0x08 | ||
80 | /* comm register definitions */ | ||
81 | #define FSMC_TSET_0 0 | ||
82 | #define FSMC_TSET_SHIFT 0 | ||
83 | #define FSMC_TSET_MASK 0xFF | ||
84 | #define FSMC_TWAIT_6 6 | ||
85 | #define FSMC_TWAIT_SHIFT 8 | ||
86 | #define FSMC_TWAIT_MASK 0xFF | ||
87 | #define FSMC_THOLD_4 4 | ||
88 | #define FSMC_THOLD_SHIFT 16 | ||
89 | #define FSMC_THOLD_MASK 0xFF | ||
90 | #define FSMC_THIZ_1 1 | ||
91 | #define FSMC_THIZ_SHIFT 24 | ||
92 | #define FSMC_THIZ_MASK 0xFF | ||
93 | #define ATTRIB 0x0C | ||
94 | #define IOATA 0x10 | ||
95 | #define ECC1 0x14 | ||
96 | #define ECC2 0x18 | ||
97 | #define ECC3 0x1C | ||
98 | #define FSMC_NAND_BANK_SZ 0x20 | ||
99 | |||
100 | #define FSMC_NAND_REG(base, bank, reg) (base + FSMC_NOR_REG_SIZE + \ | ||
101 | (FSMC_NAND_BANK_SZ * (bank)) + \ | ||
102 | reg) | ||
93 | 103 | ||
94 | #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) | 104 | #define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ) |
95 | 105 | ||
96 | /* pc register definitions */ | ||
97 | #define FSMC_RESET (1 << 0) | ||
98 | #define FSMC_WAITON (1 << 1) | ||
99 | #define FSMC_ENABLE (1 << 2) | ||
100 | #define FSMC_DEVTYPE_NAND (1 << 3) | ||
101 | #define FSMC_DEVWID_8 (0 << 4) | ||
102 | #define FSMC_DEVWID_16 (1 << 4) | ||
103 | #define FSMC_ECCEN (1 << 6) | ||
104 | #define FSMC_ECCPLEN_512 (0 << 7) | ||
105 | #define FSMC_ECCPLEN_256 (1 << 7) | ||
106 | #define FSMC_TCLR_1 (1 << 9) | ||
107 | #define FSMC_TAR_1 (1 << 13) | ||
108 | |||
109 | /* sts register definitions */ | ||
110 | #define FSMC_CODE_RDY (1 << 15) | ||
111 | |||
112 | /* comm register definitions */ | ||
113 | #define FSMC_TSET_0 (0 << 0) | ||
114 | #define FSMC_TWAIT_6 (6 << 8) | ||
115 | #define FSMC_THOLD_4 (4 << 16) | ||
116 | #define FSMC_THIZ_1 (1 << 24) | ||
117 | |||
118 | /* | 106 | /* |
119 | * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 | 107 | * There are 13 bytes of ecc for every 512 byte block in FSMC version 8 |
120 | * and it has to be read consecutively and immediately after the 512 | 108 | * and it has to be read consecutively and immediately after the 512 |
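With the register structs removed, fsmc.h now describes the controller purely as offsets, and FSMC_NOR_REG()/FSMC_NAND_REG() rebuild the per-bank addresses that the struct layout used to encode. For a driver holding the ioremapped register base this looks roughly like (a sketch, not code from this patch):

	void __iomem *regs_va;	/* ioremapped FSMC register block */
	unsigned int bank = 0;

	/* program the NAND PC register of the selected bank */
	writel(FSMC_RESET, FSMC_NAND_REG(regs_va, bank, PC));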
@@ -133,6 +121,20 @@ struct fsmc_eccplace { | |||
133 | struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; | 121 | struct fsmc_nand_eccplace eccplace[MAX_ECCPLACE_ENTRIES]; |
134 | }; | 122 | }; |
135 | 123 | ||
124 | struct fsmc_nand_timings { | ||
125 | uint8_t tclr; | ||
126 | uint8_t tar; | ||
127 | uint8_t thiz; | ||
128 | uint8_t thold; | ||
129 | uint8_t twait; | ||
130 | uint8_t tset; | ||
131 | }; | ||
132 | |||
133 | enum access_mode { | ||
134 | USE_DMA_ACCESS = 1, | ||
135 | USE_WORD_ACCESS, | ||
136 | }; | ||
137 | |||
136 | /** | 138 | /** |
137 | * fsmc_nand_platform_data - platform specific NAND controller config | 139 | * fsmc_nand_platform_data - platform specific NAND controller config |
138 | * @partitions: partition table for the platform, use a default fallback | 140 | * @partitions: partition table for the platform, use a default fallback |
@@ -146,12 +148,23 @@ struct fsmc_eccplace { | |||
146 | * this may be set to NULL | 148 | * this may be set to NULL |
147 | */ | 149 | */ |
148 | struct fsmc_nand_platform_data { | 150 | struct fsmc_nand_platform_data { |
151 | struct fsmc_nand_timings *nand_timings; | ||
149 | struct mtd_partition *partitions; | 152 | struct mtd_partition *partitions; |
150 | unsigned int nr_partitions; | 153 | unsigned int nr_partitions; |
151 | unsigned int options; | 154 | unsigned int options; |
152 | unsigned int width; | 155 | unsigned int width; |
153 | unsigned int bank; | 156 | unsigned int bank; |
157 | |||
158 | /* CLE, ALE offsets */ | ||
159 | unsigned int cle_off; | ||
160 | unsigned int ale_off; | ||
161 | enum access_mode mode; | ||
162 | |||
154 | void (*select_bank)(uint32_t bank, uint32_t busw); | 163 | void (*select_bank)(uint32_t bank, uint32_t busw); |
164 | |||
165 | /* priv structures for dma accesses */ | ||
166 | void *read_dma_priv; | ||
167 | void *write_dma_priv; | ||
155 | }; | 168 | }; |
156 | 169 | ||
157 | extern int __init fsmc_nor_init(struct platform_device *pdev, | 170 | extern int __init fsmc_nor_init(struct platform_device *pdev, |
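The expanded fsmc_nand_platform_data lets board code (or the new DT glue) pass explicit NAND timings, the CLE/ALE offsets and an access mode instead of relying on compile-time constants. A board file might fill it roughly as follows (all values illustrative):

	static struct fsmc_nand_timings nand_timings = {
		.tclr	= 1,
		.tar	= 1,
		.thiz	= 1,
		.thold	= 4,
		.twait	= 6,
		.tset	= 0,
	};

	static struct fsmc_nand_platform_data nand_plat_data = {
		.nand_timings	= &nand_timings,
		.width		= FSMC_NAND_BW8,
		.bank		= 0,
		.cle_off	= 0x10000,	/* chip-specific CLE offset */
		.ale_off	= 0x20000,	/* chip-specific ALE offset */
		.mode		= USE_WORD_ACCESS,
	};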
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index d43dc25af82e..cf5ea8cdcf8e 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -164,6 +164,9 @@ struct mtd_info { | |||
164 | /* ECC layout structure pointer - read only! */ | 164 | /* ECC layout structure pointer - read only! */ |
165 | struct nand_ecclayout *ecclayout; | 165 | struct nand_ecclayout *ecclayout; |
166 | 166 | ||
167 | /* max number of correctable bit errors per writesize */ | ||
168 | unsigned int ecc_strength; | ||
169 | |||
167 | /* Data for variable erase regions. If numeraseregions is zero, | 170 | /* Data for variable erase regions. If numeraseregions is zero, |
168 | * it means that the whole device has erasesize as given above. | 171 | * it means that the whole device has erasesize as given above. |
169 | */ | 172 | */ |
@@ -174,52 +177,52 @@ struct mtd_info { | |||
174 | * Do not call via these pointers, use corresponding mtd_*() | 177 | * Do not call via these pointers, use corresponding mtd_*() |
175 | * wrappers instead. | 178 | * wrappers instead. |
176 | */ | 179 | */ |
177 | int (*erase) (struct mtd_info *mtd, struct erase_info *instr); | 180 | int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); |
178 | int (*point) (struct mtd_info *mtd, loff_t from, size_t len, | 181 | int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, |
179 | size_t *retlen, void **virt, resource_size_t *phys); | 182 | size_t *retlen, void **virt, resource_size_t *phys); |
180 | void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); | 183 | int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); |
181 | unsigned long (*get_unmapped_area) (struct mtd_info *mtd, | 184 | unsigned long (*_get_unmapped_area) (struct mtd_info *mtd, |
182 | unsigned long len, | 185 | unsigned long len, |
183 | unsigned long offset, | 186 | unsigned long offset, |
184 | unsigned long flags); | 187 | unsigned long flags); |
185 | int (*read) (struct mtd_info *mtd, loff_t from, size_t len, | 188 | int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, |
186 | size_t *retlen, u_char *buf); | 189 | size_t *retlen, u_char *buf); |
187 | int (*write) (struct mtd_info *mtd, loff_t to, size_t len, | 190 | int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, |
188 | size_t *retlen, const u_char *buf); | 191 | size_t *retlen, const u_char *buf); |
189 | int (*panic_write) (struct mtd_info *mtd, loff_t to, size_t len, | 192 | int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len, |
190 | size_t *retlen, const u_char *buf); | 193 | size_t *retlen, const u_char *buf); |
191 | int (*read_oob) (struct mtd_info *mtd, loff_t from, | 194 | int (*_read_oob) (struct mtd_info *mtd, loff_t from, |
192 | struct mtd_oob_ops *ops); | ||
193 | int (*write_oob) (struct mtd_info *mtd, loff_t to, | ||
194 | struct mtd_oob_ops *ops); | 195 | struct mtd_oob_ops *ops); |
195 | int (*get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, | 196 | int (*_write_oob) (struct mtd_info *mtd, loff_t to, |
196 | size_t len); | 197 | struct mtd_oob_ops *ops); |
197 | int (*read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, | 198 | int (*_get_fact_prot_info) (struct mtd_info *mtd, struct otp_info *buf, |
198 | size_t len, size_t *retlen, u_char *buf); | 199 | size_t len); |
199 | int (*get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, | 200 | int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, |
200 | size_t len); | 201 | size_t len, size_t *retlen, u_char *buf); |
201 | int (*read_user_prot_reg) (struct mtd_info *mtd, loff_t from, | 202 | int (*_get_user_prot_info) (struct mtd_info *mtd, struct otp_info *buf, |
202 | size_t len, size_t *retlen, u_char *buf); | 203 | size_t len); |
203 | int (*write_user_prot_reg) (struct mtd_info *mtd, loff_t to, size_t len, | 204 | int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, |
204 | size_t *retlen, u_char *buf); | 205 | size_t len, size_t *retlen, u_char *buf); |
205 | int (*lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, | 206 | int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to, |
206 | size_t len); | 207 | size_t len, size_t *retlen, u_char *buf); |
207 | int (*writev) (struct mtd_info *mtd, const struct kvec *vecs, | 208 | int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, |
209 | size_t len); | ||
210 | int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs, | ||
208 | unsigned long count, loff_t to, size_t *retlen); | 211 | unsigned long count, loff_t to, size_t *retlen); |
209 | void (*sync) (struct mtd_info *mtd); | 212 | void (*_sync) (struct mtd_info *mtd); |
210 | int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); | 213 | int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
211 | int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); | 214 | int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
212 | int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); | 215 | int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
213 | int (*block_isbad) (struct mtd_info *mtd, loff_t ofs); | 216 | int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); |
214 | int (*block_markbad) (struct mtd_info *mtd, loff_t ofs); | 217 | int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); |
215 | int (*suspend) (struct mtd_info *mtd); | 218 | int (*_suspend) (struct mtd_info *mtd); |
216 | void (*resume) (struct mtd_info *mtd); | 219 | void (*_resume) (struct mtd_info *mtd); |
217 | /* | 220 | /* |
218 | * If the driver is something smart, like UBI, it may need to maintain | 221 | * If the driver is something smart, like UBI, it may need to maintain |
219 | * its own reference counting. The below functions are only for driver. | 222 | * its own reference counting. The below functions are only for driver. |
220 | */ | 223 | */ |
221 | int (*get_device) (struct mtd_info *mtd); | 224 | int (*_get_device) (struct mtd_info *mtd); |
222 | void (*put_device) (struct mtd_info *mtd); | 225 | void (*_put_device) (struct mtd_info *mtd); |
223 | 226 | ||
224 | /* Backing device capabilities for this device | 227 | /* Backing device capabilities for this device |
225 | * - provides mmap capabilities | 228 | * - provides mmap capabilities |
@@ -240,214 +243,75 @@ struct mtd_info { | |||
240 | int usecount; | 243 | int usecount; |
241 | }; | 244 | }; |
242 | 245 | ||
243 | /* | 246 | int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); |
244 | * Erase is an asynchronous operation. Device drivers are supposed | 247 | int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, |
245 | * to call instr->callback() whenever the operation completes, even | 248 | void **virt, resource_size_t *phys); |
246 | * if it completes with a failure. | 249 | int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len); |
247 | * Callers are supposed to pass a callback function and wait for it | 250 | unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, |
248 | * to be called before writing to the block. | 251 | unsigned long offset, unsigned long flags); |
249 | */ | 252 | int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, |
250 | static inline int mtd_erase(struct mtd_info *mtd, struct erase_info *instr) | 253 | u_char *buf); |
251 | { | 254 | int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
252 | return mtd->erase(mtd, instr); | 255 | const u_char *buf); |
253 | } | 256 | int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, |
254 | 257 | const u_char *buf); | |
255 | /* | ||
256 | * This stuff for eXecute-In-Place. phys is optional and may be set to NULL. | ||
257 | */ | ||
258 | static inline int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, | ||
259 | size_t *retlen, void **virt, resource_size_t *phys) | ||
260 | { | ||
261 | *retlen = 0; | ||
262 | if (!mtd->point) | ||
263 | return -EOPNOTSUPP; | ||
264 | return mtd->point(mtd, from, len, retlen, virt, phys); | ||
265 | } | ||
266 | |||
267 | /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ | ||
268 | static inline void mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) | ||
269 | { | ||
270 | return mtd->unpoint(mtd, from, len); | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Allow NOMMU mmap() to directly map the device (if not NULL) | ||
275 | * - return the address to which the offset maps | ||
276 | * - return -ENOSYS to indicate refusal to do the mapping | ||
277 | */ | ||
278 | static inline unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, | ||
279 | unsigned long len, | ||
280 | unsigned long offset, | ||
281 | unsigned long flags) | ||
282 | { | ||
283 | if (!mtd->get_unmapped_area) | ||
284 | return -EOPNOTSUPP; | ||
285 | return mtd->get_unmapped_area(mtd, len, offset, flags); | ||
286 | } | ||
287 | |||
288 | static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, | ||
289 | size_t *retlen, u_char *buf) | ||
290 | { | ||
291 | return mtd->read(mtd, from, len, retlen, buf); | ||
292 | } | ||
293 | |||
294 | static inline int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
295 | size_t *retlen, const u_char *buf) | ||
296 | { | ||
297 | *retlen = 0; | ||
298 | if (!mtd->write) | ||
299 | return -EROFS; | ||
300 | return mtd->write(mtd, to, len, retlen, buf); | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * In blackbox flight recorder like scenarios we want to make successful writes | ||
305 | * in interrupt context. panic_write() is only intended to be called when its | ||
306 | * known the kernel is about to panic and we need the write to succeed. Since | ||
307 | * the kernel is not going to be running for much longer, this function can | ||
308 | * break locks and delay to ensure the write succeeds (but not sleep). | ||
309 | */ | ||
310 | static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, | ||
311 | size_t *retlen, const u_char *buf) | ||
312 | { | ||
313 | *retlen = 0; | ||
314 | if (!mtd->panic_write) | ||
315 | return -EOPNOTSUPP; | ||
316 | return mtd->panic_write(mtd, to, len, retlen, buf); | ||
317 | } | ||
318 | 258 | ||
319 | static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, | 259 | static inline int mtd_read_oob(struct mtd_info *mtd, loff_t from, |
320 | struct mtd_oob_ops *ops) | 260 | struct mtd_oob_ops *ops) |
321 | { | 261 | { |
322 | ops->retlen = ops->oobretlen = 0; | 262 | ops->retlen = ops->oobretlen = 0; |
323 | if (!mtd->read_oob) | 263 | if (!mtd->_read_oob) |
324 | return -EOPNOTSUPP; | 264 | return -EOPNOTSUPP; |
325 | return mtd->read_oob(mtd, from, ops); | 265 | return mtd->_read_oob(mtd, from, ops); |
326 | } | 266 | } |
327 | 267 | ||
328 | static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, | 268 | static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to, |
329 | struct mtd_oob_ops *ops) | 269 | struct mtd_oob_ops *ops) |
330 | { | 270 | { |
331 | ops->retlen = ops->oobretlen = 0; | 271 | ops->retlen = ops->oobretlen = 0; |
332 | if (!mtd->write_oob) | 272 | if (!mtd->_write_oob) |
333 | return -EOPNOTSUPP; | ||
334 | return mtd->write_oob(mtd, to, ops); | ||
335 | } | ||
336 | |||
337 | /* | ||
338 | * Method to access the protection register area, present in some flash | ||
339 | * devices. The user data is one time programmable but the factory data is read | ||
340 | * only. | ||
341 | */ | ||
342 | static inline int mtd_get_fact_prot_info(struct mtd_info *mtd, | ||
343 | struct otp_info *buf, size_t len) | ||
344 | { | ||
345 | if (!mtd->get_fact_prot_info) | ||
346 | return -EOPNOTSUPP; | 273 | return -EOPNOTSUPP; |
347 | return mtd->get_fact_prot_info(mtd, buf, len); | 274 | if (!(mtd->flags & MTD_WRITEABLE)) |
348 | } | 275 | return -EROFS; |
349 | 276 | return mtd->_write_oob(mtd, to, ops); | |
350 | static inline int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, | ||
351 | size_t len, size_t *retlen, | ||
352 | u_char *buf) | ||
353 | { | ||
354 | *retlen = 0; | ||
355 | if (!mtd->read_fact_prot_reg) | ||
356 | return -EOPNOTSUPP; | ||
357 | return mtd->read_fact_prot_reg(mtd, from, len, retlen, buf); | ||
358 | } | ||
359 | |||
360 | static inline int mtd_get_user_prot_info(struct mtd_info *mtd, | ||
361 | struct otp_info *buf, | ||
362 | size_t len) | ||
363 | { | ||
364 | if (!mtd->get_user_prot_info) | ||
365 | return -EOPNOTSUPP; | ||
366 | return mtd->get_user_prot_info(mtd, buf, len); | ||
367 | } | ||
368 | |||
369 | static inline int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, | ||
370 | size_t len, size_t *retlen, | ||
371 | u_char *buf) | ||
372 | { | ||
373 | *retlen = 0; | ||
374 | if (!mtd->read_user_prot_reg) | ||
375 | return -EOPNOTSUPP; | ||
376 | return mtd->read_user_prot_reg(mtd, from, len, retlen, buf); | ||
377 | } | ||
378 | |||
379 | static inline int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, | ||
380 | size_t len, size_t *retlen, | ||
381 | u_char *buf) | ||
382 | { | ||
383 | *retlen = 0; | ||
384 | if (!mtd->write_user_prot_reg) | ||
385 | return -EOPNOTSUPP; | ||
386 | return mtd->write_user_prot_reg(mtd, to, len, retlen, buf); | ||
387 | } | 277 | } |
388 | 278 | ||
389 | static inline int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, | 279 | int mtd_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf, |
390 | size_t len) | 280 | size_t len); |
391 | { | 281 | int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, |
392 | if (!mtd->lock_user_prot_reg) | 282 | size_t *retlen, u_char *buf); |
393 | return -EOPNOTSUPP; | 283 | int mtd_get_user_prot_info(struct mtd_info *mtd, struct otp_info *buf, |
394 | return mtd->lock_user_prot_reg(mtd, from, len); | 284 | size_t len); |
395 | } | 285 | int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, |
286 | size_t *retlen, u_char *buf); | ||
287 | int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, | ||
288 | size_t *retlen, u_char *buf); | ||
289 | int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); | ||
396 | 290 | ||
397 | int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, | 291 | int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, |
398 | unsigned long count, loff_t to, size_t *retlen); | 292 | unsigned long count, loff_t to, size_t *retlen); |
399 | 293 | ||
400 | static inline void mtd_sync(struct mtd_info *mtd) | 294 | static inline void mtd_sync(struct mtd_info *mtd) |
401 | { | 295 | { |
402 | if (mtd->sync) | 296 | if (mtd->_sync) |
403 | mtd->sync(mtd); | 297 | mtd->_sync(mtd); |
404 | } | ||
405 | |||
406 | /* Chip-supported device locking */ | ||
407 | static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
408 | { | ||
409 | if (!mtd->lock) | ||
410 | return -EOPNOTSUPP; | ||
411 | return mtd->lock(mtd, ofs, len); | ||
412 | } | 298 | } |
413 | 299 | ||
414 | static inline int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) | 300 | int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
415 | { | 301 | int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
416 | if (!mtd->unlock) | 302 | int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); |
417 | return -EOPNOTSUPP; | 303 | int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs); |
418 | return mtd->unlock(mtd, ofs, len); | 304 | int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); |
419 | } | ||
420 | |||
421 | static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) | ||
422 | { | ||
423 | if (!mtd->is_locked) | ||
424 | return -EOPNOTSUPP; | ||
425 | return mtd->is_locked(mtd, ofs, len); | ||
426 | } | ||
427 | 305 | ||
428 | static inline int mtd_suspend(struct mtd_info *mtd) | 306 | static inline int mtd_suspend(struct mtd_info *mtd) |
429 | { | 307 | { |
430 | return mtd->suspend ? mtd->suspend(mtd) : 0; | 308 | return mtd->_suspend ? mtd->_suspend(mtd) : 0; |
431 | } | 309 | } |
432 | 310 | ||
433 | static inline void mtd_resume(struct mtd_info *mtd) | 311 | static inline void mtd_resume(struct mtd_info *mtd) |
434 | { | 312 | { |
435 | if (mtd->resume) | 313 | if (mtd->_resume) |
436 | mtd->resume(mtd); | 314 | mtd->_resume(mtd); |
437 | } | ||
438 | |||
439 | static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs) | ||
440 | { | ||
441 | if (!mtd->block_isbad) | ||
442 | return 0; | ||
443 | return mtd->block_isbad(mtd, ofs); | ||
444 | } | ||
445 | |||
446 | static inline int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs) | ||
447 | { | ||
448 | if (!mtd->block_markbad) | ||
449 | return -EOPNOTSUPP; | ||
450 | return mtd->block_markbad(mtd, ofs); | ||
451 | } | 315 | } |
452 | 316 | ||
453 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) | 317 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) |
@@ -482,12 +346,12 @@ static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) | |||
482 | 346 | ||
483 | static inline int mtd_has_oob(const struct mtd_info *mtd) | 347 | static inline int mtd_has_oob(const struct mtd_info *mtd) |
484 | { | 348 | { |
485 | return mtd->read_oob && mtd->write_oob; | 349 | return mtd->_read_oob && mtd->_write_oob; |
486 | } | 350 | } |
487 | 351 | ||
488 | static inline int mtd_can_have_bb(const struct mtd_info *mtd) | 352 | static inline int mtd_can_have_bb(const struct mtd_info *mtd) |
489 | { | 353 | { |
490 | return !!mtd->block_isbad; | 354 | return !!mtd->_block_isbad; |
491 | } | 355 | } |
492 | 356 | ||
493 | /* Kernel-side ioctl definitions */ | 357 | /* Kernel-side ioctl definitions */ |
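The mtd.h rework renames every driver-supplied hook with a leading underscore and turns most of the old static inline wrappers into real functions exported from mtdcore.c, so common checks (MTD_WRITEABLE, retlen zeroing, capability tests) live in one place. The resulting split, sketched with a hypothetical driver:

	/* driver side: only the underscore-prefixed hooks are filled in */
	static int mydrv_read(struct mtd_info *mtd, loff_t from, size_t len,
			      size_t *retlen, u_char *buf)
	{
		/* ... perform the read, update *retlen ... */
		return 0;
	}

	mtd->_read = mydrv_read;

	/* everyone else goes through the wrapper, never the hook */
	err = mtd_read(mtd, from, len, &retlen, buf);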
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 63b5a8b6dfbd..1482340d3d9f 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -324,6 +324,7 @@ struct nand_hw_control { | |||
324 | * @steps: number of ECC steps per page | 324 | * @steps: number of ECC steps per page |
325 | * @size: data bytes per ECC step | 325 | * @size: data bytes per ECC step |
326 | * @bytes: ECC bytes per step | 326 | * @bytes: ECC bytes per step |
327 | * @strength: max number of correctable bits per ECC step | ||
327 | * @total: total number of ECC bytes per page | 328 | * @total: total number of ECC bytes per page |
328 | * @prepad: padding information for syndrome based ECC generators | 329 | * @prepad: padding information for syndrome based ECC generators |
329 | * @postpad: padding information for syndrome based ECC generators | 330 | * @postpad: padding information for syndrome based ECC generators |
@@ -351,6 +352,7 @@ struct nand_ecc_ctrl { | |||
351 | int size; | 352 | int size; |
352 | int bytes; | 353 | int bytes; |
353 | int total; | 354 | int total; |
355 | int strength; | ||
354 | int prepad; | 356 | int prepad; |
355 | int postpad; | 357 | int postpad; |
356 | struct nand_ecclayout *layout; | 358 | struct nand_ecclayout *layout; |
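The new strength fields give ECC capability a uniform home: a NAND driver states how many bit errors it can fix per ECC step in nand_ecc_ctrl, and the core surfaces the per-writesize figure through mtd->ecc_strength. For plain 1-bit software Hamming a driver would set something like (illustrative values):

	chip->ecc.size     = 256;	/* data bytes per ECC step */
	chip->ecc.bytes    = 3;		/* ECC bytes per step */
	chip->ecc.strength = 1;		/* corrects at most one bit per step */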
@@ -448,8 +450,9 @@ struct nand_buffers { | |||
448 | * will be copied to the appropriate nand_bbt_descr's. | 450 | * will be copied to the appropriate nand_bbt_descr's. |
449 | * @badblockpos: [INTERN] position of the bad block marker in the oob | 451 | * @badblockpos: [INTERN] position of the bad block marker in the oob |
450 | * area. | 452 | * area. |
451 | * @badblockbits: [INTERN] number of bits to left-shift the bad block | 453 | * @badblockbits: [INTERN] minimum number of set bits in a good block's |
452 | * number | 454 | * bad block marker position; i.e., BBM == 11110111b is |
455 | * not bad when badblockbits == 7 | ||
453 | * @cellinfo: [INTERN] MLC/multichip data from chip ident | 456 | * @cellinfo: [INTERN] MLC/multichip data from chip ident |
454 | * @numchips: [INTERN] number of physical chips | 457 | * @numchips: [INTERN] number of physical chips |
455 | * @chipsize: [INTERN] the size of one chip for multichip arrays | 458 | * @chipsize: [INTERN] the size of one chip for multichip arrays |
diff --git a/include/linux/mtd/pmc551.h b/include/linux/mtd/pmc551.h deleted file mode 100644 index 27ad40aed19f..000000000000 --- a/include/linux/mtd/pmc551.h +++ /dev/null | |||
@@ -1,78 +0,0 @@ | |||
1 | /* | ||
2 | * PMC551 PCI Mezzanine Ram Device | ||
3 | * | ||
4 | * Author: | ||
5 | * Mark Ferrell | ||
6 | * Copyright 1999,2000 Nortel Networks | ||
7 | * | ||
8 | * License: | ||
9 | * As part of this driver was derrived from the slram.c driver it falls | ||
10 | * under the same license, which is GNU General Public License v2 | ||
11 | */ | ||
12 | |||
13 | #ifndef __MTD_PMC551_H__ | ||
14 | #define __MTD_PMC551_H__ | ||
15 | |||
16 | #include <linux/mtd/mtd.h> | ||
17 | |||
18 | #define PMC551_VERSION \ | ||
19 | "Ramix PMC551 PCI Mezzanine Ram Driver. (C) 1999,2000 Nortel Networks.\n" | ||
20 | |||
21 | /* | ||
22 | * Our personal and private information | ||
23 | */ | ||
24 | struct mypriv { | ||
25 | struct pci_dev *dev; | ||
26 | u_char *start; | ||
27 | u32 base_map0; | ||
28 | u32 curr_map0; | ||
29 | u32 asize; | ||
30 | struct mtd_info *nextpmc551; | ||
31 | }; | ||
32 | |||
33 | /* | ||
34 | * Function Prototypes | ||
35 | */ | ||
36 | static int pmc551_erase(struct mtd_info *, struct erase_info *); | ||
37 | static void pmc551_unpoint(struct mtd_info *, loff_t, size_t); | ||
38 | static int pmc551_point(struct mtd_info *mtd, loff_t from, size_t len, | ||
39 | size_t *retlen, void **virt, resource_size_t *phys); | ||
40 | static int pmc551_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *); | ||
41 | static int pmc551_write(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); | ||
42 | |||
43 | |||
44 | /* | ||
45 | * Define the PCI ID's if the kernel doesn't define them for us | ||
46 | */ | ||
47 | #ifndef PCI_VENDOR_ID_V3_SEMI | ||
48 | #define PCI_VENDOR_ID_V3_SEMI 0x11b0 | ||
49 | #endif | ||
50 | |||
51 | #ifndef PCI_DEVICE_ID_V3_SEMI_V370PDC | ||
52 | #define PCI_DEVICE_ID_V3_SEMI_V370PDC 0x0200 | ||
53 | #endif | ||
54 | |||
55 | |||
56 | #define PMC551_PCI_MEM_MAP0 0x50 | ||
57 | #define PMC551_PCI_MEM_MAP1 0x54 | ||
58 | #define PMC551_PCI_MEM_MAP_MAP_ADDR_MASK 0x3ff00000 | ||
59 | #define PMC551_PCI_MEM_MAP_APERTURE_MASK 0x000000f0 | ||
60 | #define PMC551_PCI_MEM_MAP_REG_EN 0x00000002 | ||
61 | #define PMC551_PCI_MEM_MAP_ENABLE 0x00000001 | ||
62 | |||
63 | #define PMC551_SDRAM_MA 0x60 | ||
64 | #define PMC551_SDRAM_CMD 0x62 | ||
65 | #define PMC551_DRAM_CFG 0x64 | ||
66 | #define PMC551_SYS_CTRL_REG 0x78 | ||
67 | |||
68 | #define PMC551_DRAM_BLK0 0x68 | ||
69 | #define PMC551_DRAM_BLK1 0x6c | ||
70 | #define PMC551_DRAM_BLK2 0x70 | ||
71 | #define PMC551_DRAM_BLK3 0x74 | ||
72 | #define PMC551_DRAM_BLK_GET_SIZE(x) (524288<<((x>>4)&0x0f)) | ||
73 | #define PMC551_DRAM_BLK_SET_COL_MUX(x,v) (((x) & ~0x00007000) | (((v) & 0x7) << 12)) | ||
74 | #define PMC551_DRAM_BLK_SET_ROW_MUX(x,v) (((x) & ~0x00000f00) | (((v) & 0xf) << 8)) | ||
75 | |||
76 | |||
77 | #endif /* __MTD_PMC551_H__ */ | ||
78 | |||
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h index 9cf4c4c79555..a38e1fa8af01 100644 --- a/include/linux/mtd/sh_flctl.h +++ b/include/linux/mtd/sh_flctl.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/mtd/mtd.h> | 23 | #include <linux/mtd/mtd.h> |
24 | #include <linux/mtd/nand.h> | 24 | #include <linux/mtd/nand.h> |
25 | #include <linux/mtd/partitions.h> | 25 | #include <linux/mtd/partitions.h> |
26 | #include <linux/pm_qos.h> | ||
26 | 27 | ||
27 | /* FLCTL registers */ | 28 | /* FLCTL registers */ |
28 | #define FLCMNCR(f) (f->reg + 0x0) | 29 | #define FLCMNCR(f) (f->reg + 0x0) |
@@ -38,6 +39,7 @@ | |||
38 | #define FLDTFIFO(f) (f->reg + 0x24) | 39 | #define FLDTFIFO(f) (f->reg + 0x24) |
39 | #define FLECFIFO(f) (f->reg + 0x28) | 40 | #define FLECFIFO(f) (f->reg + 0x28) |
40 | #define FLTRCR(f) (f->reg + 0x2C) | 41 | #define FLTRCR(f) (f->reg + 0x2C) |
42 | #define FLHOLDCR(f) (f->reg + 0x38) | ||
41 | #define FL4ECCRESULT0(f) (f->reg + 0x80) | 43 | #define FL4ECCRESULT0(f) (f->reg + 0x80) |
42 | #define FL4ECCRESULT1(f) (f->reg + 0x84) | 44 | #define FL4ECCRESULT1(f) (f->reg + 0x84) |
43 | #define FL4ECCRESULT2(f) (f->reg + 0x88) | 45 | #define FL4ECCRESULT2(f) (f->reg + 0x88) |
@@ -67,6 +69,30 @@ | |||
67 | #define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */ | 69 | #define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */ |
68 | #define TYPESEL_SET (0x1 << 0) | 70 | #define TYPESEL_SET (0x1 << 0) |
69 | 71 | ||
72 | /* | ||
73 | * Clock settings using the PULSEx registers from FLCMNCR | ||
74 | * | ||
75 | * Some hardware uses bits called PULSEx instead of FCKSEL_E and QTSEL_E | ||
76 | * to control the clock divider used between the High-Speed Peripheral Clock | ||
77 | * and the FLCTL internal clock. If so, use CLK_8B_xxx for connecting 8 bit | ||
78 | * and CLK_16B_xxx for connecting 16 bit bus bandwidth NAND chips. For the 16 | ||
79 | * bit version the divider is separate for the pulse width of high and low | ||
80 | * signals. | ||
81 | */ | ||
82 | #define PULSE3 (0x1 << 27) | ||
83 | #define PULSE2 (0x1 << 17) | ||
84 | #define PULSE1 (0x1 << 15) | ||
85 | #define PULSE0 (0x1 << 9) | ||
86 | #define CLK_8B_0_5 PULSE1 | ||
87 | #define CLK_8B_1 0x0 | ||
88 | #define CLK_8B_1_5 (PULSE1 | PULSE2) | ||
89 | #define CLK_8B_2 PULSE0 | ||
90 | #define CLK_8B_3 (PULSE0 | PULSE1 | PULSE2) | ||
91 | #define CLK_8B_4 (PULSE0 | PULSE2) | ||
92 | #define CLK_16B_6L_2H PULSE0 | ||
93 | #define CLK_16B_9L_3H (PULSE0 | PULSE1 | PULSE2) | ||
94 | #define CLK_16B_12L_4H (PULSE0 | PULSE2) | ||
95 | |||
70 | /* FLCMDCR control bits */ | 96 | /* FLCMDCR control bits */ |
71 | #define ADRCNT2_E (0x1 << 31) /* 5byte address enable */ | 97 | #define ADRCNT2_E (0x1 << 31) /* 5byte address enable */ |
72 | #define ADRMD_E (0x1 << 26) /* Sector address access */ | 98 | #define ADRMD_E (0x1 << 26) /* Sector address access */ |
@@ -85,6 +111,15 @@ | |||
85 | #define TRSTRT (0x1 << 0) /* translation start */ | 111 | #define TRSTRT (0x1 << 0) /* translation start */ |
86 | #define TREND (0x1 << 1) /* translation end */ | 112 | #define TREND (0x1 << 1) /* translation end */ |
87 | 113 | ||
114 | /* | ||
115 | * FLHOLDCR control bits | ||
116 | * | ||
117 | * HOLDEN: Bus Occupancy Enable (inverted) | ||
118 | * Enable this bit when the external bus might be used in between transfers. | ||
119 | * If not set and the bus gets used by other modules, a deadlock occurs. | ||
120 | */ | ||
121 | #define HOLDEN (0x1 << 0) | ||
122 | |||
88 | /* FL4ECCCR control bits */ | 123 | /* FL4ECCCR control bits */ |
89 | #define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */ | 124 | #define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */ |
90 | #define _4ECCEND (0x1 << 1) /* 4 symbols end */ | 125 | #define _4ECCEND (0x1 << 1) /* 4 symbols end */ |
@@ -97,6 +132,7 @@ struct sh_flctl { | |||
97 | struct mtd_info mtd; | 132 | struct mtd_info mtd; |
98 | struct nand_chip chip; | 133 | struct nand_chip chip; |
99 | struct platform_device *pdev; | 134 | struct platform_device *pdev; |
135 | struct dev_pm_qos_request pm_qos; | ||
100 | void __iomem *reg; | 136 | void __iomem *reg; |
101 | 137 | ||
102 | uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ | 138 | uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */ |
@@ -108,11 +144,14 @@ struct sh_flctl { | |||
108 | int erase1_page_addr; /* page_addr in ERASE1 cmd */ | 144 | int erase1_page_addr; /* page_addr in ERASE1 cmd */ |
109 | uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ | 145 | uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */ |
110 | uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ | 146 | uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */ |
147 | uint32_t flcmncr_base; /* base value of FLCMNCR */ | ||
111 | 148 | ||
112 | int hwecc_cant_correct[4]; | 149 | int hwecc_cant_correct[4]; |
113 | 150 | ||
114 | unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ | 151 | unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */ |
115 | unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ | 152 | unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */ |
153 | unsigned holden:1; /* Hardware has FLHOLDCR and HOLDEN is set */ | ||
154 | unsigned qos_request:1; /* QoS request to prevent deep power shutdown */ | ||
116 | }; | 155 | }; |
117 | 156 | ||
118 | struct sh_flctl_platform_data { | 157 | struct sh_flctl_platform_data { |
@@ -121,6 +160,7 @@ struct sh_flctl_platform_data { | |||
121 | unsigned long flcmncr_val; | 160 | unsigned long flcmncr_val; |
122 | 161 | ||
123 | unsigned has_hwecc:1; | 162 | unsigned has_hwecc:1; |
163 | unsigned use_holden:1; | ||
124 | }; | 164 | }; |
125 | 165 | ||
126 | static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) | 166 | static inline struct sh_flctl *mtd_to_flctl(struct mtd_info *mtdinfo) |
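The hunk above adds the PULSEx clock-divider macros, the HOLDEN bit and the new flcmncr_base, holden, qos_request and use_holden fields. As a hedged sketch (not taken from this series), board code could populate the extended platform data roughly as below; the choice of CLK_8B_4 and the flag values are assumptions for illustration, and only fields visible in the hunk are initialised:

/*
 * Illustrative board platform data for the FLCTL driver.
 * CLK_8B_4, has_hwecc and use_holden come from the header changes above;
 * any further chip-specific FLCMNCR bits a real board would OR into
 * flcmncr_val are deliberately omitted here.
 */
#include <linux/mtd/sh_flctl.h>

static struct sh_flctl_platform_data board_flctl_pdata = {
	.flcmncr_val	= CLK_8B_4,	/* 8 bit bus, divide-by-4 pulse width */
	.has_hwecc	= 1,		/* enable the hardware ECC path */
	.use_holden	= 1,		/* set HOLDEN when other masters may
					 * use the external bus, avoiding the
					 * deadlock described above */
};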
diff --git a/include/linux/mtd/spear_smi.h b/include/linux/mtd/spear_smi.h new file mode 100644 index 000000000000..8ae1726044c3 --- /dev/null +++ b/include/linux/mtd/spear_smi.h | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * Copyright © 2010 ST Microelectronics | ||
3 | * Shiraz Hashim <shiraz.hashim@st.com> | ||
4 | * | ||
5 | * This file is licensed under the terms of the GNU General Public | ||
6 | * License version 2. This program is licensed "as is" without any | ||
7 | * warranty of any kind, whether express or implied. | ||
8 | */ | ||
9 | |||
10 | #ifndef __MTD_SPEAR_SMI_H | ||
11 | #define __MTD_SPEAR_SMI_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/mtd/mtd.h> | ||
15 | #include <linux/mtd/partitions.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/of.h> | ||
18 | |||
19 | /* max possible slots for serial-nor flash chip in the SMI controller */ | ||
20 | #define MAX_NUM_FLASH_CHIP 4 | ||
21 | |||
22 | /* macro to define partitions for flash devices */ | ||
23 | #define DEFINE_PARTS(n, of, s) \ | ||
24 | { \ | ||
25 | .name = n, \ | ||
26 | .offset = of, \ | ||
27 | .size = s, \ | ||
28 | } | ||
29 | |||
30 | /** | ||
31 | * struct spear_smi_flash_info - platform structure for passing flash | ||
32 | * information | ||
33 | * | ||
34 | * name: name of the serial nor flash for identification | ||
35 | * mem_base: the memory base on which the flash is mapped | ||
36 | * size: size of the flash in bytes | ||
37 | * partitions: partition details | ||
38 | * nr_partitions: number of partitions | ||
39 | * fast_mode: whether flash supports fast mode | ||
40 | */ | ||
41 | |||
42 | struct spear_smi_flash_info { | ||
43 | char *name; | ||
44 | unsigned long mem_base; | ||
45 | unsigned long size; | ||
46 | struct mtd_partition *partitions; | ||
47 | int nr_partitions; | ||
48 | u8 fast_mode; | ||
49 | }; | ||
50 | |||
51 | /** | ||
52 | * struct spear_smi_plat_data - platform structure for configuring smi | ||
53 | * | ||
54 | * clk_rate: clk rate at which SMI must operate | ||
55 | * num_flashes: number of flashes present on board | ||
56 | * board_flash_info: specific details of each flash present on board | ||
57 | */ | ||
58 | struct spear_smi_plat_data { | ||
59 | unsigned long clk_rate; | ||
60 | int num_flashes; | ||
61 | struct spear_smi_flash_info *board_flash_info; | ||
62 | struct device_node *np[MAX_NUM_FLASH_CHIP]; | ||
63 | }; | ||
64 | |||
65 | #endif /* __MTD_SPEAR_SMI_H */ | ||
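For the new spear_smi.h interface, a hedged sketch of how a board file might fill in these structures follows. The flash name, base address, sizes and partition layout are made up for illustration; DEFINE_PARTS, spear_smi_flash_info and spear_smi_plat_data are used exactly as declared in the header above, and MTDPART_SIZ_FULL comes from the partitions.h header it already includes:

/*
 * Illustrative SMI platform data: one serial NOR flash with two partitions.
 * Addresses, sizes and the "m25p64" name are placeholders, not values from
 * this series.
 */
#include <linux/kernel.h>
#include <linux/mtd/spear_smi.h>

static struct mtd_partition board_smi_parts[] = {
	DEFINE_PARTS("xloader", 0x00000000, 0x00010000),
	DEFINE_PARTS("rootfs",  0x00010000, MTDPART_SIZ_FULL),
};

static struct spear_smi_flash_info board_smi_flash[] = {
	{
		.name		= "m25p64",
		.mem_base	= 0xf8000000,	/* memory-mapped flash base */
		.size		= 0x00800000,	/* 8 MiB */
		.partitions	= board_smi_parts,
		.nr_partitions	= ARRAY_SIZE(board_smi_parts),
		.fast_mode	= 1,		/* flash supports fast read */
	},
};

static struct spear_smi_plat_data board_smi_pdata = {
	.clk_rate		= 50000000,	/* SMI functional clock in Hz */
	.num_flashes		= ARRAY_SIZE(board_smi_flash),
	.board_flash_info	= board_smi_flash,
};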
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c index 6ca1f46d84a4..e373fbbc97a0 100644 --- a/sound/soc/mxs/mxs-pcm.c +++ b/sound/soc/mxs/mxs-pcm.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
30 | #include <linux/dmaengine.h> | 30 | #include <linux/dmaengine.h> |
31 | #include <linux/fsl/mxs-dma.h> | ||
31 | 32 | ||
32 | #include <sound/core.h> | 33 | #include <sound/core.h> |
33 | #include <sound/initval.h> | 34 | #include <sound/initval.h> |
@@ -36,7 +37,6 @@ | |||
36 | #include <sound/soc.h> | 37 | #include <sound/soc.h> |
37 | #include <sound/dmaengine_pcm.h> | 38 | #include <sound/dmaengine_pcm.h> |
38 | 39 | ||
39 | #include <mach/dma.h> | ||
40 | #include "mxs-pcm.h" | 40 | #include "mxs-pcm.h" |
41 | 41 | ||
42 | struct mxs_pcm_dma_data { | 42 | struct mxs_pcm_dma_data { |
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index 12be05b16880..53f4fd8feced 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c | |||
@@ -24,12 +24,12 @@ | |||
24 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/time.h> | 26 | #include <linux/time.h> |
27 | #include <linux/fsl/mxs-dma.h> | ||
27 | #include <sound/core.h> | 28 | #include <sound/core.h> |
28 | #include <sound/pcm.h> | 29 | #include <sound/pcm.h> |
29 | #include <sound/pcm_params.h> | 30 | #include <sound/pcm_params.h> |
30 | #include <sound/soc.h> | 31 | #include <sound/soc.h> |
31 | #include <sound/saif.h> | 32 | #include <sound/saif.h> |
32 | #include <mach/dma.h> | ||
33 | #include <asm/mach-types.h> | 33 | #include <asm/mach-types.h> |
34 | #include <mach/hardware.h> | 34 | #include <mach/hardware.h> |
35 | #include <mach/mxs.h> | 35 | #include <mach/mxs.h> |