author     Brian Norris <computersforpeace@gmail.com>	2016-07-14 15:49:28 -0400
committer  Brian Norris <computersforpeace@gmail.com>	2016-07-15 20:06:26 -0400
commit     1ed106914abdd6d73f7efba333cd6e044c59b316 (patch)
tree       b30cc450702e74080b6dc49c14fd8912e092e9b1
parent     7ddf7c1ea77d10424ae5dfa3c3a4d6a39cdf70a9 (diff)
parent     8490c03bd9d40ce71d9b67dcf93e73788ba0516d (diff)
Merge tag 'nand/for-4.8' of github.com:linux-nand/linux into mtd
Pull NAND changes from Boris Brezillon:
"""
This pull request contains only one notable change:
* Addition of the MTK NAND controller driver
And a bunch of specific NAND driver improvements/fixes. Here are the
changes that are worth mentioning:
* A few fixes/improvements for the xway NAND controller driver
* A few fixes for the sunxi NAND controller driver
* Support for DMA in the sunxi NAND driver
* Support for the sunxi NAND controller IP embedded in A23/A33 SoCs
* Addition of bitflip detection in erased pages to the brcmnand driver
* Support for new brcmnand IPs
* Update of the OMAP-GPMC binding to support DMA channel description
"""
 Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt |    7
 Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt            |    1
 Documentation/devicetree/bindings/mtd/gpmc-nand.txt                |    2
 Documentation/devicetree/bindings/mtd/mtk-nand.txt                 |  160
 Documentation/devicetree/bindings/mtd/sunxi-nand.txt               |    6
 drivers/mtd/nand/Kconfig                                           |    8
 drivers/mtd/nand/Makefile                                          |    1
 drivers/mtd/nand/brcmnand/brcmnand.c                               |  171
 drivers/mtd/nand/jz4780_bch.c                                      |    2
 drivers/mtd/nand/jz4780_nand.c                                     |    2
 drivers/mtd/nand/mtk_ecc.c                                         |  530
 drivers/mtd/nand/mtk_ecc.h                                         |   50
 drivers/mtd/nand/mtk_nand.c                                        | 1526
 drivers/mtd/nand/nand_ids.c                                        |    1
 drivers/mtd/nand/omap2.c                                           |    7
 drivers/mtd/nand/sunxi_nand.c                                      |  397
 drivers/mtd/nand/xway_nand.c                                       |  231
 drivers/mtd/tests/nandbiterrs.c                                    |    2
 include/linux/mtd/nand.h                                           |    1
 19 files changed, 2981 insertions(+), 124 deletions(-)
diff --git a/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
index 21055e210234..c1359f4d48d7 100644
--- a/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
@@ -46,6 +46,10 @@ Required properties: | |||
46 | 0 maps to GPMC_WAIT0 pin. | 46 | 0 maps to GPMC_WAIT0 pin. |
47 | - gpio-cells: Must be set to 2 | 47 | - gpio-cells: Must be set to 2 |
48 | 48 | ||
49 | Required properties when using NAND prefetch dma: | ||
50 | - dmas GPMC NAND prefetch dma channel | ||
51 | - dma-names Must be set to "rxtx" | ||
52 | |||
49 | Timing properties for child nodes. All are optional and default to 0. | 53 | Timing properties for child nodes. All are optional and default to 0. |
50 | 54 | ||
51 | - gpmc,sync-clk-ps: Minimum clock period for synchronous mode, in picoseconds | 55 | - gpmc,sync-clk-ps: Minimum clock period for synchronous mode, in picoseconds |
@@ -137,7 +141,8 @@ Example for an AM33xx board: | |||
137 | ti,hwmods = "gpmc"; | 141 | ti,hwmods = "gpmc"; |
138 | reg = <0x50000000 0x2000>; | 142 | reg = <0x50000000 0x2000>; |
139 | interrupts = <100>; | 143 | interrupts = <100>; |
140 | 144 | dmas = <&edma 52 0>; | |
145 | dma-names = "rxtx"; | ||
141 | gpmc,num-cs = <8>; | 146 | gpmc,num-cs = <8>; |
142 | gpmc,num-waitpins = <2>; | 147 | gpmc,num-waitpins = <2>; |
143 | #address-cells = <2>; | 148 | #address-cells = <2>; |
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 7066597c9a81..b40f3a492800 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -27,6 +27,7 @@ Required properties: | |||
27 | brcm,brcmnand-v6.2 | 27 | brcm,brcmnand-v6.2 |
28 | brcm,brcmnand-v7.0 | 28 | brcm,brcmnand-v7.0 |
29 | brcm,brcmnand-v7.1 | 29 | brcm,brcmnand-v7.1 |
30 | brcm,brcmnand-v7.2 | ||
30 | brcm,brcmnand | 31 | brcm,brcmnand |
31 | - reg : the register start and length for NAND register region. | 32 | - reg : the register start and length for NAND register region. |
32 | (optional) Flash DMA register range (if present) | 33 | (optional) Flash DMA register range (if present) |
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index 3ee7e202657c..174f68c26c1b 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -39,7 +39,7 @@ Optional properties: | |||
39 | 39 | ||
40 | "prefetch-polled" Prefetch polled mode (default) | 40 | "prefetch-polled" Prefetch polled mode (default) |
41 | "polled" Polled mode, without prefetch | 41 | "polled" Polled mode, without prefetch |
42 | "prefetch-dma" Prefetch enabled sDMA mode | 42 | "prefetch-dma" Prefetch enabled DMA mode |
43 | "prefetch-irq" Prefetch enabled irq mode | 43 | "prefetch-irq" Prefetch enabled irq mode |
44 | 44 | ||
45 | - elm_id: <deprecated> use "ti,elm-id" instead | 45 | - elm_id: <deprecated> use "ti,elm-id" instead |
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
new file mode 100644
index 000000000000..069c192ed5c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -0,0 +1,160 @@ | |||
1 | MTK SoCs NAND FLASH controller (NFC) DT binding | ||
2 | |||
3 | This file documents the device tree bindings for MTK SoCs NAND controllers. | ||
4 | The functional split of the controller requires two drivers to operate: | ||
5 | the nand controller interface driver and the ECC engine driver. | ||
6 | |||
7 | The hardware description for both devices must be captured as device | ||
8 | tree nodes. | ||
9 | |||
10 | 1) NFC NAND Controller Interface (NFI): | ||
11 | ======================================= | ||
12 | |||
13 | The first part of NFC is NAND Controller Interface (NFI) HW. | ||
14 | Required NFI properties: | ||
15 | - compatible: Should be "mediatek,mtxxxx-nfc". | ||
16 | - reg: Base physical address and size of NFI. | ||
17 | - interrupts: Interrupts of NFI. | ||
18 | - clocks: NFI required clocks. | ||
19 | - clock-names: NFI clocks internal name. | ||
20 | - status: Disabled by default. Set to "okay" by the platform. | ||
21 | - ecc-engine: Required ECC Engine node. | ||
22 | - #address-cells: NAND chip index, should be 1. | ||
23 | - #size-cells: Should be 0. | ||
24 | |||
25 | Example: | ||
26 | |||
27 | nandc: nfi@1100d000 { | ||
28 | compatible = "mediatek,mt2701-nfc"; | ||
29 | reg = <0 0x1100d000 0 0x1000>; | ||
30 | interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_LOW>; | ||
31 | clocks = <&pericfg CLK_PERI_NFI>, | ||
32 | <&pericfg CLK_PERI_NFI_PAD>; | ||
33 | clock-names = "nfi_clk", "pad_clk"; | ||
34 | status = "disabled"; | ||
35 | ecc-engine = <&bch>; | ||
36 | #address-cells = <1>; | ||
37 | #size-cells = <0>; | ||
38 | }; | ||
39 | |||
40 | Platform related properties, should be set in {platform_name}.dts: | ||
41 | - children nodes: NAND chips. | ||
42 | |||
43 | Children nodes properties: | ||
44 | - reg: Chip Select Signal, default 0. | ||
45 | Set as reg = <0>, <1> when two CS are needed. | ||
46 | Optional: | ||
47 | - nand-on-flash-bbt: Store BBT on NAND Flash. | ||
48 | - nand-ecc-mode: the NAND ecc mode (check driver for supported modes) | ||
49 | - nand-ecc-step-size: Number of data bytes covered by a single ECC step. | ||
50 | valid values: 512 and 1024. | ||
51 | 1024 is recommended for large page NANDs. | ||
52 | - nand-ecc-strength: Number of bits to correct per ECC step. | ||
53 | The valid values that the controller supports are: 4, 6, | ||
54 | 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44, | ||
55 | 48, 52, 56, 60. | ||
56 | The strength should be calculated as follows: | ||
57 | E = (S - F) * 8 / 14 | ||
58 | S = O / (P / Q) | ||
59 | E : nand-ecc-strength. | ||
60 | S : spare size per sector. | ||
61 | F : FDM size, should be in the range [1,8]. | ||
62 | It is used to store free oob data. | ||
63 | O : oob size. | ||
64 | P : page size. | ||
65 | Q : nand-ecc-step-size. | ||
66 | If the result does not match any of the listed | ||
67 | choices above, select the next smaller valid value from | ||
68 | the list; otherwise the driver will do the adjustment | ||
69 | at runtime. (A worked example follows the property list below.) | ||
70 | - pinctrl-names: Default NAND pin GPIO setting name. | ||
71 | - pinctrl-0: GPIO setting node. | ||
72 | |||
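As a rough worked example of the strength formula above (the NAND
geometry used here is hypothetical, not taken from this binding):
with page size P = 4096 bytes, OOB size O = 256 bytes,
nand-ecc-step-size Q = 1024 and FDM size F = 8, the spare size per
sector is S = O / (P / Q) = 256 / 4 = 64, so
E = (S - F) * 8 / 14 = (64 - 8) * 8 / 14 = 32, which is a supported
value, giving nand-ecc-strength = <32>. If the OOB were 224 bytes
instead, S = 56 and E = (56 - 8) * 8 / 14 = 27.4, which is not in the
list, so the smaller supported value 24 should be chosen (or the
driver will adjust it at runtime).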
73 | Example: | ||
74 | &pio { | ||
75 | nand_pins_default: nanddefault { | ||
76 | pins_dat { | ||
77 | pinmux = <MT2701_PIN_111_MSDC0_DAT7__FUNC_NLD7>, | ||
78 | <MT2701_PIN_112_MSDC0_DAT6__FUNC_NLD6>, | ||
79 | <MT2701_PIN_114_MSDC0_DAT4__FUNC_NLD4>, | ||
80 | <MT2701_PIN_118_MSDC0_DAT3__FUNC_NLD3>, | ||
81 | <MT2701_PIN_121_MSDC0_DAT0__FUNC_NLD0>, | ||
82 | <MT2701_PIN_120_MSDC0_DAT1__FUNC_NLD1>, | ||
83 | <MT2701_PIN_113_MSDC0_DAT5__FUNC_NLD5>, | ||
84 | <MT2701_PIN_115_MSDC0_RSTB__FUNC_NLD8>, | ||
85 | <MT2701_PIN_119_MSDC0_DAT2__FUNC_NLD2>; | ||
86 | input-enable; | ||
87 | drive-strength = <MTK_DRIVE_8mA>; | ||
88 | bias-pull-up; | ||
89 | }; | ||
90 | |||
91 | pins_we { | ||
92 | pinmux = <MT2701_PIN_117_MSDC0_CLK__FUNC_NWEB>; | ||
93 | drive-strength = <MTK_DRIVE_8mA>; | ||
94 | bias-pull-up = <MTK_PUPD_SET_R1R0_10>; | ||
95 | }; | ||
96 | |||
97 | pins_ale { | ||
98 | pinmux = <MT2701_PIN_116_MSDC0_CMD__FUNC_NALE>; | ||
99 | drive-strength = <MTK_DRIVE_8mA>; | ||
100 | bias-pull-down = <MTK_PUPD_SET_R1R0_10>; | ||
101 | }; | ||
102 | }; | ||
103 | }; | ||
104 | |||
105 | &nandc { | ||
106 | status = "okay"; | ||
107 | pinctrl-names = "default"; | ||
108 | pinctrl-0 = <&nand_pins_default>; | ||
109 | nand@0 { | ||
110 | reg = <0>; | ||
111 | nand-on-flash-bbt; | ||
112 | nand-ecc-mode = "hw"; | ||
113 | nand-ecc-strength = <24>; | ||
114 | nand-ecc-step-size = <1024>; | ||
115 | }; | ||
116 | }; | ||
117 | |||
118 | NAND chip optional subnodes: | ||
119 | - Partitions, see Documentation/devicetree/bindings/mtd/partition.txt | ||
120 | |||
121 | Example: | ||
122 | nand@0 { | ||
123 | partitions { | ||
124 | compatible = "fixed-partitions"; | ||
125 | #address-cells = <1>; | ||
126 | #size-cells = <1>; | ||
127 | |||
128 | preloader@0 { | ||
129 | label = "pl"; | ||
130 | read-only; | ||
131 | reg = <0x00000000 0x00400000>; | ||
132 | }; | ||
133 | android@0x00400000 { | ||
134 | label = "android"; | ||
135 | reg = <0x00400000 0x12c00000>; | ||
136 | }; | ||
137 | }; | ||
138 | }; | ||
139 | |||
140 | 2) ECC Engine: | ||
141 | ============== | ||
142 | |||
143 | Required BCH properties: | ||
144 | - compatible: Should be "mediatek,mtxxxx-ecc". | ||
145 | - reg: Base physical address and size of ECC. | ||
146 | - interrupts: Interrupts of ECC. | ||
147 | - clocks: ECC required clocks. | ||
148 | - clock-names: ECC clocks internal name. | ||
149 | - status: Disabled by default. Set to "okay" by the platform. | ||
150 | |||
151 | Example: | ||
152 | |||
153 | bch: ecc@1100e000 { | ||
154 | compatible = "mediatek,mt2701-ecc"; | ||
155 | reg = <0 0x1100e000 0 0x1000>; | ||
156 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>; | ||
157 | clocks = <&pericfg CLK_PERI_NFI_ECC>; | ||
158 | clock-names = "nfiecc_clk"; | ||
159 | status = "disabled"; | ||
160 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
index 086d6f44c4b9..f322f56aef74 100644
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -11,10 +11,16 @@ Required properties: | |||
11 | * "ahb" : AHB gating clock | 11 | * "ahb" : AHB gating clock |
12 | * "mod" : nand controller clock | 12 | * "mod" : nand controller clock |
13 | 13 | ||
14 | Optional properties: | ||
15 | - dmas : shall reference DMA channel associated to the NAND controller. | ||
16 | - dma-names : shall be "rxtx". | ||
17 | |||
14 | Optional children nodes: | 18 | Optional children nodes: |
15 | Children nodes represent the available nand chips. | 19 | Children nodes represent the available nand chips. |
16 | 20 | ||
17 | Optional properties: | 21 | Optional properties: |
22 | - reset : phandle + reset specifier pair | ||
23 | - reset-names : must contain "ahb" | ||
18 | - allwinner,rb : shall contain the native Ready/Busy ids. | 24 | - allwinner,rb : shall contain the native Ready/Busy ids. |
19 | or | 25 | or |
20 | - rb-gpios : shall contain the gpios used as R/B pins. | 26 | - rb-gpios : shall contain the gpios used as R/B pins. |
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index eace3ef10d9d..21ff58099f3b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -539,7 +539,6 @@ config MTD_NAND_FSMC | |||
539 | config MTD_NAND_XWAY | 539 | config MTD_NAND_XWAY |
540 | tristate "Support for NAND on Lantiq XWAY SoC" | 540 | tristate "Support for NAND on Lantiq XWAY SoC" |
541 | depends on LANTIQ && SOC_TYPE_XWAY | 541 | depends on LANTIQ && SOC_TYPE_XWAY |
542 | select MTD_NAND_PLATFORM | ||
543 | help | 542 | help |
544 | Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached | 543 | Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached |
545 | to the External Bus Unit (EBU). | 544 | to the External Bus Unit (EBU). |
@@ -563,4 +562,11 @@ config MTD_NAND_QCOM | |||
563 | Enables support for NAND flash chips on SoCs containing the EBI2 NAND | 562 | Enables support for NAND flash chips on SoCs containing the EBI2 NAND |
564 | controller. This controller is found on IPQ806x SoC. | 563 | controller. This controller is found on IPQ806x SoC. |
565 | 564 | ||
565 | config MTD_NAND_MTK | ||
566 | tristate "Support for NAND controller on MTK SoCs" | ||
567 | depends on HAS_DMA | ||
568 | help | ||
569 | Enables support for NAND controller on MTK SoCs. | ||
570 | This controller is found on mt27xx, mt81xx, mt65xx SoCs. | ||
571 | |||
566 | endif # MTD_NAND | 572 | endif # MTD_NAND |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index f55335373f7c..cafde6f3d957 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o | |||
57 | obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o | 57 | obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o |
58 | obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ | 58 | obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ |
59 | obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o | 59 | obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o |
60 | obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o | ||
60 | 61 | ||
61 | nand-objs := nand_base.o nand_bbt.o nand_timings.o | 62 | nand-objs := nand_base.o nand_bbt.o nand_timings.o |
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index b76ad7c0144f..faca01d6e0f9 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -340,6 +340,36 @@ static const u16 brcmnand_regs_v71[] = { | |||
340 | [BRCMNAND_FC_BASE] = 0x400, | 340 | [BRCMNAND_FC_BASE] = 0x400, |
341 | }; | 341 | }; |
342 | 342 | ||
343 | /* BRCMNAND v7.2 */ | ||
344 | static const u16 brcmnand_regs_v72[] = { | ||
345 | [BRCMNAND_CMD_START] = 0x04, | ||
346 | [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, | ||
347 | [BRCMNAND_CMD_ADDRESS] = 0x0c, | ||
348 | [BRCMNAND_INTFC_STATUS] = 0x14, | ||
349 | [BRCMNAND_CS_SELECT] = 0x18, | ||
350 | [BRCMNAND_CS_XOR] = 0x1c, | ||
351 | [BRCMNAND_LL_OP] = 0x20, | ||
352 | [BRCMNAND_CS0_BASE] = 0x50, | ||
353 | [BRCMNAND_CS1_BASE] = 0, | ||
354 | [BRCMNAND_CORR_THRESHOLD] = 0xdc, | ||
355 | [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, | ||
356 | [BRCMNAND_UNCORR_COUNT] = 0xfc, | ||
357 | [BRCMNAND_CORR_COUNT] = 0x100, | ||
358 | [BRCMNAND_CORR_EXT_ADDR] = 0x10c, | ||
359 | [BRCMNAND_CORR_ADDR] = 0x110, | ||
360 | [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, | ||
361 | [BRCMNAND_UNCORR_ADDR] = 0x118, | ||
362 | [BRCMNAND_SEMAPHORE] = 0x150, | ||
363 | [BRCMNAND_ID] = 0x194, | ||
364 | [BRCMNAND_ID_EXT] = 0x198, | ||
365 | [BRCMNAND_LL_RDATA] = 0x19c, | ||
366 | [BRCMNAND_OOB_READ_BASE] = 0x200, | ||
367 | [BRCMNAND_OOB_READ_10_BASE] = 0, | ||
368 | [BRCMNAND_OOB_WRITE_BASE] = 0x400, | ||
369 | [BRCMNAND_OOB_WRITE_10_BASE] = 0, | ||
370 | [BRCMNAND_FC_BASE] = 0x600, | ||
371 | }; | ||
372 | |||
343 | enum brcmnand_cs_reg { | 373 | enum brcmnand_cs_reg { |
344 | BRCMNAND_CS_CFG_EXT = 0, | 374 | BRCMNAND_CS_CFG_EXT = 0, |
345 | BRCMNAND_CS_CFG, | 375 | BRCMNAND_CS_CFG, |
@@ -435,7 +465,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) | |||
435 | } | 465 | } |
436 | 466 | ||
437 | /* Register offsets */ | 467 | /* Register offsets */ |
438 | if (ctrl->nand_version >= 0x0701) | 468 | if (ctrl->nand_version >= 0x0702) |
469 | ctrl->reg_offsets = brcmnand_regs_v72; | ||
470 | else if (ctrl->nand_version >= 0x0701) | ||
439 | ctrl->reg_offsets = brcmnand_regs_v71; | 471 | ctrl->reg_offsets = brcmnand_regs_v71; |
440 | else if (ctrl->nand_version >= 0x0600) | 472 | else if (ctrl->nand_version >= 0x0600) |
441 | ctrl->reg_offsets = brcmnand_regs_v60; | 473 | ctrl->reg_offsets = brcmnand_regs_v60; |
@@ -480,7 +512,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) | |||
480 | } | 512 | } |
481 | 513 | ||
482 | /* Maximum spare area sector size (per 512B) */ | 514 | /* Maximum spare area sector size (per 512B) */ |
483 | if (ctrl->nand_version >= 0x0600) | 515 | if (ctrl->nand_version >= 0x0702) |
516 | ctrl->max_oob = 128; | ||
517 | else if (ctrl->nand_version >= 0x0600) | ||
484 | ctrl->max_oob = 64; | 518 | ctrl->max_oob = 64; |
485 | else if (ctrl->nand_version >= 0x0500) | 519 | else if (ctrl->nand_version >= 0x0500) |
486 | ctrl->max_oob = 32; | 520 | ctrl->max_oob = 32; |
@@ -583,14 +617,20 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) | |||
583 | enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; | 617 | enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; |
584 | int cs = host->cs; | 618 | int cs = host->cs; |
585 | 619 | ||
586 | if (ctrl->nand_version >= 0x0600) | 620 | if (ctrl->nand_version >= 0x0702) |
621 | bits = 7; | ||
622 | else if (ctrl->nand_version >= 0x0600) | ||
587 | bits = 6; | 623 | bits = 6; |
588 | else if (ctrl->nand_version >= 0x0500) | 624 | else if (ctrl->nand_version >= 0x0500) |
589 | bits = 5; | 625 | bits = 5; |
590 | else | 626 | else |
591 | bits = 4; | 627 | bits = 4; |
592 | 628 | ||
593 | if (ctrl->nand_version >= 0x0600) { | 629 | if (ctrl->nand_version >= 0x0702) { |
630 | if (cs >= 4) | ||
631 | reg = BRCMNAND_CORR_THRESHOLD_EXT; | ||
632 | shift = (cs % 4) * bits; | ||
633 | } else if (ctrl->nand_version >= 0x0600) { | ||
594 | if (cs >= 5) | 634 | if (cs >= 5) |
595 | reg = BRCMNAND_CORR_THRESHOLD_EXT; | 635 | reg = BRCMNAND_CORR_THRESHOLD_EXT; |
596 | shift = (cs % 5) * bits; | 636 | shift = (cs % 5) * bits; |
@@ -631,19 +671,28 @@ enum { | |||
631 | 671 | ||
632 | static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) | 672 | static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) |
633 | { | 673 | { |
634 | if (ctrl->nand_version >= 0x0600) | 674 | if (ctrl->nand_version >= 0x0702) |
675 | return GENMASK(7, 0); | ||
676 | else if (ctrl->nand_version >= 0x0600) | ||
635 | return GENMASK(6, 0); | 677 | return GENMASK(6, 0); |
636 | else | 678 | else |
637 | return GENMASK(5, 0); | 679 | return GENMASK(5, 0); |
638 | } | 680 | } |
639 | 681 | ||
640 | #define NAND_ACC_CONTROL_ECC_SHIFT 16 | 682 | #define NAND_ACC_CONTROL_ECC_SHIFT 16 |
683 | #define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 | ||
641 | 684 | ||
642 | static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) | 685 | static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) |
643 | { | 686 | { |
644 | u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; | 687 | u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; |
645 | 688 | ||
646 | return mask << NAND_ACC_CONTROL_ECC_SHIFT; | 689 | mask <<= NAND_ACC_CONTROL_ECC_SHIFT; |
690 | |||
691 | /* v7.2 includes additional ECC levels */ | ||
692 | if (ctrl->nand_version >= 0x0702) | ||
693 | mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; | ||
694 | |||
695 | return mask; | ||
647 | } | 696 | } |
648 | 697 | ||
649 | static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) | 698 | static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) |
@@ -667,7 +716,9 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) | |||
667 | 716 | ||
668 | static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) | 717 | static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) |
669 | { | 718 | { |
670 | if (ctrl->nand_version >= 0x0600) | 719 | if (ctrl->nand_version >= 0x0702) |
720 | return 9; | ||
721 | else if (ctrl->nand_version >= 0x0600) | ||
671 | return 7; | 722 | return 7; |
672 | else if (ctrl->nand_version >= 0x0500) | 723 | else if (ctrl->nand_version >= 0x0500) |
673 | return 6; | 724 | return 6; |
@@ -773,10 +824,16 @@ enum brcmnand_llop_type { | |||
773 | * Internal support functions | 824 | * Internal support functions |
774 | ***********************************************************************/ | 825 | ***********************************************************************/ |
775 | 826 | ||
776 | static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg) | 827 | static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl, |
828 | struct brcmnand_cfg *cfg) | ||
777 | { | 829 | { |
778 | return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && | 830 | if (ctrl->nand_version <= 0x0701) |
779 | cfg->ecc_level == 15; | 831 | return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && |
832 | cfg->ecc_level == 15; | ||
833 | else | ||
834 | return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 && | ||
835 | cfg->ecc_level == 15) || | ||
836 | (cfg->spare_area_size == 28 && cfg->ecc_level == 16)); | ||
780 | } | 837 | } |
781 | 838 | ||
782 | /* | 839 | /* |
@@ -931,7 +988,7 @@ static int brcmstb_choose_ecc_layout(struct brcmnand_host *host) | |||
931 | if (p->sector_size_1k) | 988 | if (p->sector_size_1k) |
932 | ecc_level <<= 1; | 989 | ecc_level <<= 1; |
933 | 990 | ||
934 | if (is_hamming_ecc(p)) { | 991 | if (is_hamming_ecc(host->ctrl, p)) { |
935 | ecc->bytes = 3 * sectors; | 992 | ecc->bytes = 3 * sectors; |
936 | mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); | 993 | mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); |
937 | return 0; | 994 | return 0; |
@@ -1545,6 +1602,56 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, | |||
1545 | return ret; | 1602 | return ret; |
1546 | } | 1603 | } |
1547 | 1604 | ||
1605 | /* | ||
1606 | * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC | ||
1607 | * error | ||
1608 | * | ||
1609 | * Because the HW ECC signals an ECC error if an erased page has even a single | ||
1610 | * bitflip, we must check each ECC error to see if it is actually an erased | ||
1611 | * page with bitflips, not a truly corrupted page. | ||
1612 | * | ||
1613 | * On a real error, return a negative error code (-EBADMSG for ECC error), and | ||
1614 | * buf will contain raw data. | ||
1615 | * Otherwise, buf is filled with 0xffs and the maximum number of bitflips | ||
1616 | * per ECC sector is returned to the caller. | ||
1617 | * | ||
1618 | */ | ||
1619 | static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, | ||
1620 | struct nand_chip *chip, void *buf, u64 addr) | ||
1621 | { | ||
1622 | int i, sas; | ||
1623 | void *oob = chip->oob_poi; | ||
1624 | int bitflips = 0; | ||
1625 | int page = addr >> chip->page_shift; | ||
1626 | int ret; | ||
1627 | |||
1628 | if (!buf) { | ||
1629 | buf = chip->buffers->databuf; | ||
1630 | /* Invalidate page cache */ | ||
1631 | chip->pagebuf = -1; | ||
1632 | } | ||
1633 | |||
1634 | sas = mtd->oobsize / chip->ecc.steps; | ||
1635 | |||
1636 | /* read without ecc for verification */ | ||
1637 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); | ||
1638 | ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page); | ||
1639 | if (ret) | ||
1640 | return ret; | ||
1641 | |||
1642 | for (i = 0; i < chip->ecc.steps; i++, oob += sas) { | ||
1643 | ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, | ||
1644 | oob, sas, NULL, 0, | ||
1645 | chip->ecc.strength); | ||
1646 | if (ret < 0) | ||
1647 | return ret; | ||
1648 | |||
1649 | bitflips = max(bitflips, ret); | ||
1650 | } | ||
1651 | |||
1652 | return bitflips; | ||
1653 | } | ||
1654 | |||
1548 | static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, | 1655 | static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, |
1549 | u64 addr, unsigned int trans, u32 *buf, u8 *oob) | 1656 | u64 addr, unsigned int trans, u32 *buf, u8 *oob) |
1550 | { | 1657 | { |
@@ -1552,9 +1659,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, | |||
1552 | struct brcmnand_controller *ctrl = host->ctrl; | 1659 | struct brcmnand_controller *ctrl = host->ctrl; |
1553 | u64 err_addr = 0; | 1660 | u64 err_addr = 0; |
1554 | int err; | 1661 | int err; |
1662 | bool retry = true; | ||
1555 | 1663 | ||
1556 | dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); | 1664 | dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); |
1557 | 1665 | ||
1666 | try_dmaread: | ||
1558 | brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); | 1667 | brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); |
1559 | 1668 | ||
1560 | if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { | 1669 | if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { |
@@ -1575,6 +1684,34 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, | |||
1575 | } | 1684 | } |
1576 | 1685 | ||
1577 | if (mtd_is_eccerr(err)) { | 1686 | if (mtd_is_eccerr(err)) { |
1687 | /* | ||
1688 | * On controller versions 7.0 and 7.1, a DMA read issued after a | ||
1689 | * prior PIO read that reported an uncorrectable error can capture | ||
1690 | * that stale error; it is cleared only by a subsequent DMA read. | ||
1691 | * Retry once to clear a possible false error reported for the | ||
1692 | * current DMA read. | ||
1693 | * | ||
1694 | */ | ||
1695 | if ((ctrl->nand_version == 0x0700) || | ||
1696 | (ctrl->nand_version == 0x0701)) { | ||
1697 | if (retry) { | ||
1698 | retry = false; | ||
1699 | goto try_dmaread; | ||
1700 | } | ||
1701 | } | ||
1702 | |||
1703 | /* | ||
1704 | * Controller version 7.2 has hw support for detecting erased page | ||
1705 | * bitflips; apply sw verification for older controllers only. | ||
1706 | */ | ||
1707 | if (ctrl->nand_version < 0x0702) { | ||
1708 | err = brcmstb_nand_verify_erased_page(mtd, chip, buf, | ||
1709 | addr); | ||
1710 | /* erased page bitflips corrected */ | ||
1711 | if (err > 0) | ||
1712 | return err; | ||
1713 | } | ||
1714 | |||
1578 | dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", | 1715 | dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", |
1579 | (unsigned long long)err_addr); | 1716 | (unsigned long long)err_addr); |
1580 | mtd->ecc_stats.failed++; | 1717 | mtd->ecc_stats.failed++; |
@@ -1857,7 +1994,8 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, | |||
1857 | return 0; | 1994 | return 0; |
1858 | } | 1995 | } |
1859 | 1996 | ||
1860 | static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg) | 1997 | static void brcmnand_print_cfg(struct brcmnand_host *host, |
1998 | char *buf, struct brcmnand_cfg *cfg) | ||
1861 | { | 1999 | { |
1862 | buf += sprintf(buf, | 2000 | buf += sprintf(buf, |
1863 | "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", | 2001 | "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", |
@@ -1868,7 +2006,7 @@ static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg) | |||
1868 | cfg->spare_area_size, cfg->device_width); | 2006 | cfg->spare_area_size, cfg->device_width); |
1869 | 2007 | ||
1870 | /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ | 2008 | /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ |
1871 | if (is_hamming_ecc(cfg)) | 2009 | if (is_hamming_ecc(host->ctrl, cfg)) |
1872 | sprintf(buf, ", Hamming ECC"); | 2010 | sprintf(buf, ", Hamming ECC"); |
1873 | else if (cfg->sector_size_1k) | 2011 | else if (cfg->sector_size_1k) |
1874 | sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); | 2012 | sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); |
@@ -1987,7 +2125,7 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) | |||
1987 | 2125 | ||
1988 | brcmnand_set_ecc_enabled(host, 1); | 2126 | brcmnand_set_ecc_enabled(host, 1); |
1989 | 2127 | ||
1990 | brcmnand_print_cfg(msg, cfg); | 2128 | brcmnand_print_cfg(host, msg, cfg); |
1991 | dev_info(ctrl->dev, "detected %s\n", msg); | 2129 | dev_info(ctrl->dev, "detected %s\n", msg); |
1992 | 2130 | ||
1993 | /* Configure ACC_CONTROL */ | 2131 | /* Configure ACC_CONTROL */ |
@@ -1995,6 +2133,10 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) | |||
1995 | tmp = nand_readreg(ctrl, offs); | 2133 | tmp = nand_readreg(ctrl, offs); |
1996 | tmp &= ~ACC_CONTROL_PARTIAL_PAGE; | 2134 | tmp &= ~ACC_CONTROL_PARTIAL_PAGE; |
1997 | tmp &= ~ACC_CONTROL_RD_ERASED; | 2135 | tmp &= ~ACC_CONTROL_RD_ERASED; |
2136 | |||
2137 | /* We need to turn on Read from erased pages protected by ECC */ | ||
2138 | if (ctrl->nand_version >= 0x0702) | ||
2139 | tmp |= ACC_CONTROL_RD_ERASED; | ||
1998 | tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; | 2140 | tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; |
1999 | if (ctrl->features & BRCMNAND_HAS_PREFETCH) { | 2141 | if (ctrl->features & BRCMNAND_HAS_PREFETCH) { |
2000 | /* | 2142 | /* |
@@ -2195,6 +2337,7 @@ static const struct of_device_id brcmnand_of_match[] = { | |||
2195 | { .compatible = "brcm,brcmnand-v6.2" }, | 2337 | { .compatible = "brcm,brcmnand-v6.2" }, |
2196 | { .compatible = "brcm,brcmnand-v7.0" }, | 2338 | { .compatible = "brcm,brcmnand-v7.0" }, |
2197 | { .compatible = "brcm,brcmnand-v7.1" }, | 2339 | { .compatible = "brcm,brcmnand-v7.1" }, |
2340 | { .compatible = "brcm,brcmnand-v7.2" }, | ||
2198 | {}, | 2341 | {}, |
2199 | }; | 2342 | }; |
2200 | MODULE_DEVICE_TABLE(of, brcmnand_of_match); | 2343 | MODULE_DEVICE_TABLE(of, brcmnand_of_match); |
diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c
index d74f4ba4a6f4..731c6051d91e 100644
--- a/drivers/mtd/nand/jz4780_bch.c
+++ b/drivers/mtd/nand/jz4780_bch.c
@@ -375,6 +375,6 @@ static struct platform_driver jz4780_bch_driver = { | |||
375 | module_platform_driver(jz4780_bch_driver); | 375 | module_platform_driver(jz4780_bch_driver); |
376 | 376 | ||
377 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); | 377 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); |
378 | MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); | 378 | MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); |
379 | MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver"); | 379 | MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver"); |
380 | MODULE_LICENSE("GPL v2"); | 380 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index daf3c4217f4d..175f67da25af 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -412,6 +412,6 @@ static struct platform_driver jz4780_nand_driver = { | |||
412 | module_platform_driver(jz4780_nand_driver); | 412 | module_platform_driver(jz4780_nand_driver); |
413 | 413 | ||
414 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); | 414 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); |
415 | MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); | 415 | MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); |
416 | MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver"); | 416 | MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver"); |
417 | MODULE_LICENSE("GPL v2"); | 417 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
new file mode 100644
index 000000000000..25a4fbd4d24a
--- /dev/null
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -0,0 +1,530 @@ | |||
1 | /* | ||
2 | * MTK ECC controller driver. | ||
3 | * Copyright (C) 2016 MediaTek Inc. | ||
4 | * Authors: Xiaolei Li <xiaolei.li@mediatek.com> | ||
5 | * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/iopoll.h> | ||
23 | #include <linux/of.h> | ||
24 | #include <linux/of_platform.h> | ||
25 | #include <linux/mutex.h> | ||
26 | |||
27 | #include "mtk_ecc.h" | ||
28 | |||
29 | #define ECC_IDLE_MASK BIT(0) | ||
30 | #define ECC_IRQ_EN BIT(0) | ||
31 | #define ECC_OP_ENABLE (1) | ||
32 | #define ECC_OP_DISABLE (0) | ||
33 | |||
34 | #define ECC_ENCCON (0x00) | ||
35 | #define ECC_ENCCNFG (0x04) | ||
36 | #define ECC_CNFG_4BIT (0) | ||
37 | #define ECC_CNFG_6BIT (1) | ||
38 | #define ECC_CNFG_8BIT (2) | ||
39 | #define ECC_CNFG_10BIT (3) | ||
40 | #define ECC_CNFG_12BIT (4) | ||
41 | #define ECC_CNFG_14BIT (5) | ||
42 | #define ECC_CNFG_16BIT (6) | ||
43 | #define ECC_CNFG_18BIT (7) | ||
44 | #define ECC_CNFG_20BIT (8) | ||
45 | #define ECC_CNFG_22BIT (9) | ||
46 | #define ECC_CNFG_24BIT (0xa) | ||
47 | #define ECC_CNFG_28BIT (0xb) | ||
48 | #define ECC_CNFG_32BIT (0xc) | ||
49 | #define ECC_CNFG_36BIT (0xd) | ||
50 | #define ECC_CNFG_40BIT (0xe) | ||
51 | #define ECC_CNFG_44BIT (0xf) | ||
52 | #define ECC_CNFG_48BIT (0x10) | ||
53 | #define ECC_CNFG_52BIT (0x11) | ||
54 | #define ECC_CNFG_56BIT (0x12) | ||
55 | #define ECC_CNFG_60BIT (0x13) | ||
56 | #define ECC_MODE_SHIFT (5) | ||
57 | #define ECC_MS_SHIFT (16) | ||
58 | #define ECC_ENCDIADDR (0x08) | ||
59 | #define ECC_ENCIDLE (0x0C) | ||
60 | #define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32)) | ||
61 | #define ECC_ENCIRQ_EN (0x80) | ||
62 | #define ECC_ENCIRQ_STA (0x84) | ||
63 | #define ECC_DECCON (0x100) | ||
64 | #define ECC_DECCNFG (0x104) | ||
65 | #define DEC_EMPTY_EN BIT(31) | ||
66 | #define DEC_CNFG_CORRECT (0x3 << 12) | ||
67 | #define ECC_DECIDLE (0x10C) | ||
68 | #define ECC_DECENUM0 (0x114) | ||
69 | #define ERR_MASK (0x3f) | ||
70 | #define ECC_DECDONE (0x124) | ||
71 | #define ECC_DECIRQ_EN (0x200) | ||
72 | #define ECC_DECIRQ_STA (0x204) | ||
73 | |||
74 | #define ECC_TIMEOUT (500000) | ||
75 | |||
76 | #define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) | ||
77 | #define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) | ||
78 | #define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \ | ||
79 | ECC_ENCIRQ_EN : ECC_DECIRQ_EN) | ||
80 | |||
81 | struct mtk_ecc { | ||
82 | struct device *dev; | ||
83 | void __iomem *regs; | ||
84 | struct clk *clk; | ||
85 | |||
86 | struct completion done; | ||
87 | struct mutex lock; | ||
88 | u32 sectors; | ||
89 | }; | ||
90 | |||
91 | static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, | ||
92 | enum mtk_ecc_operation op) | ||
93 | { | ||
94 | struct device *dev = ecc->dev; | ||
95 | u32 val; | ||
96 | int ret; | ||
97 | |||
98 | ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, | ||
99 | val & ECC_IDLE_MASK, | ||
100 | 10, ECC_TIMEOUT); | ||
101 | if (ret) | ||
102 | dev_warn(dev, "%s NOT idle\n", | ||
103 | op == ECC_ENCODE ? "encoder" : "decoder"); | ||
104 | } | ||
105 | |||
106 | static irqreturn_t mtk_ecc_irq(int irq, void *id) | ||
107 | { | ||
108 | struct mtk_ecc *ecc = id; | ||
109 | enum mtk_ecc_operation op; | ||
110 | u32 dec, enc; | ||
111 | |||
112 | dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN; | ||
113 | if (dec) { | ||
114 | op = ECC_DECODE; | ||
115 | dec = readw(ecc->regs + ECC_DECDONE); | ||
116 | if (dec & ecc->sectors) { | ||
117 | ecc->sectors = 0; | ||
118 | complete(&ecc->done); | ||
119 | } else { | ||
120 | return IRQ_HANDLED; | ||
121 | } | ||
122 | } else { | ||
123 | enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN; | ||
124 | if (enc) { | ||
125 | op = ECC_ENCODE; | ||
126 | complete(&ecc->done); | ||
127 | } else { | ||
128 | return IRQ_NONE; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | writel(0, ecc->regs + ECC_IRQ_REG(op)); | ||
133 | |||
134 | return IRQ_HANDLED; | ||
135 | } | ||
136 | |||
137 | static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) | ||
138 | { | ||
139 | u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz; | ||
140 | u32 reg; | ||
141 | |||
142 | switch (config->strength) { | ||
143 | case 4: | ||
144 | ecc_bit = ECC_CNFG_4BIT; | ||
145 | break; | ||
146 | case 6: | ||
147 | ecc_bit = ECC_CNFG_6BIT; | ||
148 | break; | ||
149 | case 8: | ||
150 | ecc_bit = ECC_CNFG_8BIT; | ||
151 | break; | ||
152 | case 10: | ||
153 | ecc_bit = ECC_CNFG_10BIT; | ||
154 | break; | ||
155 | case 12: | ||
156 | ecc_bit = ECC_CNFG_12BIT; | ||
157 | break; | ||
158 | case 14: | ||
159 | ecc_bit = ECC_CNFG_14BIT; | ||
160 | break; | ||
161 | case 16: | ||
162 | ecc_bit = ECC_CNFG_16BIT; | ||
163 | break; | ||
164 | case 18: | ||
165 | ecc_bit = ECC_CNFG_18BIT; | ||
166 | break; | ||
167 | case 20: | ||
168 | ecc_bit = ECC_CNFG_20BIT; | ||
169 | break; | ||
170 | case 22: | ||
171 | ecc_bit = ECC_CNFG_22BIT; | ||
172 | break; | ||
173 | case 24: | ||
174 | ecc_bit = ECC_CNFG_24BIT; | ||
175 | break; | ||
176 | case 28: | ||
177 | ecc_bit = ECC_CNFG_28BIT; | ||
178 | break; | ||
179 | case 32: | ||
180 | ecc_bit = ECC_CNFG_32BIT; | ||
181 | break; | ||
182 | case 36: | ||
183 | ecc_bit = ECC_CNFG_36BIT; | ||
184 | break; | ||
185 | case 40: | ||
186 | ecc_bit = ECC_CNFG_40BIT; | ||
187 | break; | ||
188 | case 44: | ||
189 | ecc_bit = ECC_CNFG_44BIT; | ||
190 | break; | ||
191 | case 48: | ||
192 | ecc_bit = ECC_CNFG_48BIT; | ||
193 | break; | ||
194 | case 52: | ||
195 | ecc_bit = ECC_CNFG_52BIT; | ||
196 | break; | ||
197 | case 56: | ||
198 | ecc_bit = ECC_CNFG_56BIT; | ||
199 | break; | ||
200 | case 60: | ||
201 | ecc_bit = ECC_CNFG_60BIT; | ||
202 | break; | ||
203 | default: | ||
204 | dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n", | ||
205 | config->strength); | ||
206 | } | ||
207 | |||
208 | if (config->op == ECC_ENCODE) { | ||
209 | /* configure ECC encoder (in bits) */ | ||
210 | enc_sz = config->len << 3; | ||
211 | |||
212 | reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); | ||
213 | reg |= (enc_sz << ECC_MS_SHIFT); | ||
214 | writel(reg, ecc->regs + ECC_ENCCNFG); | ||
215 | |||
216 | if (config->mode != ECC_NFI_MODE) | ||
217 | writel(lower_32_bits(config->addr), | ||
218 | ecc->regs + ECC_ENCDIADDR); | ||
219 | |||
220 | } else { | ||
221 | /* configure ECC decoder (in bits) */ | ||
222 | dec_sz = (config->len << 3) + | ||
223 | config->strength * ECC_PARITY_BITS; | ||
224 | |||
225 | reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); | ||
226 | reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; | ||
227 | reg |= DEC_EMPTY_EN; | ||
228 | writel(reg, ecc->regs + ECC_DECCNFG); | ||
229 | |||
230 | if (config->sectors) | ||
231 | ecc->sectors = 1 << (config->sectors - 1); | ||
232 | } | ||
233 | } | ||
234 | |||
235 | void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, | ||
236 | int sectors) | ||
237 | { | ||
238 | u32 offset, i, err; | ||
239 | u32 bitflips = 0; | ||
240 | |||
241 | stats->corrected = 0; | ||
242 | stats->failed = 0; | ||
243 | |||
244 | for (i = 0; i < sectors; i++) { | ||
245 | offset = (i >> 2) << 2; | ||
246 | err = readl(ecc->regs + ECC_DECENUM0 + offset); | ||
247 | err = err >> ((i % 4) * 8); | ||
248 | err &= ERR_MASK; | ||
249 | if (err == ERR_MASK) { | ||
250 | /* uncorrectable errors */ | ||
251 | stats->failed++; | ||
252 | continue; | ||
253 | } | ||
254 | |||
255 | stats->corrected += err; | ||
256 | bitflips = max_t(u32, bitflips, err); | ||
257 | } | ||
258 | |||
259 | stats->bitflips = bitflips; | ||
260 | } | ||
261 | EXPORT_SYMBOL(mtk_ecc_get_stats); | ||
262 | |||
263 | void mtk_ecc_release(struct mtk_ecc *ecc) | ||
264 | { | ||
265 | clk_disable_unprepare(ecc->clk); | ||
266 | put_device(ecc->dev); | ||
267 | } | ||
268 | EXPORT_SYMBOL(mtk_ecc_release); | ||
269 | |||
270 | static void mtk_ecc_hw_init(struct mtk_ecc *ecc) | ||
271 | { | ||
272 | mtk_ecc_wait_idle(ecc, ECC_ENCODE); | ||
273 | writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); | ||
274 | |||
275 | mtk_ecc_wait_idle(ecc, ECC_DECODE); | ||
276 | writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); | ||
277 | } | ||
278 | |||
279 | static struct mtk_ecc *mtk_ecc_get(struct device_node *np) | ||
280 | { | ||
281 | struct platform_device *pdev; | ||
282 | struct mtk_ecc *ecc; | ||
283 | |||
284 | pdev = of_find_device_by_node(np); | ||
285 | if (!pdev || !platform_get_drvdata(pdev)) | ||
286 | return ERR_PTR(-EPROBE_DEFER); | ||
287 | |||
288 | get_device(&pdev->dev); | ||
289 | ecc = platform_get_drvdata(pdev); | ||
290 | clk_prepare_enable(ecc->clk); | ||
291 | mtk_ecc_hw_init(ecc); | ||
292 | |||
293 | return ecc; | ||
294 | } | ||
295 | |||
296 | struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) | ||
297 | { | ||
298 | struct mtk_ecc *ecc = NULL; | ||
299 | struct device_node *np; | ||
300 | |||
301 | np = of_parse_phandle(of_node, "ecc-engine", 0); | ||
302 | if (np) { | ||
303 | ecc = mtk_ecc_get(np); | ||
304 | of_node_put(np); | ||
305 | } | ||
306 | |||
307 | return ecc; | ||
308 | } | ||
309 | EXPORT_SYMBOL(of_mtk_ecc_get); | ||
310 | |||
311 | int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) | ||
312 | { | ||
313 | enum mtk_ecc_operation op = config->op; | ||
314 | int ret; | ||
315 | |||
316 | ret = mutex_lock_interruptible(&ecc->lock); | ||
317 | if (ret) { | ||
318 | dev_err(ecc->dev, "interrupted when attempting to lock\n"); | ||
319 | return ret; | ||
320 | } | ||
321 | |||
322 | mtk_ecc_wait_idle(ecc, op); | ||
323 | mtk_ecc_config(ecc, config); | ||
324 | writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); | ||
325 | |||
326 | init_completion(&ecc->done); | ||
327 | writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op)); | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | EXPORT_SYMBOL(mtk_ecc_enable); | ||
332 | |||
333 | void mtk_ecc_disable(struct mtk_ecc *ecc) | ||
334 | { | ||
335 | enum mtk_ecc_operation op = ECC_ENCODE; | ||
336 | |||
337 | /* find out the running operation */ | ||
338 | if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) | ||
339 | op = ECC_DECODE; | ||
340 | |||
341 | /* disable it */ | ||
342 | mtk_ecc_wait_idle(ecc, op); | ||
343 | writew(0, ecc->regs + ECC_IRQ_REG(op)); | ||
344 | writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); | ||
345 | |||
346 | mutex_unlock(&ecc->lock); | ||
347 | } | ||
348 | EXPORT_SYMBOL(mtk_ecc_disable); | ||
349 | |||
350 | int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) | ||
351 | { | ||
352 | int ret; | ||
353 | |||
354 | ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); | ||
355 | if (!ret) { | ||
356 | dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n", | ||
357 | (op == ECC_ENCODE) ? "encoder" : "decoder"); | ||
358 | return -ETIMEDOUT; | ||
359 | } | ||
360 | |||
361 | return 0; | ||
362 | } | ||
363 | EXPORT_SYMBOL(mtk_ecc_wait_done); | ||
364 | |||
365 | int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, | ||
366 | u8 *data, u32 bytes) | ||
367 | { | ||
368 | dma_addr_t addr; | ||
369 | u32 *p, len, i; | ||
370 | int ret = 0; | ||
371 | |||
372 | addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); | ||
373 | ret = dma_mapping_error(ecc->dev, addr); | ||
374 | if (ret) { | ||
375 | dev_err(ecc->dev, "dma mapping error\n"); | ||
376 | return -EINVAL; | ||
377 | } | ||
378 | |||
379 | config->op = ECC_ENCODE; | ||
380 | config->addr = addr; | ||
381 | ret = mtk_ecc_enable(ecc, config); | ||
382 | if (ret) { | ||
383 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); | ||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); | ||
388 | if (ret) | ||
389 | goto timeout; | ||
390 | |||
391 | mtk_ecc_wait_idle(ecc, ECC_ENCODE); | ||
392 | |||
393 | /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ | ||
394 | len = (config->strength * ECC_PARITY_BITS + 7) >> 3; | ||
395 | p = (u32 *)(data + bytes); | ||
396 | |||
397 | /* write the parity bytes generated by the ECC back to the OOB region */ | ||
398 | for (i = 0; i < len; i++) | ||
399 | p[i] = readl(ecc->regs + ECC_ENCPAR(i)); | ||
400 | timeout: | ||
401 | |||
402 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); | ||
403 | mtk_ecc_disable(ecc); | ||
404 | |||
405 | return ret; | ||
406 | } | ||
407 | EXPORT_SYMBOL(mtk_ecc_encode); | ||
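As a hedged illustration (hypothetical caller, not part of this patch):
mtk_ecc_encode() programs the generated parity right after the data it is
handed, so the buffer must also cover the OOB area that receives it.
Assuming mtk_ecc.h is included, a write-path caller might look roughly like:

/* Hypothetical sketch: encode one 1024-byte sector with 24-bit ECC.
 * mtk_ecc_encode() maps 'buf' for DMA, runs the encoder and then copies
 * the parity it generated to buf + 1024, so 'buf' must be sized for the
 * data plus the OOB bytes that receive the parity.
 */
static int example_encode_sector(struct mtk_ecc *ecc, u8 *buf)
{
	struct mtk_ecc_config cfg = {
		.mode = ECC_DMA_MODE,	/* encoder pulls the data by DMA */
		.strength = 24,		/* must be a supported strength */
		.len = 1024,		/* number of data bytes to protect */
	};

	return mtk_ecc_encode(ecc, &cfg, buf, cfg.len);
}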
408 | |||
409 | void mtk_ecc_adjust_strength(u32 *p) | ||
410 | { | ||
411 | u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, | ||
412 | 40, 44, 48, 52, 56, 60}; | ||
413 | int i; | ||
414 | |||
415 | for (i = 0; i < ARRAY_SIZE(ecc); i++) { | ||
416 | if (*p <= ecc[i]) { | ||
417 | if (!i) | ||
418 | *p = ecc[i]; | ||
419 | else if (*p != ecc[i]) | ||
420 | *p = ecc[i - 1]; | ||
421 | return; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | *p = ecc[ARRAY_SIZE(ecc) - 1]; | ||
426 | } | ||
427 | EXPORT_SYMBOL(mtk_ecc_adjust_strength); | ||
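A small hedged illustration of the helper above (hypothetical values): a
requested strength above the minimum that the engine cannot provide is
rounded down to the next supported value.

/* Hypothetical illustration (not from the patch). */
static u32 example_adjust_strength(void)
{
	u32 strength = 30;

	mtk_ecc_adjust_strength(&strength);	/* 30 is not supported -> 28 */

	return strength;
}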
428 | |||
429 | static int mtk_ecc_probe(struct platform_device *pdev) | ||
430 | { | ||
431 | struct device *dev = &pdev->dev; | ||
432 | struct mtk_ecc *ecc; | ||
433 | struct resource *res; | ||
434 | int irq, ret; | ||
435 | |||
436 | ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); | ||
437 | if (!ecc) | ||
438 | return -ENOMEM; | ||
439 | |||
440 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
441 | ecc->regs = devm_ioremap_resource(dev, res); | ||
442 | if (IS_ERR(ecc->regs)) { | ||
443 | dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs)); | ||
444 | return PTR_ERR(ecc->regs); | ||
445 | } | ||
446 | |||
447 | ecc->clk = devm_clk_get(dev, NULL); | ||
448 | if (IS_ERR(ecc->clk)) { | ||
449 | dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); | ||
450 | return PTR_ERR(ecc->clk); | ||
451 | } | ||
452 | |||
453 | irq = platform_get_irq(pdev, 0); | ||
454 | if (irq < 0) { | ||
455 | dev_err(dev, "failed to get irq\n"); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | |||
459 | ret = dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
460 | if (ret) { | ||
461 | dev_err(dev, "failed to set DMA mask\n"); | ||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); | ||
466 | if (ret) { | ||
467 | dev_err(dev, "failed to request irq\n"); | ||
468 | return -EINVAL; | ||
469 | } | ||
470 | |||
471 | ecc->dev = dev; | ||
472 | mutex_init(&ecc->lock); | ||
473 | platform_set_drvdata(pdev, ecc); | ||
474 | dev_info(dev, "probed\n"); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | #ifdef CONFIG_PM_SLEEP | ||
480 | static int mtk_ecc_suspend(struct device *dev) | ||
481 | { | ||
482 | struct mtk_ecc *ecc = dev_get_drvdata(dev); | ||
483 | |||
484 | clk_disable_unprepare(ecc->clk); | ||
485 | |||
486 | return 0; | ||
487 | } | ||
488 | |||
489 | static int mtk_ecc_resume(struct device *dev) | ||
490 | { | ||
491 | struct mtk_ecc *ecc = dev_get_drvdata(dev); | ||
492 | int ret; | ||
493 | |||
494 | ret = clk_prepare_enable(ecc->clk); | ||
495 | if (ret) { | ||
496 | dev_err(dev, "failed to enable clk\n"); | ||
497 | return ret; | ||
498 | } | ||
499 | |||
500 | mtk_ecc_hw_init(ecc); | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); | ||
506 | #endif | ||
507 | |||
508 | static const struct of_device_id mtk_ecc_dt_match[] = { | ||
509 | { .compatible = "mediatek,mt2701-ecc" }, | ||
510 | {}, | ||
511 | }; | ||
512 | |||
513 | MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); | ||
514 | |||
515 | static struct platform_driver mtk_ecc_driver = { | ||
516 | .probe = mtk_ecc_probe, | ||
517 | .driver = { | ||
518 | .name = "mtk-ecc", | ||
519 | .of_match_table = of_match_ptr(mtk_ecc_dt_match), | ||
520 | #ifdef CONFIG_PM_SLEEP | ||
521 | .pm = &mtk_ecc_pm_ops, | ||
522 | #endif | ||
523 | }, | ||
524 | }; | ||
525 | |||
526 | module_platform_driver(mtk_ecc_driver); | ||
527 | |||
528 | MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); | ||
529 | MODULE_DESCRIPTION("MTK Nand ECC Driver"); | ||
530 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
new file mode 100644
index 000000000000..cbeba5cd1c13
--- /dev/null
+++ b/drivers/mtd/nand/mtk_ecc.h
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * MTK SDG1 ECC controller | ||
3 | * | ||
4 | * Copyright (c) 2016 Mediatek | ||
5 | * Authors: Xiaolei Li <xiaolei.li@mediatek.com> | ||
6 | * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ | ||
13 | #define __DRIVERS_MTD_NAND_MTK_ECC_H__ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | |||
17 | #define ECC_PARITY_BITS (14) | ||
18 | |||
19 | enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; | ||
20 | enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; | ||
21 | |||
22 | struct device_node; | ||
23 | struct mtk_ecc; | ||
24 | |||
25 | struct mtk_ecc_stats { | ||
26 | u32 corrected; | ||
27 | u32 bitflips; | ||
28 | u32 failed; | ||
29 | }; | ||
30 | |||
31 | struct mtk_ecc_config { | ||
32 | enum mtk_ecc_operation op; | ||
33 | enum mtk_ecc_mode mode; | ||
34 | dma_addr_t addr; | ||
35 | u32 strength; | ||
36 | u32 sectors; | ||
37 | u32 len; | ||
38 | }; | ||
39 | |||
40 | int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); | ||
41 | void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); | ||
42 | int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); | ||
43 | int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); | ||
44 | void mtk_ecc_disable(struct mtk_ecc *); | ||
45 | void mtk_ecc_adjust_strength(u32 *); | ||
46 | |||
47 | struct mtk_ecc *of_mtk_ecc_get(struct device_node *); | ||
48 | void mtk_ecc_release(struct mtk_ecc *); | ||
49 | |||
50 | #endif | ||
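The prototypes above are the complete interface exposed to the NAND
controller driver. As a rough sketch under stated assumptions (the helper
below is hypothetical and trims error handling), a read-path user would
enable the decoder, let the NFI push the sectors through it, wait for
completion and then collect the statistics:

#include "mtk_ecc.h"

/* Hypothetical decode-path sketch built only on the prototypes above. */
static int example_decode(struct mtk_ecc *ecc, u32 strength, u32 sector_len,
			  int sectors)
{
	struct mtk_ecc_config cfg = {
		.op = ECC_DECODE,
		.mode = ECC_NFI_MODE,	/* sectors are streamed in by the NFI */
		.strength = strength,
		.sectors = sectors,
		.len = sector_len,
	};
	struct mtk_ecc_stats stats;
	int ret;

	ret = mtk_ecc_enable(ecc, &cfg);
	if (ret)
		return ret;

	/* ... trigger the NFI read of 'sectors' sectors here ... */

	ret = mtk_ecc_wait_done(ecc, ECC_DECODE);
	if (!ret)
		mtk_ecc_get_stats(ecc, &stats, sectors);

	mtk_ecc_disable(ecc);	/* also releases the lock taken by enable */

	return ret ? ret : stats.bitflips;
}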
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
new file mode 100644
index 000000000000..ddaa2acb9dd7
--- /dev/null
+++ b/drivers/mtd/nand/mtk_nand.c
@@ -0,0 +1,1526 @@ | |||
1 | /* | ||
2 | * MTK NAND Flash controller driver. | ||
3 | * Copyright (C) 2016 MediaTek Inc. | ||
4 | * Authors: Xiaolei Li <xiaolei.li@mediatek.com> | ||
5 | * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/mtd/nand.h> | ||
23 | #include <linux/mtd/mtd.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/iopoll.h> | ||
26 | #include <linux/of.h> | ||
27 | #include "mtk_ecc.h" | ||
28 | |||
29 | /* NAND controller register definition */ | ||
30 | #define NFI_CNFG (0x00) | ||
31 | #define CNFG_AHB BIT(0) | ||
32 | #define CNFG_READ_EN BIT(1) | ||
33 | #define CNFG_DMA_BURST_EN BIT(2) | ||
34 | #define CNFG_BYTE_RW BIT(6) | ||
35 | #define CNFG_HW_ECC_EN BIT(8) | ||
36 | #define CNFG_AUTO_FMT_EN BIT(9) | ||
37 | #define CNFG_OP_CUST (6 << 12) | ||
38 | #define NFI_PAGEFMT (0x04) | ||
39 | #define PAGEFMT_FDM_ECC_SHIFT (12) | ||
40 | #define PAGEFMT_FDM_SHIFT (8) | ||
41 | #define PAGEFMT_SPARE_16 (0) | ||
42 | #define PAGEFMT_SPARE_26 (1) | ||
43 | #define PAGEFMT_SPARE_27 (2) | ||
44 | #define PAGEFMT_SPARE_28 (3) | ||
45 | #define PAGEFMT_SPARE_32 (4) | ||
46 | #define PAGEFMT_SPARE_36 (5) | ||
47 | #define PAGEFMT_SPARE_40 (6) | ||
48 | #define PAGEFMT_SPARE_44 (7) | ||
49 | #define PAGEFMT_SPARE_48 (8) | ||
50 | #define PAGEFMT_SPARE_49 (9) | ||
51 | #define PAGEFMT_SPARE_50 (0xa) | ||
52 | #define PAGEFMT_SPARE_51 (0xb) | ||
53 | #define PAGEFMT_SPARE_52 (0xc) | ||
54 | #define PAGEFMT_SPARE_62 (0xd) | ||
55 | #define PAGEFMT_SPARE_63 (0xe) | ||
56 | #define PAGEFMT_SPARE_64 (0xf) | ||
57 | #define PAGEFMT_SPARE_SHIFT (4) | ||
58 | #define PAGEFMT_SEC_SEL_512 BIT(2) | ||
59 | #define PAGEFMT_512_2K (0) | ||
60 | #define PAGEFMT_2K_4K (1) | ||
61 | #define PAGEFMT_4K_8K (2) | ||
62 | #define PAGEFMT_8K_16K (3) | ||
63 | /* NFI control */ | ||
64 | #define NFI_CON (0x08) | ||
65 | #define CON_FIFO_FLUSH BIT(0) | ||
66 | #define CON_NFI_RST BIT(1) | ||
67 | #define CON_BRD BIT(8) /* burst read */ | ||
68 | #define CON_BWR BIT(9) /* burst write */ | ||
69 | #define CON_SEC_SHIFT (12) | ||
70 | /* Timing control register */ | ||
71 | #define NFI_ACCCON (0x0C) | ||
72 | #define NFI_INTR_EN (0x10) | ||
73 | #define INTR_AHB_DONE_EN BIT(6) | ||
74 | #define NFI_INTR_STA (0x14) | ||
75 | #define NFI_CMD (0x20) | ||
76 | #define NFI_ADDRNOB (0x30) | ||
77 | #define NFI_COLADDR (0x34) | ||
78 | #define NFI_ROWADDR (0x38) | ||
79 | #define NFI_STRDATA (0x40) | ||
80 | #define STAR_EN (1) | ||
81 | #define STAR_DE (0) | ||
82 | #define NFI_CNRNB (0x44) | ||
83 | #define NFI_DATAW (0x50) | ||
84 | #define NFI_DATAR (0x54) | ||
85 | #define NFI_PIO_DIRDY (0x58) | ||
86 | #define PIO_DI_RDY (0x01) | ||
87 | #define NFI_STA (0x60) | ||
88 | #define STA_CMD BIT(0) | ||
89 | #define STA_ADDR BIT(1) | ||
90 | #define STA_BUSY BIT(8) | ||
91 | #define STA_EMP_PAGE BIT(12) | ||
92 | #define NFI_FSM_CUSTDATA (0xe << 16) | ||
93 | #define NFI_FSM_MASK (0xf << 16) | ||
94 | #define NFI_ADDRCNTR (0x70) | ||
95 | #define CNTR_MASK GENMASK(16, 12) | ||
96 | #define NFI_STRADDR (0x80) | ||
97 | #define NFI_BYTELEN (0x84) | ||
98 | #define NFI_CSEL (0x90) | ||
99 | #define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2) | ||
100 | #define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2) | ||
101 | #define NFI_FDM_MAX_SIZE (8) | ||
102 | #define NFI_FDM_MIN_SIZE (1) | ||
103 | #define NFI_MASTER_STA (0x224) | ||
104 | #define MASTER_STA_MASK (0x0FFF) | ||
105 | #define NFI_EMPTY_THRESH (0x23C) | ||
106 | |||
107 | #define MTK_NAME "mtk-nand" | ||
108 | #define KB(x) ((x) * 1024UL) | ||
109 | #define MB(x) (KB(x) * 1024UL) | ||
110 | |||
111 | #define MTK_TIMEOUT (500000) | ||
112 | #define MTK_RESET_TIMEOUT (1000000) | ||
113 | #define MTK_MAX_SECTOR (16) | ||
114 | #define MTK_NAND_MAX_NSELS (2) | ||
115 | |||
116 | struct mtk_nfc_bad_mark_ctl { | ||
117 | void (*bm_swap)(struct mtd_info *, u8 *buf, int raw); | ||
118 | u32 sec; | ||
119 | u32 pos; | ||
120 | }; | ||
121 | |||
122 | /* | ||
123 | * FDM: region used to store free OOB data | ||
124 | */ | ||
125 | struct mtk_nfc_fdm { | ||
126 | u32 reg_size; | ||
127 | u32 ecc_size; | ||
128 | }; | ||
129 | |||
130 | struct mtk_nfc_nand_chip { | ||
131 | struct list_head node; | ||
132 | struct nand_chip nand; | ||
133 | |||
134 | struct mtk_nfc_bad_mark_ctl bad_mark; | ||
135 | struct mtk_nfc_fdm fdm; | ||
136 | u32 spare_per_sector; | ||
137 | |||
138 | int nsels; | ||
139 | u8 sels[0]; | ||
140 | /* nothing after this field: sels[] must remain the last member */ | ||
141 | }; | ||
142 | |||
143 | struct mtk_nfc_clk { | ||
144 | struct clk *nfi_clk; | ||
145 | struct clk *pad_clk; | ||
146 | }; | ||
147 | |||
148 | struct mtk_nfc { | ||
149 | struct nand_hw_control controller; | ||
150 | struct mtk_ecc_config ecc_cfg; | ||
151 | struct mtk_nfc_clk clk; | ||
152 | struct mtk_ecc *ecc; | ||
153 | |||
154 | struct device *dev; | ||
155 | void __iomem *regs; | ||
156 | |||
157 | struct completion done; | ||
158 | struct list_head chips; | ||
159 | |||
160 | u8 *buffer; | ||
161 | }; | ||
162 | |||
163 | static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand) | ||
164 | { | ||
165 | return container_of(nand, struct mtk_nfc_nand_chip, nand); | ||
166 | } | ||
167 | |||
168 | static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i) | ||
169 | { | ||
170 | return (u8 *)p + i * chip->ecc.size; | ||
171 | } | ||
172 | |||
173 | static inline u8 *oob_ptr(struct nand_chip *chip, int i) | ||
174 | { | ||
175 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
176 | u8 *poi; | ||
177 | |||
178 | /* map each sector's FDM data to the free OOB area: the FDM data of the | ||
179 | * bad mark sector is stored at the beginning of the OOB (see the layout | ||
180 | * sketch after this function) */ | ||
181 | |||
182 | if (i < mtk_nand->bad_mark.sec) | ||
183 | poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size; | ||
184 | else if (i == mtk_nand->bad_mark.sec) | ||
185 | poi = chip->oob_poi; | ||
186 | else | ||
187 | poi = chip->oob_poi + i * mtk_nand->fdm.reg_size; | ||
188 | |||
189 | return poi; | ||
190 | } | ||
191 | |||
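To make the FDM-to-OOB shuffling in oob_ptr() above easier to follow, here is a minimal host-side sketch (plain C; the FDM size and bad-mark sector index are illustrative values, not taken from the driver) of the same index arithmetic:

#include <stdio.h>

/*
 * Mirrors oob_ptr(): the FDM data of the bad-mark sector is placed at the
 * start of oob_poi, the sectors before it are shifted one FDM slot to the
 * right, and the sectors after it keep their natural offset.
 */
static int fdm_oob_offset(int sector, int bad_mark_sec, int fdm_reg_size)
{
	if (sector < bad_mark_sec)
		return (sector + 1) * fdm_reg_size;
	if (sector == bad_mark_sec)
		return 0;
	return sector * fdm_reg_size;
}

int main(void)
{
	const int fdm_reg_size = 8;	/* illustrative FDM bytes per sector */
	const int bad_mark_sec = 3;	/* illustrative bad-mark sector index */
	int i;

	for (i = 0; i < 4; i++)
		printf("sector %d -> oob_poi + %d\n",
		       i, fdm_oob_offset(i, bad_mark_sec, fdm_reg_size));

	return 0;
}

With these example values the four sectors map to offsets 8, 16, 24 and 0, i.e. the bad-mark sector's FDM bytes end up at the very beginning of oob_poi.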
192 | static inline int mtk_data_len(struct nand_chip *chip) | ||
193 | { | ||
194 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
195 | |||
196 | return chip->ecc.size + mtk_nand->spare_per_sector; | ||
197 | } | ||
198 | |||
199 | static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i) | ||
200 | { | ||
201 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
202 | |||
203 | return nfc->buffer + i * mtk_data_len(chip); | ||
204 | } | ||
205 | |||
206 | static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i) | ||
207 | { | ||
208 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
209 | |||
210 | return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size; | ||
211 | } | ||
212 | |||
213 | static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg) | ||
214 | { | ||
215 | writel(val, nfc->regs + reg); | ||
216 | } | ||
217 | |||
218 | static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg) | ||
219 | { | ||
220 | writew(val, nfc->regs + reg); | ||
221 | } | ||
222 | |||
223 | static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg) | ||
224 | { | ||
225 | writeb(val, nfc->regs + reg); | ||
226 | } | ||
227 | |||
228 | static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg) | ||
229 | { | ||
230 | return readl_relaxed(nfc->regs + reg); | ||
231 | } | ||
232 | |||
233 | static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg) | ||
234 | { | ||
235 | return readw_relaxed(nfc->regs + reg); | ||
236 | } | ||
237 | |||
238 | static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg) | ||
239 | { | ||
240 | return readb_relaxed(nfc->regs + reg); | ||
241 | } | ||
242 | |||
243 | static void mtk_nfc_hw_reset(struct mtk_nfc *nfc) | ||
244 | { | ||
245 | struct device *dev = nfc->dev; | ||
246 | u32 val; | ||
247 | int ret; | ||
248 | |||
249 | /* reset all registers and force the NFI master to terminate */ | ||
250 | nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); | ||
251 | |||
252 | /* wait for the master to finish the last transaction */ | ||
253 | ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val, | ||
254 | !(val & MASTER_STA_MASK), 50, | ||
255 | MTK_RESET_TIMEOUT); | ||
256 | if (ret) | ||
257 | dev_warn(dev, "master active in reset [0x%x] = 0x%x\n", | ||
258 | NFI_MASTER_STA, val); | ||
259 | |||
260 | /* ensure any status register affected by the NFI master is reset */ | ||
261 | nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); | ||
262 | nfi_writew(nfc, STAR_DE, NFI_STRDATA); | ||
263 | } | ||
264 | |||
265 | static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command) | ||
266 | { | ||
267 | struct device *dev = nfc->dev; | ||
268 | u32 val; | ||
269 | int ret; | ||
270 | |||
271 | nfi_writel(nfc, command, NFI_CMD); | ||
272 | |||
273 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, | ||
274 | !(val & STA_CMD), 10, MTK_TIMEOUT); | ||
275 | if (ret) { | ||
276 | dev_warn(dev, "nfi core timed out entering command mode\n"); | ||
277 | return -EIO; | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr) | ||
284 | { | ||
285 | struct device *dev = nfc->dev; | ||
286 | u32 val; | ||
287 | int ret; | ||
288 | |||
289 | nfi_writel(nfc, addr, NFI_COLADDR); | ||
290 | nfi_writel(nfc, 0, NFI_ROWADDR); | ||
291 | nfi_writew(nfc, 1, NFI_ADDRNOB); | ||
292 | |||
293 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, | ||
294 | !(val & STA_ADDR), 10, MTK_TIMEOUT); | ||
295 | if (ret) { | ||
296 | dev_warn(dev, "nfi core timed out entering address mode\n"); | ||
297 | return -EIO; | ||
298 | } | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd) | ||
304 | { | ||
305 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
306 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
307 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
308 | u32 fmt, spare; | ||
309 | |||
310 | if (!mtd->writesize) | ||
311 | return 0; | ||
312 | |||
313 | spare = mtk_nand->spare_per_sector; | ||
314 | |||
315 | switch (mtd->writesize) { | ||
316 | case 512: | ||
317 | fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512; | ||
318 | break; | ||
319 | case KB(2): | ||
320 | if (chip->ecc.size == 512) | ||
321 | fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512; | ||
322 | else | ||
323 | fmt = PAGEFMT_512_2K; | ||
324 | break; | ||
325 | case KB(4): | ||
326 | if (chip->ecc.size == 512) | ||
327 | fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512; | ||
328 | else | ||
329 | fmt = PAGEFMT_2K_4K; | ||
330 | break; | ||
331 | case KB(8): | ||
332 | if (chip->ecc.size == 512) | ||
333 | fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512; | ||
334 | else | ||
335 | fmt = PAGEFMT_4K_8K; | ||
336 | break; | ||
337 | case KB(16): | ||
338 | fmt = PAGEFMT_8K_16K; | ||
339 | break; | ||
340 | default: | ||
341 | dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize); | ||
342 | return -EINVAL; | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * the hardware doubles the programmed spare value when the ECC sector | ||
347 | * size is 1024 bytes, so halve it here | ||
348 | */ | ||
349 | if (chip->ecc.size == 1024) | ||
350 | spare >>= 1; | ||
351 | |||
352 | switch (spare) { | ||
353 | case 16: | ||
354 | fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT); | ||
355 | break; | ||
356 | case 26: | ||
357 | fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT); | ||
358 | break; | ||
359 | case 27: | ||
360 | fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT); | ||
361 | break; | ||
362 | case 28: | ||
363 | fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT); | ||
364 | break; | ||
365 | case 32: | ||
366 | fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT); | ||
367 | break; | ||
368 | case 36: | ||
369 | fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT); | ||
370 | break; | ||
371 | case 40: | ||
372 | fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT); | ||
373 | break; | ||
374 | case 44: | ||
375 | fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT); | ||
376 | break; | ||
377 | case 48: | ||
378 | fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT); | ||
379 | break; | ||
380 | case 49: | ||
381 | fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT); | ||
382 | break; | ||
383 | case 50: | ||
384 | fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT); | ||
385 | break; | ||
386 | case 51: | ||
387 | fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT); | ||
388 | break; | ||
389 | case 52: | ||
390 | fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT); | ||
391 | break; | ||
392 | case 62: | ||
393 | fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT); | ||
394 | break; | ||
395 | case 63: | ||
396 | fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT); | ||
397 | break; | ||
398 | case 64: | ||
399 | fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT); | ||
400 | break; | ||
401 | default: | ||
402 | dev_err(nfc->dev, "invalid spare per sector %d\n", spare); | ||
403 | return -EINVAL; | ||
404 | } | ||
405 | |||
406 | fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT; | ||
407 | fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT; | ||
408 | nfi_writew(nfc, fmt, NFI_PAGEFMT); | ||
409 | |||
410 | nfc->ecc_cfg.strength = chip->ecc.strength; | ||
411 | nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size; | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip) | ||
417 | { | ||
418 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
419 | struct mtk_nfc *nfc = nand_get_controller_data(nand); | ||
420 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand); | ||
421 | |||
422 | if (chip < 0) | ||
423 | return; | ||
424 | |||
425 | mtk_nfc_hw_runtime_config(mtd); | ||
426 | |||
427 | nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL); | ||
428 | } | ||
429 | |||
430 | static int mtk_nfc_dev_ready(struct mtd_info *mtd) | ||
431 | { | ||
432 | struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); | ||
433 | |||
434 | if (nfi_readl(nfc, NFI_STA) & STA_BUSY) | ||
435 | return 0; | ||
436 | |||
437 | return 1; | ||
438 | } | ||
439 | |||
440 | static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) | ||
441 | { | ||
442 | struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); | ||
443 | |||
444 | if (ctrl & NAND_ALE) { | ||
445 | mtk_nfc_send_address(nfc, dat); | ||
446 | } else if (ctrl & NAND_CLE) { | ||
447 | mtk_nfc_hw_reset(nfc); | ||
448 | |||
449 | nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG); | ||
450 | mtk_nfc_send_command(nfc, dat); | ||
451 | } | ||
452 | } | ||
453 | |||
454 | static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc) | ||
455 | { | ||
456 | int rc; | ||
457 | u8 val; | ||
458 | |||
459 | rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val, | ||
460 | val & PIO_DI_RDY, 10, MTK_TIMEOUT); | ||
461 | if (rc < 0) | ||
462 | dev_err(nfc->dev, "data not ready\n"); | ||
463 | } | ||
464 | |||
465 | static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd) | ||
466 | { | ||
467 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
468 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
469 | u32 reg; | ||
470 | |||
471 | /* after each byte read, the NFI_STA reg is reset by the hardware */ | ||
472 | reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; | ||
473 | if (reg != NFI_FSM_CUSTDATA) { | ||
474 | reg = nfi_readw(nfc, NFI_CNFG); | ||
475 | reg |= CNFG_BYTE_RW | CNFG_READ_EN; | ||
476 | nfi_writew(nfc, reg, NFI_CNFG); | ||
477 | |||
478 | /* | ||
479 | * set to max sector to allow the HW to continue reading over | ||
480 | * unaligned accesses | ||
481 | */ | ||
482 | reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD; | ||
483 | nfi_writel(nfc, reg, NFI_CON); | ||
484 | |||
485 | /* trigger to fetch data */ | ||
486 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
487 | } | ||
488 | |||
489 | mtk_nfc_wait_ioready(nfc); | ||
490 | |||
491 | return nfi_readb(nfc, NFI_DATAR); | ||
492 | } | ||
493 | |||
494 | static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len) | ||
495 | { | ||
496 | int i; | ||
497 | |||
498 | for (i = 0; i < len; i++) | ||
499 | buf[i] = mtk_nfc_read_byte(mtd); | ||
500 | } | ||
501 | |||
502 | static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte) | ||
503 | { | ||
504 | struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); | ||
505 | u32 reg; | ||
506 | |||
507 | reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; | ||
508 | |||
509 | if (reg != NFI_FSM_CUSTDATA) { | ||
510 | reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW; | ||
511 | nfi_writew(nfc, reg, NFI_CNFG); | ||
512 | |||
513 | reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR; | ||
514 | nfi_writel(nfc, reg, NFI_CON); | ||
515 | |||
516 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
517 | } | ||
518 | |||
519 | mtk_nfc_wait_ioready(nfc); | ||
520 | nfi_writeb(nfc, byte, NFI_DATAW); | ||
521 | } | ||
522 | |||
523 | static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | ||
524 | { | ||
525 | int i; | ||
526 | |||
527 | for (i = 0; i < len; i++) | ||
528 | mtk_nfc_write_byte(mtd, buf[i]); | ||
529 | } | ||
530 | |||
531 | static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data) | ||
532 | { | ||
533 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
534 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
535 | int size = chip->ecc.size + mtk_nand->fdm.reg_size; | ||
536 | |||
537 | nfc->ecc_cfg.mode = ECC_DMA_MODE; | ||
538 | nfc->ecc_cfg.op = ECC_ENCODE; | ||
539 | |||
540 | return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size); | ||
541 | } | ||
542 | |||
543 | static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c) | ||
544 | { | ||
545 | /* nop */ | ||
546 | } | ||
547 | |||
548 | static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw) | ||
549 | { | ||
550 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
551 | struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip); | ||
552 | u32 bad_pos = nand->bad_mark.pos; | ||
553 | |||
554 | if (raw) | ||
555 | bad_pos += nand->bad_mark.sec * mtk_data_len(chip); | ||
556 | else | ||
557 | bad_pos += nand->bad_mark.sec * chip->ecc.size; | ||
558 | |||
559 | swap(chip->oob_poi[0], buf[bad_pos]); | ||
560 | } | ||
561 | |||
562 | static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset, | ||
563 | u32 len, const u8 *buf) | ||
564 | { | ||
565 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
566 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
567 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
568 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
569 | u32 start, end; | ||
570 | int i, ret; | ||
571 | |||
572 | start = offset / chip->ecc.size; | ||
573 | end = DIV_ROUND_UP(offset + len, chip->ecc.size); | ||
574 | |||
575 | memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); | ||
576 | for (i = 0; i < chip->ecc.steps; i++) { | ||
577 | memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), | ||
578 | chip->ecc.size); | ||
579 | |||
580 | if (start > i || i >= end) | ||
581 | continue; | ||
582 | |||
583 | if (i == mtk_nand->bad_mark.sec) | ||
584 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); | ||
585 | |||
586 | memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); | ||
587 | |||
588 | /* compute the ECC parity for this sector and store it back in the OOB */ | ||
589 | ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i)); | ||
590 | if (ret < 0) | ||
591 | return ret; | ||
592 | } | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf) | ||
598 | { | ||
599 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
600 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
601 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
602 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
603 | u32 i; | ||
604 | |||
605 | memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); | ||
606 | for (i = 0; i < chip->ecc.steps; i++) { | ||
607 | if (buf) | ||
608 | memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), | ||
609 | chip->ecc.size); | ||
610 | |||
611 | if (i == mtk_nand->bad_mark.sec) | ||
612 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); | ||
613 | |||
614 | memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start, | ||
619 | u32 sectors) | ||
620 | { | ||
621 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
622 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
623 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
624 | u32 vall, valm; | ||
625 | u8 *oobptr; | ||
626 | int i, j; | ||
627 | |||
628 | for (i = 0; i < sectors; i++) { | ||
629 | oobptr = oob_ptr(chip, start + i); | ||
630 | vall = nfi_readl(nfc, NFI_FDML(i)); | ||
631 | valm = nfi_readl(nfc, NFI_FDMM(i)); | ||
632 | |||
633 | for (j = 0; j < fdm->reg_size; j++) | ||
634 | oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8); | ||
635 | } | ||
636 | } | ||
637 | |||
638 | static inline void mtk_nfc_write_fdm(struct nand_chip *chip) | ||
639 | { | ||
640 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
641 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
642 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
643 | u32 vall, valm; | ||
644 | u8 *oobptr; | ||
645 | int i, j; | ||
646 | |||
647 | for (i = 0; i < chip->ecc.steps; i++) { | ||
648 | oobptr = oob_ptr(chip, i); | ||
649 | vall = 0; | ||
650 | valm = 0; | ||
651 | for (j = 0; j < 8; j++) { | ||
652 | if (j < 4) | ||
653 | vall |= (j < fdm->reg_size ? oobptr[j] : 0xff) | ||
654 | << (j * 8); | ||
655 | else | ||
656 | valm |= (j < fdm->reg_size ? oobptr[j] : 0xff) | ||
657 | << ((j - 4) * 8); | ||
658 | } | ||
659 | nfi_writel(nfc, vall, NFI_FDML(i)); | ||
660 | nfi_writel(nfc, valm, NFI_FDMM(i)); | ||
661 | } | ||
662 | } | ||
663 | |||
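The FDML/FDMM packing in mtk_nfc_write_fdm() above is plain little-endian byte packing with 0xff padding for the unused lanes. A standalone sketch (host C; the 6-byte FDM content is made up for illustration):

#include <stdint.h>
#include <stdio.h>

/*
 * Pack up to 8 FDM bytes of one sector into the two 32-bit FDM registers,
 * padding the unused byte lanes with 0xff, like the loop in
 * mtk_nfc_write_fdm().
 */
static void pack_fdm(const uint8_t *oob, int reg_size,
		     uint32_t *vall, uint32_t *valm)
{
	int j;

	*vall = 0;
	*valm = 0;
	for (j = 0; j < 8; j++) {
		uint32_t byte = (j < reg_size) ? oob[j] : 0xff;

		if (j < 4)
			*vall |= byte << (j * 8);
		else
			*valm |= byte << ((j - 4) * 8);
	}
}

int main(void)
{
	const uint8_t oob[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	uint32_t vall, valm;

	pack_fdm(oob, 6, &vall, &valm);
	printf("FDML = 0x%08x, FDMM = 0x%08x\n", vall, valm);

	return 0;
}

For this input the sketch prints FDML = 0x44332211 and FDMM = 0xffff6655; mtk_nfc_read_fdm() simply performs the inverse unpacking.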
664 | static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip, | ||
665 | const u8 *buf, int page, int len) | ||
666 | { | ||
667 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
668 | struct device *dev = nfc->dev; | ||
669 | dma_addr_t addr; | ||
670 | u32 reg; | ||
671 | int ret; | ||
672 | |||
673 | addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE); | ||
674 | ret = dma_mapping_error(nfc->dev, addr); | ||
675 | if (ret) { | ||
676 | dev_err(nfc->dev, "dma mapping error\n"); | ||
677 | return -EINVAL; | ||
678 | } | ||
679 | |||
680 | reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN; | ||
681 | nfi_writew(nfc, reg, NFI_CNFG); | ||
682 | |||
683 | nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON); | ||
684 | nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); | ||
685 | nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); | ||
686 | |||
687 | init_completion(&nfc->done); | ||
688 | |||
689 | reg = nfi_readl(nfc, NFI_CON) | CON_BWR; | ||
690 | nfi_writel(nfc, reg, NFI_CON); | ||
691 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
692 | |||
693 | ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); | ||
694 | if (!ret) { | ||
695 | dev_err(dev, "program ahb done timeout\n"); | ||
696 | nfi_writew(nfc, 0, NFI_INTR_EN); | ||
697 | ret = -ETIMEDOUT; | ||
698 | goto timeout; | ||
699 | } | ||
700 | |||
701 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, | ||
702 | (reg & CNTR_MASK) >= chip->ecc.steps, | ||
703 | 10, MTK_TIMEOUT); | ||
704 | if (ret) | ||
705 | dev_err(dev, "hwecc write timeout\n"); | ||
706 | |||
707 | timeout: | ||
708 | |||
709 | dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE); | ||
710 | nfi_writel(nfc, 0, NFI_CON); | ||
711 | |||
712 | return ret; | ||
713 | } | ||
714 | |||
715 | static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip, | ||
716 | const u8 *buf, int page, int raw) | ||
717 | { | ||
718 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
719 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
720 | size_t len; | ||
721 | const u8 *bufpoi; | ||
722 | u32 reg; | ||
723 | int ret; | ||
724 | |||
725 | if (!raw) { | ||
726 | /* OOB => FDM: from register, ECC: from HW */ | ||
727 | reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN; | ||
728 | nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG); | ||
729 | |||
730 | nfc->ecc_cfg.op = ECC_ENCODE; | ||
731 | nfc->ecc_cfg.mode = ECC_NFI_MODE; | ||
732 | ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); | ||
733 | if (ret) { | ||
734 | /* clear NFI config */ | ||
735 | reg = nfi_readw(nfc, NFI_CNFG); | ||
736 | reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); | ||
737 | nfi_writew(nfc, reg, NFI_CNFG); | ||
738 | |||
739 | return ret; | ||
740 | } | ||
741 | |||
742 | memcpy(nfc->buffer, buf, mtd->writesize); | ||
743 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw); | ||
744 | bufpoi = nfc->buffer; | ||
745 | |||
746 | /* write OOB into the FDM registers (OOB area in MTK NAND) */ | ||
747 | mtk_nfc_write_fdm(chip); | ||
748 | } else { | ||
749 | bufpoi = buf; | ||
750 | } | ||
751 | |||
752 | len = mtd->writesize + (raw ? mtd->oobsize : 0); | ||
753 | ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len); | ||
754 | |||
755 | if (!raw) | ||
756 | mtk_ecc_disable(nfc->ecc); | ||
757 | |||
758 | return ret; | ||
759 | } | ||
760 | |||
761 | static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd, | ||
762 | struct nand_chip *chip, const u8 *buf, | ||
763 | int oob_on, int page) | ||
764 | { | ||
765 | return mtk_nfc_write_page(mtd, chip, buf, page, 0); | ||
766 | } | ||
767 | |||
768 | static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | ||
769 | const u8 *buf, int oob_on, int pg) | ||
770 | { | ||
771 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
772 | |||
773 | mtk_nfc_format_page(mtd, buf); | ||
774 | return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1); | ||
775 | } | ||
776 | |||
777 | static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd, | ||
778 | struct nand_chip *chip, u32 offset, | ||
779 | u32 data_len, const u8 *buf, | ||
780 | int oob_on, int page) | ||
781 | { | ||
782 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
783 | int ret; | ||
784 | |||
785 | ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf); | ||
786 | if (ret < 0) | ||
787 | return ret; | ||
788 | |||
789 | /* use the data in the private buffer (now with FDM and ECC parity) */ | ||
790 | return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1); | ||
791 | } | ||
792 | |||
793 | static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | ||
794 | int page) | ||
795 | { | ||
796 | int ret; | ||
797 | |||
798 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); | ||
799 | |||
800 | ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page); | ||
801 | if (ret < 0) | ||
802 | return -EIO; | ||
803 | |||
804 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); | ||
805 | ret = chip->waitfunc(mtd, chip); | ||
806 | |||
807 | return ret & NAND_STATUS_FAIL ? -EIO : 0; | ||
808 | } | ||
809 | |||
810 | static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) | ||
811 | { | ||
812 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
813 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
814 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
815 | struct mtk_ecc_stats stats; | ||
816 | int rc, i; | ||
817 | |||
818 | rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; | ||
819 | if (rc) { | ||
820 | memset(buf, 0xff, sectors * chip->ecc.size); | ||
821 | for (i = 0; i < sectors; i++) | ||
822 | memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); | ||
823 | return 0; | ||
824 | } | ||
825 | |||
826 | mtk_ecc_get_stats(nfc->ecc, &stats, sectors); | ||
827 | mtd->ecc_stats.corrected += stats.corrected; | ||
828 | mtd->ecc_stats.failed += stats.failed; | ||
829 | |||
830 | return stats.bitflips; | ||
831 | } | ||
832 | |||
833 | static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | ||
834 | u32 data_offs, u32 readlen, | ||
835 | u8 *bufpoi, int page, int raw) | ||
836 | { | ||
837 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
838 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
839 | u32 spare = mtk_nand->spare_per_sector; | ||
840 | u32 column, sectors, start, end, reg; | ||
841 | dma_addr_t addr; | ||
842 | int bitflips; | ||
843 | size_t len; | ||
844 | u8 *buf; | ||
845 | int rc; | ||
846 | |||
847 | start = data_offs / chip->ecc.size; | ||
848 | end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); | ||
849 | |||
850 | sectors = end - start; | ||
851 | column = start * (chip->ecc.size + spare); | ||
852 | |||
853 | len = sectors * chip->ecc.size + (raw ? sectors * spare : 0); | ||
854 | buf = bufpoi + start * chip->ecc.size; | ||
855 | |||
856 | if (column != 0) | ||
857 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1); | ||
858 | |||
859 | addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE); | ||
860 | rc = dma_mapping_error(nfc->dev, addr); | ||
861 | if (rc) { | ||
862 | dev_err(nfc->dev, "dma mapping error\n"); | ||
863 | |||
864 | return -EINVAL; | ||
865 | } | ||
866 | |||
867 | reg = nfi_readw(nfc, NFI_CNFG); | ||
868 | reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB; | ||
869 | if (!raw) { | ||
870 | reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN; | ||
871 | nfi_writew(nfc, reg, NFI_CNFG); | ||
872 | |||
873 | nfc->ecc_cfg.mode = ECC_NFI_MODE; | ||
874 | nfc->ecc_cfg.sectors = sectors; | ||
875 | nfc->ecc_cfg.op = ECC_DECODE; | ||
876 | rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); | ||
877 | if (rc) { | ||
878 | dev_err(nfc->dev, "ecc enable\n"); | ||
879 | /* clear NFI_CNFG */ | ||
880 | reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN | | ||
881 | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); | ||
882 | nfi_writew(nfc, reg, NFI_CNFG); | ||
883 | dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); | ||
884 | |||
885 | return rc; | ||
886 | } | ||
887 | } else { | ||
888 | nfi_writew(nfc, reg, NFI_CNFG); | ||
889 | } | ||
890 | |||
891 | nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON); | ||
892 | nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); | ||
893 | nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); | ||
894 | |||
895 | init_completion(&nfc->done); | ||
896 | reg = nfi_readl(nfc, NFI_CON) | CON_BRD; | ||
897 | nfi_writel(nfc, reg, NFI_CON); | ||
898 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
899 | |||
900 | rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); | ||
901 | if (!rc) | ||
902 | dev_warn(nfc->dev, "read ahb/dma done timeout\n"); | ||
903 | |||
904 | rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, | ||
905 | (reg & CNTR_MASK) >= sectors, 10, | ||
906 | MTK_TIMEOUT); | ||
907 | if (rc < 0) { | ||
908 | dev_err(nfc->dev, "subpage done timeout\n"); | ||
909 | bitflips = -EIO; | ||
910 | } else { | ||
911 | bitflips = 0; | ||
912 | if (!raw) { | ||
913 | rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); | ||
914 | bitflips = rc < 0 ? -ETIMEDOUT : | ||
915 | mtk_nfc_update_ecc_stats(mtd, buf, sectors); | ||
916 | mtk_nfc_read_fdm(chip, start, sectors); | ||
917 | } | ||
918 | } | ||
919 | |||
920 | dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); | ||
921 | |||
922 | if (raw) | ||
923 | goto done; | ||
924 | |||
925 | mtk_ecc_disable(nfc->ecc); | ||
926 | |||
927 | if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec) | ||
928 | mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw); | ||
929 | done: | ||
930 | nfi_writel(nfc, 0, NFI_CON); | ||
931 | |||
932 | return bitflips; | ||
933 | } | ||
934 | |||
935 | static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd, | ||
936 | struct nand_chip *chip, u32 off, | ||
937 | u32 len, u8 *p, int pg) | ||
938 | { | ||
939 | return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0); | ||
940 | } | ||
941 | |||
942 | static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd, | ||
943 | struct nand_chip *chip, u8 *p, | ||
944 | int oob_on, int pg) | ||
945 | { | ||
946 | return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0); | ||
947 | } | ||
948 | |||
949 | static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | ||
950 | u8 *buf, int oob_on, int page) | ||
951 | { | ||
952 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
953 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
954 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
955 | int i, ret; | ||
956 | |||
957 | memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); | ||
958 | ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer, | ||
959 | page, 1); | ||
960 | if (ret < 0) | ||
961 | return ret; | ||
962 | |||
963 | for (i = 0; i < chip->ecc.steps; i++) { | ||
964 | memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size); | ||
965 | |||
966 | if (i == mtk_nand->bad_mark.sec) | ||
967 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); | ||
968 | |||
969 | if (buf) | ||
970 | memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i), | ||
971 | chip->ecc.size); | ||
972 | } | ||
973 | |||
974 | return ret; | ||
975 | } | ||
976 | |||
977 | static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | ||
978 | int page) | ||
979 | { | ||
980 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
981 | |||
982 | return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page); | ||
983 | } | ||
984 | |||
985 | static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc) | ||
986 | { | ||
987 | /* | ||
988 | * ACCON: access timing control register | ||
989 | * ------------------------------------- | ||
990 | * 31:28: minimum required time for CS post pulling down after accessing | ||
991 | * the device | ||
992 | * 27:22: minimum required time for CS pre pulling down before accessing | ||
993 | * the device | ||
994 | * 21:16: minimum required time from NCEB low to NREB low | ||
995 | * 15:12: minimum required time from NWEB high to NREB low. | ||
996 | * 11:08: write enable hold time | ||
997 | * 07:04: write wait states | ||
998 | * 03:00: read wait states | ||
999 | */ | ||
1000 | nfi_writel(nfc, 0x10804211, NFI_ACCCON); | ||
1001 | |||
1002 | /* | ||
1003 | * CNRNB: nand ready/busy register | ||
1004 | * ------------------------------- | ||
1005 | * 7:4: timeout register for polling the NAND busy/ready signal | ||
1006 | * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles. | ||
1007 | */ | ||
1008 | nfi_writew(nfc, 0xf1, NFI_CNRNB); | ||
1009 | nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT); | ||
1010 | |||
1011 | mtk_nfc_hw_reset(nfc); | ||
1012 | |||
1013 | nfi_readl(nfc, NFI_INTR_STA); | ||
1014 | nfi_writel(nfc, 0, NFI_INTR_EN); | ||
1015 | } | ||
1016 | |||
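The ACCCON field layout documented in the comment above can be checked with a small host-side decoder (a sketch only; the field names are shortened, and the decoded value is the one written by mtk_nfc_hw_init()):

#include <stdint.h>
#include <stdio.h>

/* Extract the bit field [hi:lo] from a 32-bit register value. */
static uint32_t field(uint32_t reg, int hi, int lo)
{
	return (reg >> lo) & ((1u << (hi - lo + 1)) - 1);
}

int main(void)
{
	const uint32_t acccon = 0x10804211;	/* value programmed into NFI_ACCCON */

	printf("CS post-pulling time  [31:28] = %u\n", field(acccon, 31, 28));
	printf("CS pre-pulling time   [27:22] = %u\n", field(acccon, 27, 22));
	printf("NCEB low to NREB low  [21:16] = %u\n", field(acccon, 21, 16));
	printf("NWEB high to NREB low [15:12] = %u\n", field(acccon, 15, 12));
	printf("write enable hold     [11:8]  = %u\n", field(acccon, 11, 8));
	printf("write wait states     [7:4]   = %u\n", field(acccon, 7, 4));
	printf("read wait states      [3:0]   = %u\n", field(acccon, 3, 0));

	return 0;
}

Running it shows that 0x10804211 encodes 1 / 2 / 0 / 4 / 2 / 1 / 1 for the seven fields, in the order listed in the comment.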
1017 | static irqreturn_t mtk_nfc_irq(int irq, void *id) | ||
1018 | { | ||
1019 | struct mtk_nfc *nfc = id; | ||
1020 | u16 sta, ien; | ||
1021 | |||
1022 | sta = nfi_readw(nfc, NFI_INTR_STA); | ||
1023 | ien = nfi_readw(nfc, NFI_INTR_EN); | ||
1024 | |||
1025 | if (!(sta & ien)) | ||
1026 | return IRQ_NONE; | ||
1027 | |||
1028 | nfi_writew(nfc, ~sta & ien, NFI_INTR_EN); | ||
1029 | complete(&nfc->done); | ||
1030 | |||
1031 | return IRQ_HANDLED; | ||
1032 | } | ||
1033 | |||
1034 | static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk) | ||
1035 | { | ||
1036 | int ret; | ||
1037 | |||
1038 | ret = clk_prepare_enable(clk->nfi_clk); | ||
1039 | if (ret) { | ||
1040 | dev_err(dev, "failed to enable nfi clk\n"); | ||
1041 | return ret; | ||
1042 | } | ||
1043 | |||
1044 | ret = clk_prepare_enable(clk->pad_clk); | ||
1045 | if (ret) { | ||
1046 | dev_err(dev, "failed to enable pad clk\n"); | ||
1047 | clk_disable_unprepare(clk->nfi_clk); | ||
1048 | return ret; | ||
1049 | } | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | |||
1054 | static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk) | ||
1055 | { | ||
1056 | clk_disable_unprepare(clk->nfi_clk); | ||
1057 | clk_disable_unprepare(clk->pad_clk); | ||
1058 | } | ||
1059 | |||
1060 | static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section, | ||
1061 | struct mtd_oob_region *oob_region) | ||
1062 | { | ||
1063 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
1064 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
1065 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
1066 | u32 eccsteps; | ||
1067 | |||
1068 | eccsteps = mtd->writesize / chip->ecc.size; | ||
1069 | |||
1070 | if (section >= eccsteps) | ||
1071 | return -ERANGE; | ||
1072 | |||
1073 | oob_region->length = fdm->reg_size - fdm->ecc_size; | ||
1074 | oob_region->offset = section * fdm->reg_size + fdm->ecc_size; | ||
1075 | |||
1076 | return 0; | ||
1077 | } | ||
1078 | |||
1079 | static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, | ||
1080 | struct mtd_oob_region *oob_region) | ||
1081 | { | ||
1082 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
1083 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
1084 | u32 eccsteps; | ||
1085 | |||
1086 | if (section) | ||
1087 | return -ERANGE; | ||
1088 | |||
1089 | eccsteps = mtd->writesize / chip->ecc.size; | ||
1090 | oob_region->offset = mtk_nand->fdm.reg_size * eccsteps; | ||
1091 | oob_region->length = mtd->oobsize - oob_region->offset; | ||
1092 | |||
1093 | return 0; | ||
1094 | } | ||
1095 | |||
1096 | static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = { | ||
1097 | .free = mtk_nfc_ooblayout_free, | ||
1098 | .ecc = mtk_nfc_ooblayout_ecc, | ||
1099 | }; | ||
1100 | |||
1101 | static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd) | ||
1102 | { | ||
1103 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1104 | struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand); | ||
1105 | u32 ecc_bytes; | ||
1106 | |||
1107 | ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8); | ||
1108 | |||
1109 | fdm->reg_size = chip->spare_per_sector - ecc_bytes; | ||
1110 | if (fdm->reg_size > NFI_FDM_MAX_SIZE) | ||
1111 | fdm->reg_size = NFI_FDM_MAX_SIZE; | ||
1112 | |||
1113 | /* bad block mark storage */ | ||
1114 | fdm->ecc_size = 1; | ||
1115 | } | ||
1116 | |||
1117 | static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl, | ||
1118 | struct mtd_info *mtd) | ||
1119 | { | ||
1120 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1121 | |||
1122 | if (mtd->writesize == 512) { | ||
1123 | bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap; | ||
1124 | } else { | ||
1125 | bm_ctl->bm_swap = mtk_nfc_bad_mark_swap; | ||
1126 | bm_ctl->sec = mtd->writesize / mtk_data_len(nand); | ||
1127 | bm_ctl->pos = mtd->writesize % mtk_data_len(nand); | ||
1128 | } | ||
1129 | } | ||
1130 | |||
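A quick worked example of the bad-mark bookkeeping above (the sizes are illustrative, not mandated by the driver): with a 2048-byte page, 512-byte ECC sectors and 16 spare bytes per sector, one raw sector occupies 512 + 16 = 528 bytes, so the factory bad-block marker located at raw offset 2048 falls into sector 2048 / 528 = 3, at offset 2048 % 528 = 464 inside that sector's data area. Those are exactly the values stored in bad_mark.sec and bad_mark.pos, and mtk_nfc_bad_mark_swap() uses them to swap that byte with oob_poi[0] in both the raw and the ECC-formatted layouts.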
1131 | static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd) | ||
1132 | { | ||
1133 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1134 | u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44, | ||
1135 | 48, 49, 50, 51, 52, 62, 63, 64}; | ||
1136 | u32 eccsteps, i; | ||
1137 | |||
1138 | eccsteps = mtd->writesize / nand->ecc.size; | ||
1139 | *sps = mtd->oobsize / eccsteps; | ||
1140 | |||
1141 | if (nand->ecc.size == 1024) | ||
1142 | *sps >>= 1; | ||
1143 | |||
1144 | for (i = 0; i < ARRAY_SIZE(spare); i++) { | ||
1145 | if (*sps <= spare[i]) { | ||
1146 | if (!i) | ||
1147 | *sps = spare[i]; | ||
1148 | else if (*sps != spare[i]) | ||
1149 | *sps = spare[i - 1]; | ||
1150 | break; | ||
1151 | } | ||
1152 | } | ||
1153 | |||
1154 | if (i >= ARRAY_SIZE(spare)) | ||
1155 | *sps = spare[ARRAY_SIZE(spare) - 1]; | ||
1156 | |||
1157 | if (nand->ecc.size == 1024) | ||
1158 | *sps <<= 1; | ||
1159 | } | ||
1160 | |||
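Rounding the raw spare-per-sector value down to a size the PAGEFMT register can express, as done above, can be reproduced with the following host-side sketch (the calls in main() use illustrative page/OOB combinations, and the 1024-byte-ECC halving/doubling step is left out):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/*
 * Round a raw spare-per-sector value down to the closest size supported by
 * the NFI PAGEFMT register, mirroring mtk_nfc_set_spare_per_sector().
 */
static unsigned int round_spare(unsigned int sps)
{
	static const unsigned int spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
					     48, 49, 50, 51, 52, 62, 63, 64};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(spare); i++) {
		if (sps <= spare[i])
			return (i == 0 || sps == spare[i]) ? spare[i]
							   : spare[i - 1];
	}

	return spare[ARRAY_SIZE(spare) - 1];	/* clamp to the largest entry */
}

int main(void)
{
	printf("raw 16 -> %u\n", round_spare(16));	/* e.g. 64B OOB / 4 sectors */
	printf("raw 28 -> %u\n", round_spare(28));	/* e.g. 224B OOB / 8 sectors */
	printf("raw 30 -> %u\n", round_spare(30));	/* in-between values round down */

	return 0;
}

The last call prints 28: a value that falls between two table entries is rounded down to the lower one, never up, so the controller is never asked for more spare bytes than the chip actually has.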
1161 | static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) | ||
1162 | { | ||
1163 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1164 | u32 spare; | ||
1165 | int free; | ||
1166 | |||
1167 | /* support only ecc hw mode */ | ||
1168 | if (nand->ecc.mode != NAND_ECC_HW) { | ||
1169 | dev_err(dev, "ecc.mode not supported\n"); | ||
1170 | return -EINVAL; | ||
1171 | } | ||
1172 | |||
1173 | /* if optional dt settings not present */ | ||
1174 | if (!nand->ecc.size || !nand->ecc.strength) { | ||
1175 | /* use datasheet requirements */ | ||
1176 | nand->ecc.strength = nand->ecc_strength_ds; | ||
1177 | nand->ecc.size = nand->ecc_step_ds; | ||
1178 | |||
1179 | /* | ||
1180 | * align eccstrength and eccsize | ||
1181 | * this controller only supports 512 and 1024 sizes | ||
1182 | */ | ||
1183 | if (nand->ecc.size < 1024) { | ||
1184 | if (mtd->writesize > 512) { | ||
1185 | nand->ecc.size = 1024; | ||
1186 | nand->ecc.strength <<= 1; | ||
1187 | } else { | ||
1188 | nand->ecc.size = 512; | ||
1189 | } | ||
1190 | } else { | ||
1191 | nand->ecc.size = 1024; | ||
1192 | } | ||
1193 | |||
1194 | mtk_nfc_set_spare_per_sector(&spare, mtd); | ||
1195 | |||
1196 | /* calculate oob bytes except ecc parity data */ | ||
1197 | free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3; | ||
1198 | free = spare - free; | ||
1199 | |||
1200 | /* | ||
1201 | * increase the ECC strength if the leftover OOB is larger than the | ||
1202 | * maximum FDM size, or reduce it if the OOB is too small to hold the | ||
1203 | * ECC parity data (see the worked example after this function). | ||
1204 | */ | ||
1205 | if (free > NFI_FDM_MAX_SIZE) { | ||
1206 | spare -= NFI_FDM_MAX_SIZE; | ||
1207 | nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; | ||
1208 | } else if (free < 0) { | ||
1209 | spare -= NFI_FDM_MIN_SIZE; | ||
1210 | nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; | ||
1211 | } | ||
1212 | } | ||
1213 | |||
1214 | mtk_ecc_adjust_strength(&nand->ecc.strength); | ||
1215 | |||
1216 | dev_info(dev, "eccsize %d eccstrength %d\n", | ||
1217 | nand->ecc.size, nand->ecc.strength); | ||
1218 | |||
1219 | return 0; | ||
1220 | } | ||
1221 | |||
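A worked example of the strength adjustment above, assuming for illustration that ECC_PARITY_BITS is 14 (the constant lives in mtk_ecc.h, which is not part of this hunk, so treat that number as an assumption): take 1024-byte ECC steps, a datasheet strength of 16 bits and 52 spare bytes per sector. The parity then needs DIV_ROUND_UP(16 * 14, 8) = 28 bytes, leaving 52 - 28 = 24 free bytes. Since 24 exceeds NFI_FDM_MAX_SIZE (8), the driver reserves only 8 bytes for FDM and turns the remaining 52 - 8 = 44 bytes into extra correction capability: (44 * 8) / 14 = 25 bits, which mtk_ecc_adjust_strength() finally clamps to the closest strength the ECC engine supports.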
1222 | static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, | ||
1223 | struct device_node *np) | ||
1224 | { | ||
1225 | struct mtk_nfc_nand_chip *chip; | ||
1226 | struct nand_chip *nand; | ||
1227 | struct mtd_info *mtd; | ||
1228 | int nsels, len; | ||
1229 | u32 tmp; | ||
1230 | int ret; | ||
1231 | int i; | ||
1232 | |||
1233 | if (!of_get_property(np, "reg", &nsels)) | ||
1234 | return -ENODEV; | ||
1235 | |||
1236 | nsels /= sizeof(u32); | ||
1237 | if (!nsels || nsels > MTK_NAND_MAX_NSELS) { | ||
1238 | dev_err(dev, "invalid reg property size %d\n", nsels); | ||
1239 | return -EINVAL; | ||
1240 | } | ||
1241 | |||
1242 | chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8), | ||
1243 | GFP_KERNEL); | ||
1244 | if (!chip) | ||
1245 | return -ENOMEM; | ||
1246 | |||
1247 | chip->nsels = nsels; | ||
1248 | for (i = 0; i < nsels; i++) { | ||
1249 | ret = of_property_read_u32_index(np, "reg", i, &tmp); | ||
1250 | if (ret) { | ||
1251 | dev_err(dev, "reg property failure : %d\n", ret); | ||
1252 | return ret; | ||
1253 | } | ||
1254 | chip->sels[i] = tmp; | ||
1255 | } | ||
1256 | |||
1257 | nand = &chip->nand; | ||
1258 | nand->controller = &nfc->controller; | ||
1259 | |||
1260 | nand_set_flash_node(nand, np); | ||
1261 | nand_set_controller_data(nand, nfc); | ||
1262 | |||
1263 | nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ; | ||
1264 | nand->dev_ready = mtk_nfc_dev_ready; | ||
1265 | nand->select_chip = mtk_nfc_select_chip; | ||
1266 | nand->write_byte = mtk_nfc_write_byte; | ||
1267 | nand->write_buf = mtk_nfc_write_buf; | ||
1268 | nand->read_byte = mtk_nfc_read_byte; | ||
1269 | nand->read_buf = mtk_nfc_read_buf; | ||
1270 | nand->cmd_ctrl = mtk_nfc_cmd_ctrl; | ||
1271 | |||
1272 | /* set default mode in case dt entry is missing */ | ||
1273 | nand->ecc.mode = NAND_ECC_HW; | ||
1274 | |||
1275 | nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc; | ||
1276 | nand->ecc.write_page_raw = mtk_nfc_write_page_raw; | ||
1277 | nand->ecc.write_page = mtk_nfc_write_page_hwecc; | ||
1278 | nand->ecc.write_oob_raw = mtk_nfc_write_oob_std; | ||
1279 | nand->ecc.write_oob = mtk_nfc_write_oob_std; | ||
1280 | |||
1281 | nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc; | ||
1282 | nand->ecc.read_page_raw = mtk_nfc_read_page_raw; | ||
1283 | nand->ecc.read_page = mtk_nfc_read_page_hwecc; | ||
1284 | nand->ecc.read_oob_raw = mtk_nfc_read_oob_std; | ||
1285 | nand->ecc.read_oob = mtk_nfc_read_oob_std; | ||
1286 | |||
1287 | mtd = nand_to_mtd(nand); | ||
1288 | mtd->owner = THIS_MODULE; | ||
1289 | mtd->dev.parent = dev; | ||
1290 | mtd->name = MTK_NAME; | ||
1291 | mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops); | ||
1292 | |||
1293 | mtk_nfc_hw_init(nfc); | ||
1294 | |||
1295 | ret = nand_scan_ident(mtd, nsels, NULL); | ||
1296 | if (ret) | ||
1297 | return -ENODEV; | ||
1298 | |||
1299 | /* store the BBT magic in the page area, because the OOB is not protected */ | ||
1300 | if (nand->bbt_options & NAND_BBT_USE_FLASH) | ||
1301 | nand->bbt_options |= NAND_BBT_NO_OOB; | ||
1302 | |||
1303 | ret = mtk_nfc_ecc_init(dev, mtd); | ||
1304 | if (ret) | ||
1305 | return -EINVAL; | ||
1306 | |||
1307 | if (nand->options & NAND_BUSWIDTH_16) { | ||
1308 | dev_err(dev, "16-bit buswidth not supported"); | ||
1309 | return -EINVAL; | ||
1310 | } | ||
1311 | |||
1312 | mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd); | ||
1313 | mtk_nfc_set_fdm(&chip->fdm, mtd); | ||
1314 | mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd); | ||
1315 | |||
1316 | len = mtd->writesize + mtd->oobsize; | ||
1317 | nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL); | ||
1318 | if (!nfc->buffer) | ||
1319 | return -ENOMEM; | ||
1320 | |||
1321 | ret = nand_scan_tail(mtd); | ||
1322 | if (ret) | ||
1323 | return -ENODEV; | ||
1324 | |||
1325 | ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); | ||
1326 | if (ret) { | ||
1327 | dev_err(dev, "mtd parse partition error\n"); | ||
1328 | nand_release(mtd); | ||
1329 | return ret; | ||
1330 | } | ||
1331 | |||
1332 | list_add_tail(&chip->node, &nfc->chips); | ||
1333 | |||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) | ||
1338 | { | ||
1339 | struct device_node *np = dev->of_node; | ||
1340 | struct device_node *nand_np; | ||
1341 | int ret; | ||
1342 | |||
1343 | for_each_child_of_node(np, nand_np) { | ||
1344 | ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np); | ||
1345 | if (ret) { | ||
1346 | of_node_put(nand_np); | ||
1347 | return ret; | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | return 0; | ||
1352 | } | ||
1353 | |||
1354 | static int mtk_nfc_probe(struct platform_device *pdev) | ||
1355 | { | ||
1356 | struct device *dev = &pdev->dev; | ||
1357 | struct device_node *np = dev->of_node; | ||
1358 | struct mtk_nfc *nfc; | ||
1359 | struct resource *res; | ||
1360 | int ret, irq; | ||
1361 | |||
1362 | nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); | ||
1363 | if (!nfc) | ||
1364 | return -ENOMEM; | ||
1365 | |||
1366 | spin_lock_init(&nfc->controller.lock); | ||
1367 | init_waitqueue_head(&nfc->controller.wq); | ||
1368 | INIT_LIST_HEAD(&nfc->chips); | ||
1369 | |||
1370 | /* defer probing if the ECC engine is not ready yet */ | ||
1371 | nfc->ecc = of_mtk_ecc_get(np); | ||
1372 | if (IS_ERR(nfc->ecc)) | ||
1373 | return PTR_ERR(nfc->ecc); | ||
1374 | else if (!nfc->ecc) | ||
1375 | return -ENODEV; | ||
1376 | |||
1377 | nfc->dev = dev; | ||
1378 | |||
1379 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1380 | nfc->regs = devm_ioremap_resource(dev, res); | ||
1381 | if (IS_ERR(nfc->regs)) { | ||
1382 | ret = PTR_ERR(nfc->regs); | ||
1383 | dev_err(dev, "no nfi base\n"); | ||
1384 | goto release_ecc; | ||
1385 | } | ||
1386 | |||
1387 | nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk"); | ||
1388 | if (IS_ERR(nfc->clk.nfi_clk)) { | ||
1389 | dev_err(dev, "no clk\n"); | ||
1390 | ret = PTR_ERR(nfc->clk.nfi_clk); | ||
1391 | goto release_ecc; | ||
1392 | } | ||
1393 | |||
1394 | nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk"); | ||
1395 | if (IS_ERR(nfc->clk.pad_clk)) { | ||
1396 | dev_err(dev, "no pad clk\n"); | ||
1397 | ret = PTR_ERR(nfc->clk.pad_clk); | ||
1398 | goto release_ecc; | ||
1399 | } | ||
1400 | |||
1401 | ret = mtk_nfc_enable_clk(dev, &nfc->clk); | ||
1402 | if (ret) | ||
1403 | goto release_ecc; | ||
1404 | |||
1405 | irq = platform_get_irq(pdev, 0); | ||
1406 | if (irq < 0) { | ||
1407 | dev_err(dev, "no nfi irq resource\n"); | ||
1408 | ret = -EINVAL; | ||
1409 | goto clk_disable; | ||
1410 | } | ||
1411 | |||
1412 | ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc); | ||
1413 | if (ret) { | ||
1414 | dev_err(dev, "failed to request nfi irq\n"); | ||
1415 | goto clk_disable; | ||
1416 | } | ||
1417 | |||
1418 | ret = dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
1419 | if (ret) { | ||
1420 | dev_err(dev, "failed to set dma mask\n"); | ||
1421 | goto clk_disable; | ||
1422 | } | ||
1423 | |||
1424 | platform_set_drvdata(pdev, nfc); | ||
1425 | |||
1426 | ret = mtk_nfc_nand_chips_init(dev, nfc); | ||
1427 | if (ret) { | ||
1428 | dev_err(dev, "failed to init nand chips\n"); | ||
1429 | goto clk_disable; | ||
1430 | } | ||
1431 | |||
1432 | return 0; | ||
1433 | |||
1434 | clk_disable: | ||
1435 | mtk_nfc_disable_clk(&nfc->clk); | ||
1436 | |||
1437 | release_ecc: | ||
1438 | mtk_ecc_release(nfc->ecc); | ||
1439 | |||
1440 | return ret; | ||
1441 | } | ||
1442 | |||
1443 | static int mtk_nfc_remove(struct platform_device *pdev) | ||
1444 | { | ||
1445 | struct mtk_nfc *nfc = platform_get_drvdata(pdev); | ||
1446 | struct mtk_nfc_nand_chip *chip; | ||
1447 | |||
1448 | while (!list_empty(&nfc->chips)) { | ||
1449 | chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, | ||
1450 | node); | ||
1451 | nand_release(nand_to_mtd(&chip->nand)); | ||
1452 | list_del(&chip->node); | ||
1453 | } | ||
1454 | |||
1455 | mtk_ecc_release(nfc->ecc); | ||
1456 | mtk_nfc_disable_clk(&nfc->clk); | ||
1457 | |||
1458 | return 0; | ||
1459 | } | ||
1460 | |||
1461 | #ifdef CONFIG_PM_SLEEP | ||
1462 | static int mtk_nfc_suspend(struct device *dev) | ||
1463 | { | ||
1464 | struct mtk_nfc *nfc = dev_get_drvdata(dev); | ||
1465 | |||
1466 | mtk_nfc_disable_clk(&nfc->clk); | ||
1467 | |||
1468 | return 0; | ||
1469 | } | ||
1470 | |||
1471 | static int mtk_nfc_resume(struct device *dev) | ||
1472 | { | ||
1473 | struct mtk_nfc *nfc = dev_get_drvdata(dev); | ||
1474 | struct mtk_nfc_nand_chip *chip; | ||
1475 | struct nand_chip *nand; | ||
1476 | struct mtd_info *mtd; | ||
1477 | int ret; | ||
1478 | u32 i; | ||
1479 | |||
1480 | udelay(200); | ||
1481 | |||
1482 | ret = mtk_nfc_enable_clk(dev, &nfc->clk); | ||
1483 | if (ret) | ||
1484 | return ret; | ||
1485 | |||
1486 | mtk_nfc_hw_init(nfc); | ||
1487 | |||
1488 | /* reset NAND chip if VCC was powered off */ | ||
1489 | list_for_each_entry(chip, &nfc->chips, node) { | ||
1490 | nand = &chip->nand; | ||
1491 | mtd = nand_to_mtd(nand); | ||
1492 | for (i = 0; i < chip->nsels; i++) { | ||
1493 | nand->select_chip(mtd, i); | ||
1494 | nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); | ||
1495 | } | ||
1496 | } | ||
1497 | |||
1498 | return 0; | ||
1499 | } | ||
1500 | |||
1501 | static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume); | ||
1502 | #endif | ||
1503 | |||
1504 | static const struct of_device_id mtk_nfc_id_table[] = { | ||
1505 | { .compatible = "mediatek,mt2701-nfc" }, | ||
1506 | {} | ||
1507 | }; | ||
1508 | MODULE_DEVICE_TABLE(of, mtk_nfc_id_table); | ||
1509 | |||
1510 | static struct platform_driver mtk_nfc_driver = { | ||
1511 | .probe = mtk_nfc_probe, | ||
1512 | .remove = mtk_nfc_remove, | ||
1513 | .driver = { | ||
1514 | .name = MTK_NAME, | ||
1515 | .of_match_table = mtk_nfc_id_table, | ||
1516 | #ifdef CONFIG_PM_SLEEP | ||
1517 | .pm = &mtk_nfc_pm_ops, | ||
1518 | #endif | ||
1519 | }, | ||
1520 | }; | ||
1521 | |||
1522 | module_platform_driver(mtk_nfc_driver); | ||
1523 | |||
1524 | MODULE_LICENSE("GPL"); | ||
1525 | MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); | ||
1526 | MODULE_DESCRIPTION("MTK NAND Flash Controller Driver"); | ||
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index ccc05f5b2695..2af9869a115e 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
@@ -168,6 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = { | |||
168 | /* Manufacturer IDs */ | 168 | /* Manufacturer IDs */ |
169 | struct nand_manufacturers nand_manuf_ids[] = { | 169 | struct nand_manufacturers nand_manuf_ids[] = { |
170 | {NAND_MFR_TOSHIBA, "Toshiba"}, | 170 | {NAND_MFR_TOSHIBA, "Toshiba"}, |
171 | {NAND_MFR_ESMT, "ESMT"}, | ||
171 | {NAND_MFR_SAMSUNG, "Samsung"}, | 172 | {NAND_MFR_SAMSUNG, "Samsung"}, |
172 | {NAND_MFR_FUJITSU, "Fujitsu"}, | 173 | {NAND_MFR_FUJITSU, "Fujitsu"}, |
173 | {NAND_MFR_NATIONAL, "National"}, | 174 | {NAND_MFR_NATIONAL, "National"}, |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 08e158895635..83b9091233d4 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -118,8 +118,6 @@ | |||
118 | #define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) | 118 | #define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) |
119 | #define STATUS_BUFF_EMPTY 0x00000001 | 119 | #define STATUS_BUFF_EMPTY 0x00000001 |
120 | 120 | ||
121 | #define OMAP24XX_DMA_GPMC 4 | ||
122 | |||
123 | #define SECTOR_BYTES 512 | 121 | #define SECTOR_BYTES 512 |
124 | /* 4 bit padding to make byte aligned, 56 = 52 + 4 */ | 122 | /* 4 bit padding to make byte aligned, 56 = 52 + 4 */ |
125 | #define BCH4_BIT_PAD 4 | 123 | #define BCH4_BIT_PAD 4 |
@@ -1808,7 +1806,6 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
1808 | struct nand_chip *nand_chip; | 1806 | struct nand_chip *nand_chip; |
1809 | int err; | 1807 | int err; |
1810 | dma_cap_mask_t mask; | 1808 | dma_cap_mask_t mask; |
1811 | unsigned sig; | ||
1812 | struct resource *res; | 1809 | struct resource *res; |
1813 | struct device *dev = &pdev->dev; | 1810 | struct device *dev = &pdev->dev; |
1814 | int min_oobbytes = BADBLOCK_MARKER_LENGTH; | 1811 | int min_oobbytes = BADBLOCK_MARKER_LENGTH; |
@@ -1921,8 +1918,8 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
1921 | case NAND_OMAP_PREFETCH_DMA: | 1918 | case NAND_OMAP_PREFETCH_DMA: |
1922 | dma_cap_zero(mask); | 1919 | dma_cap_zero(mask); |
1923 | dma_cap_set(DMA_SLAVE, mask); | 1920 | dma_cap_set(DMA_SLAVE, mask); |
1924 | sig = OMAP24XX_DMA_GPMC; | 1921 | info->dma = dma_request_chan(pdev->dev.parent, "rxtx"); |
1925 | info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); | 1922 | |
1926 | if (!info->dma) { | 1923 | if (!info->dma) { |
1927 | dev_err(&pdev->dev, "DMA engine request failed\n"); | 1924 | dev_err(&pdev->dev, "DMA engine request failed\n"); |
1928 | err = -ENXIO; | 1925 | err = -ENXIO; |
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index a83a690688b4..e414b31b71c1 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/gpio.h> | 39 | #include <linux/gpio.h> |
40 | #include <linux/interrupt.h> | 40 | #include <linux/interrupt.h> |
41 | #include <linux/iopoll.h> | 41 | #include <linux/iopoll.h> |
42 | #include <linux/reset.h> | ||
42 | 43 | ||
43 | #define NFC_REG_CTL 0x0000 | 44 | #define NFC_REG_CTL 0x0000 |
44 | #define NFC_REG_ST 0x0004 | 45 | #define NFC_REG_ST 0x0004 |
@@ -153,6 +154,7 @@ | |||
153 | 154 | ||
154 | /* define bit use in NFC_ECC_ST */ | 155 | /* define bit use in NFC_ECC_ST */ |
155 | #define NFC_ECC_ERR(x) BIT(x) | 156 | #define NFC_ECC_ERR(x) BIT(x) |
157 | #define NFC_ECC_ERR_MSK GENMASK(15, 0) | ||
156 | #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) | 158 | #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) |
157 | #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) | 159 | #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) |
158 | 160 | ||
@@ -269,10 +271,12 @@ struct sunxi_nfc { | |||
269 | void __iomem *regs; | 271 | void __iomem *regs; |
270 | struct clk *ahb_clk; | 272 | struct clk *ahb_clk; |
271 | struct clk *mod_clk; | 273 | struct clk *mod_clk; |
274 | struct reset_control *reset; | ||
272 | unsigned long assigned_cs; | 275 | unsigned long assigned_cs; |
273 | unsigned long clk_rate; | 276 | unsigned long clk_rate; |
274 | struct list_head chips; | 277 | struct list_head chips; |
275 | struct completion complete; | 278 | struct completion complete; |
279 | struct dma_chan *dmac; | ||
276 | }; | 280 | }; |
277 | 281 | ||
278 | static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl) | 282 | static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl) |
@@ -365,6 +369,67 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc) | |||
365 | return ret; | 369 | return ret; |
366 | } | 370 | } |
367 | 371 | ||
372 | static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf, | ||
373 | int chunksize, int nchunks, | ||
374 | enum dma_data_direction ddir, | ||
375 | struct scatterlist *sg) | ||
376 | { | ||
377 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
378 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
379 | struct dma_async_tx_descriptor *dmad; | ||
380 | enum dma_transfer_direction tdir; | ||
381 | dma_cookie_t dmat; | ||
382 | int ret; | ||
383 | |||
384 | if (ddir == DMA_FROM_DEVICE) | ||
385 | tdir = DMA_DEV_TO_MEM; | ||
386 | else | ||
387 | tdir = DMA_MEM_TO_DEV; | ||
388 | |||
389 | sg_init_one(sg, buf, nchunks * chunksize); | ||
390 | ret = dma_map_sg(nfc->dev, sg, 1, ddir); | ||
391 | if (!ret) | ||
392 | return -ENOMEM; | ||
393 | |||
394 | dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); | ||
395 | if (!dmad) { | ||
396 | ret = -EINVAL; | ||
397 | goto err_unmap_buf; | ||
398 | } | ||
399 | |||
400 | writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, | ||
401 | nfc->regs + NFC_REG_CTL); | ||
402 | writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); | ||
403 | writel(chunksize, nfc->regs + NFC_REG_CNT); | ||
404 | dmat = dmaengine_submit(dmad); | ||
405 | |||
406 | ret = dma_submit_error(dmat); | ||
407 | if (ret) | ||
408 | goto err_clr_dma_flag; | ||
409 | |||
410 | return 0; | ||
411 | |||
412 | err_clr_dma_flag: | ||
413 | writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, | ||
414 | nfc->regs + NFC_REG_CTL); | ||
415 | |||
416 | err_unmap_buf: | ||
417 | dma_unmap_sg(nfc->dev, sg, 1, ddir); | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd, | ||
422 | enum dma_data_direction ddir, | ||
423 | struct scatterlist *sg) | ||
424 | { | ||
425 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
426 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
427 | |||
428 | dma_unmap_sg(nfc->dev, sg, 1, ddir); | ||
429 | writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, | ||
430 | nfc->regs + NFC_REG_CTL); | ||
431 | } | ||
432 | |||
368 | static int sunxi_nfc_dev_ready(struct mtd_info *mtd) | 433 | static int sunxi_nfc_dev_ready(struct mtd_info *mtd) |
369 | { | 434 | { |
370 | struct nand_chip *nand = mtd_to_nand(mtd); | 435 | struct nand_chip *nand = mtd_to_nand(mtd); |
@@ -822,17 +887,15 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, | |||
822 | } | 887 | } |
823 | 888 | ||
824 | static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, | 889 | static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, |
825 | int step, bool *erased) | 890 | int step, u32 status, bool *erased) |
826 | { | 891 | { |
827 | struct nand_chip *nand = mtd_to_nand(mtd); | 892 | struct nand_chip *nand = mtd_to_nand(mtd); |
828 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | 893 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); |
829 | struct nand_ecc_ctrl *ecc = &nand->ecc; | 894 | struct nand_ecc_ctrl *ecc = &nand->ecc; |
830 | u32 status, tmp; | 895 | u32 tmp; |
831 | 896 | ||
832 | *erased = false; | 897 | *erased = false; |
833 | 898 | ||
834 | status = readl(nfc->regs + NFC_REG_ECC_ST); | ||
835 | |||
836 | if (status & NFC_ECC_ERR(step)) | 899 | if (status & NFC_ECC_ERR(step)) |
837 | return -EBADMSG; | 900 | return -EBADMSG; |
838 | 901 | ||
@@ -898,6 +961,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, | |||
898 | *cur_off = oob_off + ecc->bytes + 4; | 961 | *cur_off = oob_off + ecc->bytes + 4; |
899 | 962 | ||
900 | ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, | 963 | ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, |
964 | readl(nfc->regs + NFC_REG_ECC_ST), | ||
901 | &erased); | 965 | &erased); |
902 | if (erased) | 966 | if (erased) |
903 | return 1; | 967 | return 1; |
@@ -967,6 +1031,130 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, | |||
967 | *cur_off = mtd->oobsize + mtd->writesize; | 1031 | *cur_off = mtd->oobsize + mtd->writesize; |
968 | } | 1032 | } |
969 | 1033 | ||
1034 | static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, | ||
1035 | int oob_required, int page, | ||
1036 | int nchunks) | ||
1037 | { | ||
1038 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1039 | bool randomized = nand->options & NAND_NEED_SCRAMBLING; | ||
1040 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
1041 | struct nand_ecc_ctrl *ecc = &nand->ecc; | ||
1042 | unsigned int max_bitflips = 0; | ||
1043 | int ret, i, raw_mode = 0; | ||
1044 | struct scatterlist sg; | ||
1045 | u32 status; | ||
1046 | |||
1047 | ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); | ||
1048 | if (ret) | ||
1049 | return ret; | ||
1050 | |||
1051 | ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks, | ||
1052 | DMA_FROM_DEVICE, &sg); | ||
1053 | if (ret) | ||
1054 | return ret; | ||
1055 | |||
1056 | sunxi_nfc_hw_ecc_enable(mtd); | ||
1057 | sunxi_nfc_randomizer_config(mtd, page, false); | ||
1058 | sunxi_nfc_randomizer_enable(mtd); | ||
1059 | |||
1060 | writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | | ||
1061 | NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); | ||
1062 | |||
1063 | dma_async_issue_pending(nfc->dmac); | ||
1064 | |||
1065 | writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS, | ||
1066 | nfc->regs + NFC_REG_CMD); | ||
1067 | |||
1068 | ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); | ||
1069 | if (ret) | ||
1070 | dmaengine_terminate_all(nfc->dmac); | ||
1071 | |||
1072 | sunxi_nfc_randomizer_disable(mtd); | ||
1073 | sunxi_nfc_hw_ecc_disable(mtd); | ||
1074 | |||
1075 | sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg); | ||
1076 | |||
1077 | if (ret) | ||
1078 | return ret; | ||
1079 | |||
1080 | status = readl(nfc->regs + NFC_REG_ECC_ST); | ||
1081 | |||
1082 | for (i = 0; i < nchunks; i++) { | ||
1083 | int data_off = i * ecc->size; | ||
1084 | int oob_off = i * (ecc->bytes + 4); | ||
1085 | u8 *data = buf + data_off; | ||
1086 | u8 *oob = nand->oob_poi + oob_off; | ||
1087 | bool erased; | ||
1088 | |||
1089 | ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL, | ||
1090 | oob_required ? oob : NULL, | ||
1091 | i, status, &erased); | ||
1092 | |||
1093 | /* ECC errors are handled in the second loop. */ | ||
1094 | if (ret < 0) | ||
1095 | continue; | ||
1096 | |||
1097 | if (oob_required && !erased) { | ||
1098 | /* TODO: use DMA to retrieve OOB */ | ||
1099 | nand->cmdfunc(mtd, NAND_CMD_RNDOUT, | ||
1100 | mtd->writesize + oob_off, -1); | ||
1101 | nand->read_buf(mtd, oob, ecc->bytes + 4); | ||
1102 | |||
1103 | sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i, | ||
1104 | !i, page); | ||
1105 | } | ||
1106 | |||
1107 | if (erased) | ||
1108 | raw_mode = 1; | ||
1109 | |||
1110 | sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); | ||
1111 | } | ||
1112 | |||
1113 | if (status & NFC_ECC_ERR_MSK) { | ||
1114 | for (i = 0; i < nchunks; i++) { | ||
1115 | int data_off = i * ecc->size; | ||
1116 | int oob_off = i * (ecc->bytes + 4); | ||
1117 | u8 *data = buf + data_off; | ||
1118 | u8 *oob = nand->oob_poi + oob_off; | ||
1119 | |||
1120 | if (!(status & NFC_ECC_ERR(i))) | ||
1121 | continue; | ||
1122 | |||
1123 | /* | ||
1124 | * Re-read the data with the randomizer disabled to | ||
1125 | * identify bitflips in erased pages. | ||
1126 | */ | ||
1127 | if (randomized) { | ||
1128 | /* TODO: use DMA to read page in raw mode */ | ||
1129 | nand->cmdfunc(mtd, NAND_CMD_RNDOUT, | ||
1130 | data_off, -1); | ||
1131 | nand->read_buf(mtd, data, ecc->size); | ||
1132 | } | ||
1133 | |||
1134 | /* TODO: use DMA to retrieve OOB */ | ||
1135 | nand->cmdfunc(mtd, NAND_CMD_RNDOUT, | ||
1136 | mtd->writesize + oob_off, -1); | ||
1137 | nand->read_buf(mtd, oob, ecc->bytes + 4); | ||
1138 | |||
1139 | ret = nand_check_erased_ecc_chunk(data, ecc->size, | ||
1140 | oob, ecc->bytes + 4, | ||
1141 | NULL, 0, | ||
1142 | ecc->strength); | ||
1143 | if (ret >= 0) | ||
1144 | raw_mode = 1; | ||
1145 | |||
1146 | sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); | ||
1147 | } | ||
1148 | } | ||
1149 | |||
1150 | if (oob_required) | ||
1151 | sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi, | ||
1152 | NULL, !raw_mode, | ||
1153 | page); | ||
1154 | |||
1155 | return max_bitflips; | ||
1156 | } | ||
1157 | |||
970 | static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, | 1158 | static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, |
971 | const u8 *data, int data_off, | 1159 | const u8 *data, int data_off, |
972 | const u8 *oob, int oob_off, | 1160 | const u8 *oob, int oob_off, |
@@ -1065,6 +1253,23 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, | |||
1065 | return max_bitflips; | 1253 | return max_bitflips; |
1066 | } | 1254 | } |
1067 | 1255 | ||
1256 | static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd, | ||
1257 | struct nand_chip *chip, u8 *buf, | ||
1258 | int oob_required, int page) | ||
1259 | { | ||
1260 | int ret; | ||
1261 | |||
1262 | ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page, | ||
1263 | chip->ecc.steps); | ||
1264 | if (ret >= 0) | ||
1265 | return ret; | ||
1266 | |||
1267 | /* Fallback to PIO mode */ | ||
1268 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); | ||
1269 | |||
1270 | return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page); | ||
1271 | } | ||
1272 | |||
1068 | static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, | 1273 | static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, |
1069 | struct nand_chip *chip, | 1274 | struct nand_chip *chip, |
1070 | u32 data_offs, u32 readlen, | 1275 | u32 data_offs, u32 readlen, |
@@ -1098,6 +1303,25 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, | |||
1098 | return max_bitflips; | 1303 | return max_bitflips; |
1099 | } | 1304 | } |
1100 | 1305 | ||
1306 | static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd, | ||
1307 | struct nand_chip *chip, | ||
1308 | u32 data_offs, u32 readlen, | ||
1309 | u8 *buf, int page) | ||
1310 | { | ||
1311 | int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); | ||
1312 | int ret; | ||
1313 | |||
1314 | ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks); | ||
1315 | if (ret >= 0) | ||
1316 | return ret; | ||
1317 | |||
1318 | /* Fallback to PIO mode */ | ||
1319 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); | ||
1320 | |||
1321 | return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen, | ||
1322 | buf, page); | ||
1323 | } | ||
1324 | |||
1101 | static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, | 1325 | static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, |
1102 | struct nand_chip *chip, | 1326 | struct nand_chip *chip, |
1103 | const uint8_t *buf, int oob_required, | 1327 | const uint8_t *buf, int oob_required, |
@@ -1130,6 +1354,99 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, | |||
1130 | return 0; | 1354 | return 0; |
1131 | } | 1355 | } |
1132 | 1356 | ||
1357 | static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd, | ||
1358 | struct nand_chip *chip, | ||
1359 | u32 data_offs, u32 data_len, | ||
1360 | const u8 *buf, int oob_required, | ||
1361 | int page) | ||
1362 | { | ||
1363 | struct nand_ecc_ctrl *ecc = &chip->ecc; | ||
1364 | int ret, i, cur_off = 0; | ||
1365 | |||
1366 | sunxi_nfc_hw_ecc_enable(mtd); | ||
1367 | |||
1368 | for (i = data_offs / ecc->size; | ||
1369 | i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) { | ||
1370 | int data_off = i * ecc->size; | ||
1371 | int oob_off = i * (ecc->bytes + 4); | ||
1372 | const u8 *data = buf + data_off; | ||
1373 | const u8 *oob = chip->oob_poi + oob_off; | ||
1374 | |||
1375 | ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, | ||
1376 | oob_off + mtd->writesize, | ||
1377 | &cur_off, !i, page); | ||
1378 | if (ret) | ||
1379 | return ret; | ||
1380 | } | ||
1381 | |||
1382 | sunxi_nfc_hw_ecc_disable(mtd); | ||
1383 | |||
1384 | return 0; | ||
1385 | } | ||
1386 | |||
1387 | static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd, | ||
1388 | struct nand_chip *chip, | ||
1389 | const u8 *buf, | ||
1390 | int oob_required, | ||
1391 | int page) | ||
1392 | { | ||
1393 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1394 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
1395 | struct nand_ecc_ctrl *ecc = &nand->ecc; | ||
1396 | struct scatterlist sg; | ||
1397 | int ret, i; | ||
1398 | |||
1399 | ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); | ||
1400 | if (ret) | ||
1401 | return ret; | ||
1402 | |||
1403 | ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps, | ||
1404 | DMA_TO_DEVICE, &sg); | ||
1405 | if (ret) | ||
1406 | goto pio_fallback; | ||
1407 | |||
1408 | for (i = 0; i < ecc->steps; i++) { | ||
1409 | const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4)); | ||
1410 | |||
1411 | sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page); | ||
1412 | } | ||
1413 | |||
1414 | sunxi_nfc_hw_ecc_enable(mtd); | ||
1415 | sunxi_nfc_randomizer_config(mtd, page, false); | ||
1416 | sunxi_nfc_randomizer_enable(mtd); | ||
1417 | |||
1418 | writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, | ||
1419 | nfc->regs + NFC_REG_RCMD_SET); | ||
1420 | |||
1421 | dma_async_issue_pending(nfc->dmac); | ||
1422 | |||
1423 | writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | | ||
1424 | NFC_DATA_TRANS | NFC_ACCESS_DIR, | ||
1425 | nfc->regs + NFC_REG_CMD); | ||
1426 | |||
1427 | ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); | ||
1428 | if (ret) | ||
1429 | dmaengine_terminate_all(nfc->dmac); | ||
1430 | |||
1431 | sunxi_nfc_randomizer_disable(mtd); | ||
1432 | sunxi_nfc_hw_ecc_disable(mtd); | ||
1433 | |||
1434 | sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg); | ||
1435 | |||
1436 | if (ret) | ||
1437 | return ret; | ||
1438 | |||
1439 | if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) | ||
1440 | /* TODO: use DMA to transfer extra OOB bytes ? */ | ||
1441 | sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, | ||
1442 | NULL, page); | ||
1443 | |||
1444 | return 0; | ||
1445 | |||
1446 | pio_fallback: | ||
1447 | return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page); | ||
1448 | } | ||
1449 | |||
1133 | static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, | 1450 | static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, |
1134 | struct nand_chip *chip, | 1451 | struct nand_chip *chip, |
1135 | uint8_t *buf, int oob_required, | 1452 | uint8_t *buf, int oob_required, |
@@ -1497,10 +1814,19 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, | |||
1497 | int ret; | 1814 | int ret; |
1498 | int i; | 1815 | int i; |
1499 | 1816 | ||
1817 | if (ecc->size != 512 && ecc->size != 1024) | ||
1818 | return -EINVAL; | ||
1819 | |||
1500 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 1820 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
1501 | if (!data) | 1821 | if (!data) |
1502 | return -ENOMEM; | 1822 | return -ENOMEM; |
1503 | 1823 | ||
1824 | /* Prefer 1k ECC chunks over 512-byte ones */ | ||
1825 | if (ecc->size == 512 && mtd->writesize > 512) { | ||
1826 | ecc->size = 1024; | ||
1827 | ecc->strength *= 2; | ||
1828 | } | ||
1829 | |||
1504 | /* Add ECC info retrieval from DT */ | 1830 | /* Add ECC info retrieval from DT */ |
1505 | for (i = 0; i < ARRAY_SIZE(strengths); i++) { | 1831 | for (i = 0; i < ARRAY_SIZE(strengths); i++) { |
1506 | if (ecc->strength <= strengths[i]) | 1832 | if (ecc->strength <= strengths[i]) |
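Editor's note on the hunk above: it trades two 512-byte ECC chunks for one 1 KiB chunk and doubles the strength so the per-page correction budget stays the same. As a rough illustration of why that is attractive, assume a binary BCH code sized to the chunk (m = 13 bits for 512-byte chunks, m = 14 for 1 KiB ones), so t-bit correction costs about ceil(t * m / 8) parity bytes. The field sizes and sample strengths below are assumptions for this sketch, not values taken from the driver.

/*
 * Illustrative arithmetic only: compare BCH parity overhead for
 * 512-byte chunks at strength t with 1 KiB chunks at strength 2t.
 */
#include <stdio.h>

static int bch_parity_bytes(int strength, int m)
{
	return (strength * m + 7) / 8;	/* ceil(t * m / 8) */
}

int main(void)
{
	/* e.g. 4 bits over 512 bytes vs. 8 bits over 1024 bytes */
	printf("512B @ t=4:  %d parity bytes per chunk (%d per KiB)\n",
	       bch_parity_bytes(4, 13), 2 * bch_parity_bytes(4, 13));
	printf("1024B @ t=8: %d parity bytes per KiB chunk\n",
	       bch_parity_bytes(8, 14));
	return 0;
}

Both layouts spend roughly 14 parity bytes per KiB, but the 1 KiB code tolerates its 8 bitflips anywhere in the chunk instead of 4 per half.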
@@ -1550,14 +1876,28 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, | |||
1550 | struct nand_ecc_ctrl *ecc, | 1876 | struct nand_ecc_ctrl *ecc, |
1551 | struct device_node *np) | 1877 | struct device_node *np) |
1552 | { | 1878 | { |
1879 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1880 | struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); | ||
1881 | struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); | ||
1553 | int ret; | 1882 | int ret; |
1554 | 1883 | ||
1555 | ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); | 1884 | ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); |
1556 | if (ret) | 1885 | if (ret) |
1557 | return ret; | 1886 | return ret; |
1558 | 1887 | ||
1559 | ecc->read_page = sunxi_nfc_hw_ecc_read_page; | 1888 | if (nfc->dmac) { |
1560 | ecc->write_page = sunxi_nfc_hw_ecc_write_page; | 1889 | ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; |
1890 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; | ||
1891 | ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; | ||
1892 | nand->options |= NAND_USE_BOUNCE_BUFFER; | ||
1893 | } else { | ||
1894 | ecc->read_page = sunxi_nfc_hw_ecc_read_page; | ||
1895 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; | ||
1896 | ecc->write_page = sunxi_nfc_hw_ecc_write_page; | ||
1897 | } | ||
1898 | |||
1899 | /* TODO: support DMA for raw accesses and subpage write */ | ||
1900 | ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage; | ||
1561 | ecc->read_oob_raw = nand_read_oob_std; | 1901 | ecc->read_oob_raw = nand_read_oob_std; |
1562 | ecc->write_oob_raw = nand_write_oob_std; | 1902 | ecc->write_oob_raw = nand_write_oob_std; |
1563 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; | 1903 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; |
@@ -1871,26 +2211,59 @@ static int sunxi_nfc_probe(struct platform_device *pdev) | |||
1871 | if (ret) | 2211 | if (ret) |
1872 | goto out_ahb_clk_unprepare; | 2212 | goto out_ahb_clk_unprepare; |
1873 | 2213 | ||
2214 | nfc->reset = devm_reset_control_get_optional(dev, "ahb"); | ||
2215 | if (!IS_ERR(nfc->reset)) { | ||
2216 | ret = reset_control_deassert(nfc->reset); | ||
2217 | if (ret) { | ||
2218 | dev_err(dev, "reset err %d\n", ret); | ||
2219 | goto out_mod_clk_unprepare; | ||
2220 | } | ||
2221 | } else if (PTR_ERR(nfc->reset) != -ENOENT) { | ||
2222 | ret = PTR_ERR(nfc->reset); | ||
2223 | goto out_mod_clk_unprepare; | ||
2224 | } | ||
2225 | |||
1874 | ret = sunxi_nfc_rst(nfc); | 2226 | ret = sunxi_nfc_rst(nfc); |
1875 | if (ret) | 2227 | if (ret) |
1876 | goto out_mod_clk_unprepare; | 2228 | goto out_ahb_reset_reassert; |
1877 | 2229 | ||
1878 | writel(0, nfc->regs + NFC_REG_INT); | 2230 | writel(0, nfc->regs + NFC_REG_INT); |
1879 | ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt, | 2231 | ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt, |
1880 | 0, "sunxi-nand", nfc); | 2232 | 0, "sunxi-nand", nfc); |
1881 | if (ret) | 2233 | if (ret) |
1882 | goto out_mod_clk_unprepare; | 2234 | goto out_ahb_reset_reassert; |
2235 | |||
2236 | nfc->dmac = dma_request_slave_channel(dev, "rxtx"); | ||
2237 | if (nfc->dmac) { | ||
2238 | struct dma_slave_config dmac_cfg = { }; | ||
2239 | |||
2240 | dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA; | ||
2241 | dmac_cfg.dst_addr = dmac_cfg.src_addr; | ||
2242 | dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
2243 | dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; | ||
2244 | dmac_cfg.src_maxburst = 4; | ||
2245 | dmac_cfg.dst_maxburst = 4; | ||
2246 | dmaengine_slave_config(nfc->dmac, &dmac_cfg); | ||
2247 | } else { | ||
2248 | dev_warn(dev, "failed to request rxtx DMA channel\n"); | ||
2249 | } | ||
1883 | 2250 | ||
1884 | platform_set_drvdata(pdev, nfc); | 2251 | platform_set_drvdata(pdev, nfc); |
1885 | 2252 | ||
1886 | ret = sunxi_nand_chips_init(dev, nfc); | 2253 | ret = sunxi_nand_chips_init(dev, nfc); |
1887 | if (ret) { | 2254 | if (ret) { |
1888 | dev_err(dev, "failed to init nand chips\n"); | 2255 | dev_err(dev, "failed to init nand chips\n"); |
1889 | goto out_mod_clk_unprepare; | 2256 | goto out_release_dmac; |
1890 | } | 2257 | } |
1891 | 2258 | ||
1892 | return 0; | 2259 | return 0; |
1893 | 2260 | ||
2261 | out_release_dmac: | ||
2262 | if (nfc->dmac) | ||
2263 | dma_release_channel(nfc->dmac); | ||
2264 | out_ahb_reset_reassert: | ||
2265 | if (!IS_ERR(nfc->reset)) | ||
2266 | reset_control_assert(nfc->reset); | ||
1894 | out_mod_clk_unprepare: | 2267 | out_mod_clk_unprepare: |
1895 | clk_disable_unprepare(nfc->mod_clk); | 2268 | clk_disable_unprepare(nfc->mod_clk); |
1896 | out_ahb_clk_unprepare: | 2269 | out_ahb_clk_unprepare: |
@@ -1904,6 +2277,12 @@ static int sunxi_nfc_remove(struct platform_device *pdev) | |||
1904 | struct sunxi_nfc *nfc = platform_get_drvdata(pdev); | 2277 | struct sunxi_nfc *nfc = platform_get_drvdata(pdev); |
1905 | 2278 | ||
1906 | sunxi_nand_chips_cleanup(nfc); | 2279 | sunxi_nand_chips_cleanup(nfc); |
2280 | |||
2281 | if (!IS_ERR(nfc->reset)) | ||
2282 | reset_control_assert(nfc->reset); | ||
2283 | |||
2284 | if (nfc->dmac) | ||
2285 | dma_release_channel(nfc->dmac); | ||
1907 | clk_disable_unprepare(nfc->mod_clk); | 2286 | clk_disable_unprepare(nfc->mod_clk); |
1908 | clk_disable_unprepare(nfc->ahb_clk); | 2287 | clk_disable_unprepare(nfc->ahb_clk); |
1909 | 2288 | ||
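Editor's note on the sunxi DMA read path above: when a chunk has NFC_ECC_ERR set, the driver re-reads the data and OOB in raw mode and hands them to nand_check_erased_ecc_chunk(), so bitflips in erased (all-0xff) pages are not reported as uncorrectable errors. The standalone sketch below only shows the idea behind that check: count the zero bits in the raw chunk and accept it as erased if the count stays within the ECC strength. The helper names, buffer sizes and strength are made up for the example; this is not the kernel helper itself.

/*
 * Standalone illustration of the "erased chunk" fallback used above.
 * Builds with gcc/clang (uses __builtin_popcount).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int count_zero_bits(const uint8_t *buf, size_t len)
{
	int zeros = 0;
	size_t i;

	for (i = 0; i < len; i++)
		zeros += 8 - __builtin_popcount(buf[i]);

	return zeros;
}

/* Returns the number of bitflips, or -1 if the chunk is not erased. */
static int check_erased_chunk(uint8_t *data, size_t datalen,
			      uint8_t *oob, size_t ooblen, int strength)
{
	int flips = count_zero_bits(data, datalen) +
		    count_zero_bits(oob, ooblen);

	if (flips > strength)
		return -1;

	/* Report the chunk back to the caller as clean 0xff. */
	memset(data, 0xff, datalen);
	memset(oob, 0xff, ooblen);
	return flips;
}

int main(void)
{
	uint8_t data[1024], oob[30];

	memset(data, 0xff, sizeof(data));
	memset(oob, 0xff, sizeof(oob));
	data[10] = 0xfb;	/* one bitflip in an otherwise erased chunk */

	printf("bitflips: %d\n",
	       check_erased_chunk(data, sizeof(data), oob, sizeof(oob), 16));
	return 0;
}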
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index 0cf0ac07a8c2..1f2948c0c458 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * by the Free Software Foundation. | 4 | * by the Free Software Foundation. |
5 | * | 5 | * |
6 | * Copyright © 2012 John Crispin <blogic@openwrt.org> | 6 | * Copyright © 2012 John Crispin <blogic@openwrt.org> |
7 | * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de> | ||
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/mtd/nand.h> | 10 | #include <linux/mtd/nand.h> |
@@ -16,20 +17,28 @@ | |||
16 | #define EBU_ADDSEL1 0x24 | 17 | #define EBU_ADDSEL1 0x24 |
17 | #define EBU_NAND_CON 0xB0 | 18 | #define EBU_NAND_CON 0xB0 |
18 | #define EBU_NAND_WAIT 0xB4 | 19 | #define EBU_NAND_WAIT 0xB4 |
20 | #define NAND_WAIT_RD BIT(0) /* NAND flash status output */ | ||
21 | #define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */ | ||
19 | #define EBU_NAND_ECC0 0xB8 | 22 | #define EBU_NAND_ECC0 0xB8 |
20 | #define EBU_NAND_ECC_AC 0xBC | 23 | #define EBU_NAND_ECC_AC 0xBC |
21 | 24 | ||
22 | /* nand commands */ | 25 | /* |
23 | #define NAND_CMD_ALE (1 << 2) | 26 | * nand commands |
24 | #define NAND_CMD_CLE (1 << 3) | 27 | * The pins of the NAND chip are selected based on the address bits of the |
25 | #define NAND_CMD_CS (1 << 4) | 28 | * "register" read and write. There are no special registers, but an |
26 | #define NAND_WRITE_CMD_RESET 0xff | 29 | * address range and the lower address bits are used to activate the |
30 | * correct line. For example, when bit (1 << 2) is set in the address, | ||
31 | * the ALE pin will be activated. | ||
32 | */ | ||
33 | #define NAND_CMD_ALE BIT(2) /* address latch enable */ | ||
34 | #define NAND_CMD_CLE BIT(3) /* command latch enable */ | ||
35 | #define NAND_CMD_CS BIT(4) /* chip select */ | ||
36 | #define NAND_CMD_SE BIT(5) /* spare area access latch */ | ||
37 | #define NAND_CMD_WP BIT(6) /* write protect */ | ||
27 | #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) | 38 | #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) |
28 | #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) | 39 | #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) |
29 | #define NAND_WRITE_DATA (NAND_CMD_CS) | 40 | #define NAND_WRITE_DATA (NAND_CMD_CS) |
30 | #define NAND_READ_DATA (NAND_CMD_CS) | 41 | #define NAND_READ_DATA (NAND_CMD_CS) |
31 | #define NAND_WAIT_WR_C (1 << 3) | ||
32 | #define NAND_WAIT_RD (0x1) | ||
33 | 42 | ||
34 | /* we need to tell the EBU which address we mapped the NAND to */ | 43 | /* we need to tell the EBU which address we mapped the NAND to */ |
35 | #define ADDSEL1_MASK(x) (x << 4) | 44 | #define ADDSEL1_MASK(x) (x << 4) |
@@ -54,31 +63,41 @@ | |||
54 | #define NAND_CON_CSMUX (1 << 1) | 63 | #define NAND_CON_CSMUX (1 << 1) |
55 | #define NAND_CON_NANDM 1 | 64 | #define NAND_CON_NANDM 1 |
56 | 65 | ||
57 | static void xway_reset_chip(struct nand_chip *chip) | 66 | struct xway_nand_data { |
67 | struct nand_chip chip; | ||
68 | unsigned long csflags; | ||
69 | void __iomem *nandaddr; | ||
70 | }; | ||
71 | |||
72 | static u8 xway_readb(struct mtd_info *mtd, int op) | ||
58 | { | 73 | { |
59 | unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W; | 74 | struct nand_chip *chip = mtd_to_nand(mtd); |
60 | unsigned long flags; | 75 | struct xway_nand_data *data = nand_get_controller_data(chip); |
61 | 76 | ||
62 | nandaddr &= ~NAND_WRITE_ADDR; | 77 | return readb(data->nandaddr + op); |
63 | nandaddr |= NAND_WRITE_CMD; | 78 | } |
64 | 79 | ||
65 | /* finish with a reset */ | 80 | static void xway_writeb(struct mtd_info *mtd, int op, u8 value) |
66 | spin_lock_irqsave(&ebu_lock, flags); | 81 | { |
67 | writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr); | 82 | struct nand_chip *chip = mtd_to_nand(mtd); |
68 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) | 83 | struct xway_nand_data *data = nand_get_controller_data(chip); |
69 | ; | 84 | |
70 | spin_unlock_irqrestore(&ebu_lock, flags); | 85 | writeb(value, data->nandaddr + op); |
71 | } | 86 | } |
72 | 87 | ||
73 | static void xway_select_chip(struct mtd_info *mtd, int chip) | 88 | static void xway_select_chip(struct mtd_info *mtd, int select) |
74 | { | 89 | { |
90 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
91 | struct xway_nand_data *data = nand_get_controller_data(chip); | ||
75 | 92 | ||
76 | switch (chip) { | 93 | switch (select) { |
77 | case -1: | 94 | case -1: |
78 | ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); | 95 | ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); |
79 | ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); | 96 | ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); |
97 | spin_unlock_irqrestore(&ebu_lock, data->csflags); | ||
80 | break; | 98 | break; |
81 | case 0: | 99 | case 0: |
100 | spin_lock_irqsave(&ebu_lock, data->csflags); | ||
82 | ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); | 101 | ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); |
83 | ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); | 102 | ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); |
84 | break; | 103 | break; |
@@ -89,26 +108,16 @@ static void xway_select_chip(struct mtd_info *mtd, int chip) | |||
89 | 108 | ||
90 | static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | 109 | static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
91 | { | 110 | { |
92 | struct nand_chip *this = mtd_to_nand(mtd); | 111 | if (cmd == NAND_CMD_NONE) |
93 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; | 112 | return; |
94 | unsigned long flags; | ||
95 | |||
96 | if (ctrl & NAND_CTRL_CHANGE) { | ||
97 | nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR); | ||
98 | if (ctrl & NAND_CLE) | ||
99 | nandaddr |= NAND_WRITE_CMD; | ||
100 | else | ||
101 | nandaddr |= NAND_WRITE_ADDR; | ||
102 | this->IO_ADDR_W = (void __iomem *) nandaddr; | ||
103 | } | ||
104 | 113 | ||
105 | if (cmd != NAND_CMD_NONE) { | 114 | if (ctrl & NAND_CLE) |
106 | spin_lock_irqsave(&ebu_lock, flags); | 115 | xway_writeb(mtd, NAND_WRITE_CMD, cmd); |
107 | writeb(cmd, this->IO_ADDR_W); | 116 | else if (ctrl & NAND_ALE) |
108 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) | 117 | xway_writeb(mtd, NAND_WRITE_ADDR, cmd); |
109 | ; | 118 | |
110 | spin_unlock_irqrestore(&ebu_lock, flags); | 119 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) |
111 | } | 120 | ; |
112 | } | 121 | } |
113 | 122 | ||
114 | static int xway_dev_ready(struct mtd_info *mtd) | 123 | static int xway_dev_ready(struct mtd_info *mtd) |
@@ -118,80 +127,122 @@ static int xway_dev_ready(struct mtd_info *mtd) | |||
118 | 127 | ||
119 | static unsigned char xway_read_byte(struct mtd_info *mtd) | 128 | static unsigned char xway_read_byte(struct mtd_info *mtd) |
120 | { | 129 | { |
121 | struct nand_chip *this = mtd_to_nand(mtd); | 130 | return xway_readb(mtd, NAND_READ_DATA); |
122 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_R; | 131 | } |
123 | unsigned long flags; | 132 | |
124 | int ret; | 133 | static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len) |
134 | { | ||
135 | int i; | ||
125 | 136 | ||
126 | spin_lock_irqsave(&ebu_lock, flags); | 137 | for (i = 0; i < len; i++) |
127 | ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA)); | 138 | buf[i] = xway_readb(mtd, NAND_WRITE_DATA); |
128 | spin_unlock_irqrestore(&ebu_lock, flags); | 139 | } |
129 | 140 | ||
130 | return ret; | 141 | static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len) |
142 | { | ||
143 | int i; | ||
144 | |||
145 | for (i = 0; i < len; i++) | ||
146 | xway_writeb(mtd, NAND_WRITE_DATA, buf[i]); | ||
131 | } | 147 | } |
132 | 148 | ||
149 | /* | ||
150 | * Probe for the NAND device. | ||
151 | */ | ||
133 | static int xway_nand_probe(struct platform_device *pdev) | 152 | static int xway_nand_probe(struct platform_device *pdev) |
134 | { | 153 | { |
135 | struct nand_chip *this = platform_get_drvdata(pdev); | 154 | struct xway_nand_data *data; |
136 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; | 155 | struct mtd_info *mtd; |
137 | const __be32 *cs = of_get_property(pdev->dev.of_node, | 156 | struct resource *res; |
138 | "lantiq,cs", NULL); | 157 | int err; |
158 | u32 cs; | ||
139 | u32 cs_flag = 0; | 159 | u32 cs_flag = 0; |
140 | 160 | ||
161 | /* Allocate memory for the device structure (and zero it) */ | ||
162 | data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data), | ||
163 | GFP_KERNEL); | ||
164 | if (!data) | ||
165 | return -ENOMEM; | ||
166 | |||
167 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
168 | data->nandaddr = devm_ioremap_resource(&pdev->dev, res); | ||
169 | if (IS_ERR(data->nandaddr)) | ||
170 | return PTR_ERR(data->nandaddr); | ||
171 | |||
172 | nand_set_flash_node(&data->chip, pdev->dev.of_node); | ||
173 | mtd = nand_to_mtd(&data->chip); | ||
174 | mtd->dev.parent = &pdev->dev; | ||
175 | |||
176 | data->chip.cmd_ctrl = xway_cmd_ctrl; | ||
177 | data->chip.dev_ready = xway_dev_ready; | ||
178 | data->chip.select_chip = xway_select_chip; | ||
179 | data->chip.write_buf = xway_write_buf; | ||
180 | data->chip.read_buf = xway_read_buf; | ||
181 | data->chip.read_byte = xway_read_byte; | ||
182 | data->chip.chip_delay = 30; | ||
183 | |||
184 | data->chip.ecc.mode = NAND_ECC_SOFT; | ||
185 | data->chip.ecc.algo = NAND_ECC_HAMMING; | ||
186 | |||
187 | platform_set_drvdata(pdev, data); | ||
188 | nand_set_controller_data(&data->chip, data); | ||
189 | |||
141 | /* load our CS from the DT. Either we find a valid 1 or default to 0 */ | 190 | /* load our CS from the DT. Either we find a valid 1 or default to 0 */ |
142 | if (cs && (*cs == 1)) | 191 | err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs); |
192 | if (!err && cs == 1) | ||
143 | cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; | 193 | cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; |
144 | 194 | ||
145 | /* setup the EBU to run in NAND mode on our base addr */ | 195 | /* setup the EBU to run in NAND mode on our base addr */ |
146 | ltq_ebu_w32(CPHYSADDR(nandaddr) | 196 | ltq_ebu_w32(CPHYSADDR(data->nandaddr) |
147 | | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); | 197 | | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); |
148 | 198 | ||
149 | ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 | 199 | ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 |
150 | | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 | 200 | | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 |
151 | | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); | 201 | | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); |
152 | 202 | ||
153 | ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P | 203 | ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P |
154 | | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P | 204 | | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P |
155 | | cs_flag, EBU_NAND_CON); | 205 | | cs_flag, EBU_NAND_CON); |
156 | 206 | ||
157 | /* finish with a reset */ | 207 | /* Scan to find existence of the device */ |
158 | xway_reset_chip(this); | 208 | err = nand_scan(mtd, 1); |
209 | if (err) | ||
210 | return err; | ||
159 | 211 | ||
160 | return 0; | 212 | err = mtd_device_register(mtd, NULL, 0); |
161 | } | 213 | if (err) |
214 | nand_release(mtd); | ||
162 | 215 | ||
163 | static struct platform_nand_data xway_nand_data = { | 216 | return err; |
164 | .chip = { | 217 | } |
165 | .nr_chips = 1, | ||
166 | .chip_delay = 30, | ||
167 | }, | ||
168 | .ctrl = { | ||
169 | .probe = xway_nand_probe, | ||
170 | .cmd_ctrl = xway_cmd_ctrl, | ||
171 | .dev_ready = xway_dev_ready, | ||
172 | .select_chip = xway_select_chip, | ||
173 | .read_byte = xway_read_byte, | ||
174 | } | ||
175 | }; | ||
176 | 218 | ||
177 | /* | 219 | /* |
178 | * Try to find the node inside the DT. If it is available attach out | 220 | * Remove a NAND device. |
179 | * platform_nand_data | ||
180 | */ | 221 | */ |
181 | static int __init xway_register_nand(void) | 222 | static int xway_nand_remove(struct platform_device *pdev) |
182 | { | 223 | { |
183 | struct device_node *node; | 224 | struct xway_nand_data *data = platform_get_drvdata(pdev); |
184 | struct platform_device *pdev; | 225 | |
185 | 226 | nand_release(nand_to_mtd(&data->chip)); | |
186 | node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway"); | 227 | |
187 | if (!node) | ||
188 | return -ENOENT; | ||
189 | pdev = of_find_device_by_node(node); | ||
190 | if (!pdev) | ||
191 | return -EINVAL; | ||
192 | pdev->dev.platform_data = &xway_nand_data; | ||
193 | of_node_put(node); | ||
194 | return 0; | 228 | return 0; |
195 | } | 229 | } |
196 | 230 | ||
197 | subsys_initcall(xway_register_nand); | 231 | static const struct of_device_id xway_nand_match[] = { |
232 | { .compatible = "lantiq,nand-xway" }, | ||
233 | {}, | ||
234 | }; | ||
235 | MODULE_DEVICE_TABLE(of, xway_nand_match); | ||
236 | |||
237 | static struct platform_driver xway_nand_driver = { | ||
238 | .probe = xway_nand_probe, | ||
239 | .remove = xway_nand_remove, | ||
240 | .driver = { | ||
241 | .name = "lantiq,nand-xway", | ||
242 | .of_match_table = xway_nand_match, | ||
243 | }, | ||
244 | }; | ||
245 | |||
246 | module_platform_driver(xway_nand_driver); | ||
247 | |||
248 | MODULE_LICENSE("GPL"); | ||
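Editor's note on the xway rewrite above: the driver keeps the EBU's address-bit trick described in the new header comment. There are no dedicated command or address registers; a write into the NAND window with BIT(3) set in the offset asserts CLE, and one with BIT(2) set asserts ALE. The userspace sketch below only mirrors that offset arithmetic; the buffer, helper names and command sequence are illustrative, and there is no real EBU access or NAND_WAIT_WR_C polling.

/*
 * Minimal sketch of the address-bit latch selection: the array stands
 * in for the ioremapped NAND window, and the offsets mirror the
 * NAND_WRITE_CMD / NAND_WRITE_ADDR / NAND_WRITE_DATA values.
 */
#include <stdint.h>
#include <stdio.h>

#define CMD_ALE  (1 << 2)	/* address latch enable */
#define CMD_CLE  (1 << 3)	/* command latch enable */
#define CMD_CS   (1 << 4)	/* chip select */

#define WRITE_CMD  (CMD_CS | CMD_CLE)
#define WRITE_ADDR (CMD_CS | CMD_ALE)
#define WRITE_DATA (CMD_CS)

static uint8_t nand_window[64];	/* stand-in for the mapped window */

static void bus_writeb(int offset, uint8_t value)
{
	nand_window[offset] = value;
	printf("write 0x%02x at base+0x%02x (%s)\n",
	       (unsigned int)value, (unsigned int)offset,
	       offset & CMD_CLE ? "CLE" : offset & CMD_ALE ? "ALE" : "data");
}

int main(void)
{
	/* Issue a READ0 command followed by a couple of address cycles. */
	bus_writeb(WRITE_CMD, 0x00);	/* command cycle, CLE asserted */
	bus_writeb(WRITE_ADDR, 0x00);	/* column address, ALE asserted */
	bus_writeb(WRITE_ADDR, 0x04);	/* row address, ALE asserted */
	bus_writeb(WRITE_DATA, 0xff);	/* data cycle, no latch asserted */
	return 0;
}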
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c index 09a4ccac53a2..f26dec896afa 100644 --- a/drivers/mtd/tests/nandbiterrs.c +++ b/drivers/mtd/tests/nandbiterrs.c | |||
@@ -290,7 +290,7 @@ static int overwrite_test(void) | |||
290 | 290 | ||
291 | while (opno < max_overwrite) { | 291 | while (opno < max_overwrite) { |
292 | 292 | ||
293 | err = rewrite_page(0); | 293 | err = write_page(0); |
294 | if (err) | 294 | if (err) |
295 | break; | 295 | break; |
296 | 296 | ||
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index fbe8e164a4ee..8dd6e01f45c0 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -783,6 +783,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) | |||
783 | * NAND Flash Manufacturer ID Codes | 783 | * NAND Flash Manufacturer ID Codes |
784 | */ | 784 | */ |
785 | #define NAND_MFR_TOSHIBA 0x98 | 785 | #define NAND_MFR_TOSHIBA 0x98 |
786 | #define NAND_MFR_ESMT 0xc8 | ||
786 | #define NAND_MFR_SAMSUNG 0xec | 787 | #define NAND_MFR_SAMSUNG 0xec |
787 | #define NAND_MFR_FUJITSU 0x04 | 788 | #define NAND_MFR_FUJITSU 0x04 |
788 | #define NAND_MFR_NATIONAL 0x8f | 789 | #define NAND_MFR_NATIONAL 0x8f |
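Editor's note on the last hunk: it adds an ESMT manufacturer ID (0xc8) so chips from that vendor are identified by name during detection. The snippet below is a simplified stand-in for that kind of ID-to-name lookup; the table and helper are example code, not the kernel's nand_ids tables.

/*
 * Simplified illustration of resolving a NAND manufacturer ID,
 * including the newly added ESMT entry.
 */
#include <stddef.h>
#include <stdio.h>

struct mfr_entry {
	int id;
	const char *name;
};

static const struct mfr_entry mfr_table[] = {
	{ 0x98, "Toshiba" },
	{ 0xc8, "ESMT" },	/* newly added ID */
	{ 0xec, "Samsung" },
	{ 0x04, "Fujitsu" },
	{ 0x8f, "National" },
};

static const char *mfr_name(int id)
{
	size_t i;

	for (i = 0; i < sizeof(mfr_table) / sizeof(mfr_table[0]); i++)
		if (mfr_table[i].id == id)
			return mfr_table[i].name;

	return "Unknown";
}

int main(void)
{
	printf("0xc8 -> %s\n", mfr_name(0xc8));
	return 0;
}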