author    Linus Torvalds <torvalds@linux-foundation.org>	2016-08-02 17:05:11 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>	2016-08-02 17:05:11 -0400
commit    affe8a2abd0d7815bb2653eea2717d0e0f8ac7e3 (patch)
tree      913838395d0480fcf310030d12714439cfb0e4ba
parent    44cee85a8824464e7e951e590243c2a85d79c494 (diff)
parent    1dcff2e4ae728a36876bdb108173f4cbcae128bf (diff)
Merge tag 'for-linus-20160801' of git://git.infradead.org/linux-mtd
Pull MTD updates from Brian Norris:
"NAND:
Quoting Boris:
'This pull request contains only one notable change:
- Addition of the MTK NAND controller driver
And a bunch of specific NAND driver improvements/fixes. Here are the
changes that are worth mentioning:
- A few fixes/improvements for the xway NAND controller driver
- A few fixes for the sunxi NAND controller driver
- Support for DMA in the sunxi NAND driver
- Support for the sunxi NAND controller IP embedded in A23/A33 SoCs
- Addition of bitflip detection in erased pages to the brcmnand driver
- Support for new brcmnand IPs
- Update of the OMAP-GPMC binding to support DMA channel description'
In addition, some small fixes around error handling, etc., as well
as a fix for one long-standing corner case issue (since 2.6.20, I
think?) with writing 1 byte less than a page.
NOR:
- rework some error handling on reads and writes, so we can better
handle (for instance) SPI controllers which have limitations on
their maximum transfer size
- add new Cadence Quad SPI flash controller driver
- add new Atmel QSPI flash controller driver
- add new Hisilicon SPI flash controller driver
- support a few new flash chips, and update supported features on others
- fix the logic used for detecting a fully-unlocked flash
And other miscellaneous small fixes"
* tag 'for-linus-20160801' of git://git.infradead.org/linux-mtd: (60 commits)
mtd: spi-nor: don't build Cadence QuadSPI on non-ARM
mtd: mtk-nor: remove duplicated include from mtk-quadspi.c
mtd: nand: fix bug writing 1 byte less than page size
mtd: update description of MTD_BCM47XXSFLASH symbol
mtd: spi-nor: Add driver for Cadence Quad SPI Flash Controller
mtd: spi-nor: Bindings for Cadence Quad SPI Flash Controller driver
mtd: nand: brcmnand: Change BUG_ON in brcmnand_send_cmd
mtd: pmcmsp-flash: Allocating too much in init_msp_flash()
mtd: maps: sa1100-flash: potential NULL dereference
mtd: atmel-quadspi: add driver for Atmel QSPI controller
mtd: nand: omap2: fix return value check in omap_nand_probe()
Documentation: atmel-quadspi: add binding file for Atmel QSPI driver
mtd: spi-nor: add hisilicon spi-nor flash controller driver
mtd: spi-nor: support dual, quad, and WP for Gigadevice
mtd: spi-nor: Added support for n25q00a.
memory: Update dependency of IFC for Layerscape
mtd: nand: jz4780: Update MODULE_AUTHOR email address
mtd: nand: sunxi: prevent a small memory leak
mtd: nand: sunxi: add reset line support
mtd: nand: sunxi: update DT bindings
...
45 files changed, 5837 insertions, 256 deletions
diff --git a/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
index 21055e210234..c1359f4d48d7 100644
--- a/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/omap-gpmc.txt
@@ -46,6 +46,10 @@ Required properties: | |||
46 | 0 maps to GPMC_WAIT0 pin. | 46 | 0 maps to GPMC_WAIT0 pin. |
47 | - gpio-cells: Must be set to 2 | 47 | - gpio-cells: Must be set to 2 |
48 | 48 | ||
49 | Required properties when using NAND prefetch dma: | ||
50 | - dmas GPMC NAND prefetch dma channel | ||
51 | - dma-names Must be set to "rxtx" | ||
52 | |||
49 | Timing properties for child nodes. All are optional and default to 0. | 53 | Timing properties for child nodes. All are optional and default to 0. |
50 | 54 | ||
51 | - gpmc,sync-clk-ps: Minimum clock period for synchronous mode, in picoseconds | 55 | - gpmc,sync-clk-ps: Minimum clock period for synchronous mode, in picoseconds |
@@ -137,7 +141,8 @@ Example for an AM33xx board: | |||
137 | ti,hwmods = "gpmc"; | 141 | ti,hwmods = "gpmc"; |
138 | reg = <0x50000000 0x2000>; | 142 | reg = <0x50000000 0x2000>; |
139 | interrupts = <100>; | 143 | interrupts = <100>; |
140 | 144 | dmas = <&edma 52 0>; | |
145 | dma-names = "rxtx"; | ||
141 | gpmc,num-cs = <8>; | 146 | gpmc,num-cs = <8>; |
142 | gpmc,num-waitpins = <2>; | 147 | gpmc,num-waitpins = <2>; |
143 | #address-cells = <2>; | 148 | #address-cells = <2>; |
diff --git a/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt b/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt
new file mode 100644
index 000000000000..489807005eda
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt
@@ -0,0 +1,32 @@ | |||
1 | * Atmel Quad Serial Peripheral Interface (QSPI) | ||
2 | |||
3 | Required properties: | ||
4 | - compatible: Should be "atmel,sama5d2-qspi". | ||
5 | - reg: Should contain the locations and lengths of the base registers | ||
6 | and the mapped memory. | ||
7 | - reg-names: Should contain the resource reg names: | ||
8 | - qspi_base: configuration register address space | ||
9 | - qspi_mmap: memory mapped address space | ||
10 | - interrupts: Should contain the interrupt for the device. | ||
11 | - clocks: The phandle of the clock needed by the QSPI controller. | ||
12 | - #address-cells: Should be <1>. | ||
13 | - #size-cells: Should be <0>. | ||
14 | |||
15 | Example: | ||
16 | |||
17 | spi@f0020000 { | ||
18 | compatible = "atmel,sama5d2-qspi"; | ||
19 | reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>; | ||
20 | reg-names = "qspi_base", "qspi_mmap"; | ||
21 | interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>; | ||
22 | clocks = <&spi0_clk>; | ||
23 | #address-cells = <1>; | ||
24 | #size-cells = <0>; | ||
25 | pinctrl-names = "default"; | ||
26 | pinctrl-0 = <&pinctrl_spi0_default>; | ||
27 | status = "okay"; | ||
28 | |||
29 | m25p80@0 { | ||
30 | ... | ||
31 | }; | ||
32 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 7066597c9a81..b40f3a492800 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -27,6 +27,7 @@ Required properties: | |||
27 | brcm,brcmnand-v6.2 | 27 | brcm,brcmnand-v6.2 |
28 | brcm,brcmnand-v7.0 | 28 | brcm,brcmnand-v7.0 |
29 | brcm,brcmnand-v7.1 | 29 | brcm,brcmnand-v7.1 |
30 | brcm,brcmnand-v7.2 | ||
30 | brcm,brcmnand | 31 | brcm,brcmnand |
31 | - reg : the register start and length for NAND register region. | 32 | - reg : the register start and length for NAND register region. |
32 | (optional) Flash DMA register range (if present) | 33 | (optional) Flash DMA register range (if present) |
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
new file mode 100644
index 000000000000..f248056da24c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
@@ -0,0 +1,56 @@ | |||
1 | * Cadence Quad SPI controller | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : Should be "cdns,qspi-nor". | ||
5 | - reg : Contains two entries, each of which is a tuple consisting of a | ||
6 | physical address and length. The first entry is the address and | ||
7 | length of the controller register set. The second entry is the | ||
8 | address and length of the QSPI Controller data area. | ||
9 | - interrupts : Unit interrupt specifier for the controller interrupt. | ||
10 | - clocks : phandle to the Quad SPI clock. | ||
11 | - cdns,fifo-depth : Size of the data FIFO in words. | ||
12 | - cdns,fifo-width : Bus width of the data FIFO in bytes. | ||
13 | - cdns,trigger-address : 32-bit indirect AHB trigger address. | ||
14 | |||
15 | Optional properties: | ||
16 | - cdns,is-decoded-cs : Flag to indicate whether decoder is used or not. | ||
17 | |||
18 | Optional subnodes: | ||
19 | Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional | ||
20 | custom properties: | ||
21 | - cdns,read-delay : Delay for read capture logic, in clock cycles | ||
22 | - cdns,tshsl-ns : Delay in nanoseconds for the length that the master | ||
23 | mode chip select outputs are de-asserted between | ||
24 | transactions. | ||
25 | - cdns,tsd2d-ns : Delay in nanoseconds between one chip select being | ||
26 | de-activated and the activation of another. | ||
27 | - cdns,tchsh-ns : Delay in nanoseconds between last bit of current | ||
28 | transaction and deasserting the device chip select | ||
29 | (qspi_n_ss_out). | ||
30 | - cdns,tslch-ns : Delay in nanoseconds between setting qspi_n_ss_out low | ||
31 | and first bit transfer. | ||
32 | |||
33 | Example: | ||
34 | |||
35 | qspi: spi@ff705000 { | ||
36 | compatible = "cdns,qspi-nor"; | ||
37 | #address-cells = <1>; | ||
38 | #size-cells = <0>; | ||
39 | reg = <0xff705000 0x1000>, | ||
40 | <0xffa00000 0x1000>; | ||
41 | interrupts = <0 151 4>; | ||
42 | clocks = <&qspi_clk>; | ||
43 | cdns,is-decoded-cs; | ||
44 | cdns,fifo-depth = <128>; | ||
45 | cdns,fifo-width = <4>; | ||
46 | cdns,trigger-address = <0x00000000>; | ||
47 | |||
48 | flash0: n25q00@0 { | ||
49 | ... | ||
50 | cdns,read-delay = <4>; | ||
51 | cdns,tshsl-ns = <50>; | ||
52 | cdns,tsd2d-ns = <50>; | ||
53 | cdns,tchsh-ns = <4>; | ||
54 | cdns,tslch-ns = <4>; | ||
55 | }; | ||
56 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
index 3ee7e202657c..174f68c26c1b 100644
--- a/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/gpmc-nand.txt
@@ -39,7 +39,7 @@ Optional properties: | |||
39 | 39 | ||
40 | "prefetch-polled" Prefetch polled mode (default) | 40 | "prefetch-polled" Prefetch polled mode (default) |
41 | "polled" Polled mode, without prefetch | 41 | "polled" Polled mode, without prefetch |
42 | "prefetch-dma" Prefetch enabled sDMA mode | 42 | "prefetch-dma" Prefetch enabled DMA mode |
43 | "prefetch-irq" Prefetch enabled irq mode | 43 | "prefetch-irq" Prefetch enabled irq mode |
44 | 44 | ||
45 | - elm_id: <deprecated> use "ti,elm-id" instead | 45 | - elm_id: <deprecated> use "ti,elm-id" instead |
diff --git a/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt b/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt
new file mode 100644
index 000000000000..74981520d6dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/hisilicon,fmc-spi-nor.txt
@@ -0,0 +1,24 @@ | |||
1 | HiSilicon SPI-NOR Flash Controller | ||
2 | |||
3 | Required properties: | ||
4 | - compatible : Should be "hisilicon,fmc-spi-nor" and one of the following strings: | ||
5 | "hisilicon,hi3519-spi-nor" | ||
6 | - address-cells : Should be 1. | ||
7 | - size-cells : Should be 0. | ||
8 | - reg : Offset and length of the register set for the controller device. | ||
9 | - reg-names : Must include the following two entries: "control", "memory". | ||
10 | - clocks : handle to spi-nor flash controller clock. | ||
11 | |||
12 | Example: | ||
13 | spi-nor-controller@10000000 { | ||
14 | compatible = "hisilicon,hi3519-spi-nor", "hisilicon,fmc-spi-nor"; | ||
15 | #address-cells = <1>; | ||
16 | #size-cells = <0>; | ||
17 | reg = <0x10000000 0x1000>, <0x14000000 0x1000000>; | ||
18 | reg-names = "control", "memory"; | ||
19 | clocks = <&clock HI3519_FMC_CLK>; | ||
20 | spi-nor@0 { | ||
21 | compatible = "jedec,spi-nor"; | ||
22 | reg = <0>; | ||
23 | }; | ||
24 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/mtk-nand.txt b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
new file mode 100644
index 000000000000..069c192ed5c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mtk-nand.txt
@@ -0,0 +1,160 @@ | |||
1 | MTK SoCs NAND FLASH controller (NFC) DT binding | ||
2 | |||
3 | This file documents the device tree bindings for MTK SoCs NAND controllers. | ||
4 | The functional split of the controller requires two drivers to operate: | ||
5 | the nand controller interface driver and the ECC engine driver. | ||
6 | |||
7 | The hardware description for both devices must be captured as device | ||
8 | tree nodes. | ||
9 | |||
10 | 1) NFC NAND Controller Interface (NFI): | ||
11 | ======================================= | ||
12 | |||
13 | The first part of NFC is NAND Controller Interface (NFI) HW. | ||
14 | Required NFI properties: | ||
15 | - compatible: Should be "mediatek,mtxxxx-nfc". | ||
16 | - reg: Base physical address and size of NFI. | ||
17 | - interrupts: Interrupts of NFI. | ||
18 | - clocks: NFI required clocks. | ||
19 | - clock-names: NFI clocks internal name. | ||
20 | - status: Should be "disabled" by default; set to "okay" by the platform dts. | ||
21 | - ecc-engine: Required ECC Engine node. | ||
22 | - #address-cells: NAND chip index, should be 1. | ||
23 | - #size-cells: Should be 0. | ||
24 | |||
25 | Example: | ||
26 | |||
27 | nandc: nfi@1100d000 { | ||
28 | compatible = "mediatek,mt2701-nfc"; | ||
29 | reg = <0 0x1100d000 0 0x1000>; | ||
30 | interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_LOW>; | ||
31 | clocks = <&pericfg CLK_PERI_NFI>, | ||
32 | <&pericfg CLK_PERI_NFI_PAD>; | ||
33 | clock-names = "nfi_clk", "pad_clk"; | ||
34 | status = "disabled"; | ||
35 | ecc-engine = <&bch>; | ||
36 | #address-cells = <1>; | ||
37 | #size-cells = <0>; | ||
38 | }; | ||
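The ecc-engine property above ties the NFI node to the BCH node described in section 2. As a rough sketch of how an NFI driver might resolve that phandle into the ECC engine's driver data (the helper name and error handling are illustrative assumptions, not code from this series):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

/* Hypothetical helper: follow the "ecc-engine" phandle and return the
 * ECC engine's driver data.  Assumes the ECC driver has probed and
 * called platform_set_drvdata() on its platform device. */
static struct mtk_ecc *mtk_ecc_lookup(struct device *dev)
{
    struct device_node *np;
    struct platform_device *pdev;

    np = of_parse_phandle(dev->of_node, "ecc-engine", 0);
    if (!np)
        return ERR_PTR(-ENODEV);

    pdev = of_find_device_by_node(np);
    of_node_put(np);
    if (!pdev)
        return ERR_PTR(-EPROBE_DEFER);

    return platform_get_drvdata(pdev);
}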
39 | |||
40 | Platform related properties, should be set in {platform_name}.dts: | ||
41 | - children nodes: NAND chips. | ||
42 | |||
43 | Children nodes properties: | ||
44 | - reg: Chip Select Signal, default 0. | ||
45 | Set as reg = <0>, <1> when two chip selects are needed. | ||
46 | Optional: | ||
47 | - nand-on-flash-bbt: Store BBT on NAND Flash. | ||
48 | - nand-ecc-mode: the NAND ecc mode (check driver for supported modes) | ||
49 | - nand-ecc-step-size: Number of data bytes covered by a single ECC step. | ||
50 | valid values: 512 and 1024. | ||
51 | 1024 is recommended for large page NANDs. | ||
52 | - nand-ecc-strength: Number of bits to correct per ECC step. | ||
53 | The valid values that the controller supports are: 4, 6, | ||
54 | 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44, | ||
55 | 48, 52, 56, 60. | ||
56 | The strength should be calculated as follows: | ||
57 | E = (S - F) * 8 / 14 | ||
58 | S = O / (P / Q) | ||
59 | E : nand-ecc-strength. | ||
60 | S : spare size per sector. | ||
61 | F : FDM size, should be in the range [1,8]. | ||
62 | It is used to store free oob data. | ||
63 | O : oob size. | ||
64 | P : page size. | ||
65 | Q : nand-ecc-step-size. | ||
66 | If the result does not match any of the values listed | ||
67 | above, select the next smaller valid value from the list | ||
68 | (otherwise the driver will do the adjustment at runtime). | ||
69 | A worked example follows the configuration example below. | ||
70 | - pinctrl-names: Default NAND pin GPIO setting name. | ||
71 | - pinctrl-0: GPIO setting node. | ||
72 | |||
73 | Example: | ||
74 | &pio { | ||
75 | nand_pins_default: nanddefault { | ||
76 | pins_dat { | ||
77 | pinmux = <MT2701_PIN_111_MSDC0_DAT7__FUNC_NLD7>, | ||
78 | <MT2701_PIN_112_MSDC0_DAT6__FUNC_NLD6>, | ||
79 | <MT2701_PIN_114_MSDC0_DAT4__FUNC_NLD4>, | ||
80 | <MT2701_PIN_118_MSDC0_DAT3__FUNC_NLD3>, | ||
81 | <MT2701_PIN_121_MSDC0_DAT0__FUNC_NLD0>, | ||
82 | <MT2701_PIN_120_MSDC0_DAT1__FUNC_NLD1>, | ||
83 | <MT2701_PIN_113_MSDC0_DAT5__FUNC_NLD5>, | ||
84 | <MT2701_PIN_115_MSDC0_RSTB__FUNC_NLD8>, | ||
85 | <MT2701_PIN_119_MSDC0_DAT2__FUNC_NLD2>; | ||
86 | input-enable; | ||
87 | drive-strength = <MTK_DRIVE_8mA>; | ||
88 | bias-pull-up; | ||
89 | }; | ||
90 | |||
91 | pins_we { | ||
92 | pinmux = <MT2701_PIN_117_MSDC0_CLK__FUNC_NWEB>; | ||
93 | drive-strength = <MTK_DRIVE_8mA>; | ||
94 | bias-pull-up = <MTK_PUPD_SET_R1R0_10>; | ||
95 | }; | ||
96 | |||
97 | pins_ale { | ||
98 | pinmux = <MT2701_PIN_116_MSDC0_CMD__FUNC_NALE>; | ||
99 | drive-strength = <MTK_DRIVE_8mA>; | ||
100 | bias-pull-down = <MTK_PUPD_SET_R1R0_10>; | ||
101 | }; | ||
102 | }; | ||
103 | }; | ||
104 | |||
105 | &nandc { | ||
106 | status = "okay"; | ||
107 | pinctrl-names = "default"; | ||
108 | pinctrl-0 = <&nand_pins_default>; | ||
109 | nand@0 { | ||
110 | reg = <0>; | ||
111 | nand-on-flash-bbt; | ||
112 | nand-ecc-mode = "hw"; | ||
113 | nand-ecc-strength = <24>; | ||
114 | nand-ecc-step-size = <1024>; | ||
115 | }; | ||
116 | }; | ||
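As a worked example of the nand-ecc-strength formula above (hypothetical chip geometry, not taken from this binding): a 4096-byte page with 224 bytes of OOB, a 1024-byte ECC step and an FDM size of 8 gives S = 224 / (4096 / 1024) = 56, and E = (56 - 8) * 8 / 14 = 27.4, so the next smaller supported strength, 24, is chosen (the value used in the configuration example above). A small C sketch of that selection logic (illustrative only, not the driver's actual code):

/* Supported strengths, mirroring the list in this binding.
 * ARRAY_SIZE() is the usual kernel helper from <linux/kernel.h>. */
static const int mtk_ecc_strengths[] = {
    4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32,
    36, 40, 44, 48, 52, 56, 60,
};

/* Pick the largest supported strength not exceeding
 * E = (S - F) * 8 / 14, where S = oob / (page / step). */
static int mtk_pick_ecc_strength(int oob, int page, int step, int fdm)
{
    int spare = oob / (page / step);
    int e = (spare - fdm) * 8 / 14;
    int i, best = mtk_ecc_strengths[0];

    for (i = 0; i < ARRAY_SIZE(mtk_ecc_strengths); i++)
        if (mtk_ecc_strengths[i] <= e)
            best = mtk_ecc_strengths[i];

    return best; /* oob=224, page=4096, step=1024, fdm=8 -> 24 */
}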
117 | |||
118 | NAND chip optional subnodes: | ||
119 | - Partitions, see Documentation/devicetree/bindings/mtd/partition.txt | ||
120 | |||
121 | Example: | ||
122 | nand@0 { | ||
123 | partitions { | ||
124 | compatible = "fixed-partitions"; | ||
125 | #address-cells = <1>; | ||
126 | #size-cells = <1>; | ||
127 | |||
128 | preloader@0 { | ||
129 | label = "pl"; | ||
130 | read-only; | ||
131 | reg = <0x00000000 0x00400000>; | ||
132 | }; | ||
133 | android@0x00400000 { | ||
134 | label = "android"; | ||
135 | reg = <0x00400000 0x12c00000>; | ||
136 | }; | ||
137 | }; | ||
138 | }; | ||
139 | |||
140 | 2) ECC Engine: | ||
141 | ============== | ||
142 | |||
143 | Required BCH properties: | ||
144 | - compatible: Should be "mediatek,mtxxxx-ecc". | ||
145 | - reg: Base physical address and size of ECC. | ||
146 | - interrupts: Interrupts of ECC. | ||
147 | - clocks: ECC required clocks. | ||
148 | - clock-names: ECC clocks internal name. | ||
149 | - status: Should be "disabled" by default; set to "okay" by the platform dts. | ||
150 | |||
151 | Example: | ||
152 | |||
153 | bch: ecc@1100e000 { | ||
154 | compatible = "mediatek,mt2701-ecc"; | ||
155 | reg = <0 0x1100e000 0 0x1000>; | ||
156 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>; | ||
157 | clocks = <&pericfg CLK_PERI_NFI_ECC>; | ||
158 | clock-names = "nfiecc_clk"; | ||
159 | status = "disabled"; | ||
160 | }; | ||
diff --git a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
index 086d6f44c4b9..f322f56aef74 100644
--- a/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/sunxi-nand.txt
@@ -11,10 +11,16 @@ Required properties: | |||
11 | * "ahb" : AHB gating clock | 11 | * "ahb" : AHB gating clock |
12 | * "mod" : nand controller clock | 12 | * "mod" : nand controller clock |
13 | 13 | ||
14 | Optional properties: | ||
15 | - dmas : shall reference DMA channel associated to the NAND controller. | ||
16 | - dma-names : shall be "rxtx". | ||
17 | |||
14 | Optional children nodes: | 18 | Optional children nodes: |
15 | Children nodes represent the available nand chips. | 19 | Children nodes represent the available nand chips. |
16 | 20 | ||
17 | Optional properties: | 21 | Optional properties: |
22 | - reset : phandle + reset specifier pair | ||
23 | - reset-names : must contain "ahb" | ||
18 | - allwinner,rb : shall contain the native Ready/Busy ids. | 24 | - allwinner,rb : shall contain the native Ready/Busy ids. |
19 | or | 25 | or |
20 | - rb-gpios : shall contain the gpios used as R/B pins. | 26 | - rb-gpios : shall contain the gpios used as R/B pins. |
diff --git a/arch/cris/arch-v10/drivers/axisflashmap.c b/arch/cris/arch-v10/drivers/axisflashmap.c
index 60d57c590032..bdc25aa43468 100644
--- a/arch/cris/arch-v10/drivers/axisflashmap.c
+++ b/arch/cris/arch-v10/drivers/axisflashmap.c
@@ -397,7 +397,7 @@ static int __init init_axis_flash(void) | |||
397 | if (!romfs_in_flash) { | 397 | if (!romfs_in_flash) { |
398 | /* Create an RAM device for the root partition (romfs). */ | 398 | /* Create an RAM device for the root partition (romfs). */ |
399 | 399 | ||
400 | #if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0) | 400 | #if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) |
401 | /* No use trying to boot this kernel from RAM. Panic! */ | 401 | /* No use trying to boot this kernel from RAM. Panic! */ |
402 | printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM " | 402 | printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM " |
403 | "device due to kernel (mis)configuration!\n"); | 403 | "device due to kernel (mis)configuration!\n"); |
diff --git a/arch/cris/arch-v32/drivers/axisflashmap.c b/arch/cris/arch-v32/drivers/axisflashmap.c
index bd10d3ba0949..87656c41fec7 100644
--- a/arch/cris/arch-v32/drivers/axisflashmap.c
+++ b/arch/cris/arch-v32/drivers/axisflashmap.c
@@ -320,7 +320,7 @@ static int __init init_axis_flash(void) | |||
320 | * but its size must be configured as 0 so as not to conflict | 320 | * but its size must be configured as 0 so as not to conflict |
321 | * with our usage. | 321 | * with our usage. |
322 | */ | 322 | */ |
323 | #if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0) | 323 | #if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) |
324 | if (!romfs_in_flash && !nand_boot) { | 324 | if (!romfs_in_flash && !nand_boot) { |
325 | printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM " | 325 | printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM " |
326 | "device; configure CONFIG_MTD_MTDRAM with size = 0!\n"); | 326 | "device; configure CONFIG_MTD_MTDRAM with size = 0!\n"); |
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 133712346911..4b4c0c3c3d2f 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -115,7 +115,7 @@ config FSL_CORENET_CF | |||
115 | 115 | ||
116 | config FSL_IFC | 116 | config FSL_IFC |
117 | bool | 117 | bool |
118 | depends on FSL_SOC | 118 | depends on FSL_SOC || ARCH_LAYERSCAPE |
119 | 119 | ||
120 | config JZ4780_NEMC | 120 | config JZ4780_NEMC |
121 | bool "Ingenic JZ4780 SoC NEMC driver" | 121 | bool "Ingenic JZ4780 SoC NEMC driver" |
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index 904b4af5f142..1b182b117f9c 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -31,7 +31,9 @@ | |||
31 | #include <linux/of_device.h> | 31 | #include <linux/of_device.h> |
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/fsl_ifc.h> | 33 | #include <linux/fsl_ifc.h> |
34 | #include <asm/prom.h> | 34 | #include <linux/irqdomain.h> |
35 | #include <linux/of_address.h> | ||
36 | #include <linux/of_irq.h> | ||
35 | 37 | ||
36 | struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; | 38 | struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; |
37 | EXPORT_SYMBOL(fsl_ifc_ctrl_dev); | 39 | EXPORT_SYMBOL(fsl_ifc_ctrl_dev); |
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 9a1a6ffd16b8..94d3eb42c4d5 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -416,7 +416,7 @@ static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t | |||
416 | return ret; | 416 | return ret; |
417 | } | 417 | } |
418 | 418 | ||
419 | static inline int do_write_buffer(struct map_info *map, struct flchip *chip, | 419 | static int do_write_buffer(struct map_info *map, struct flchip *chip, |
420 | unsigned long adr, const u_char *buf, int len) | 420 | unsigned long adr, const u_char *buf, int len) |
421 | { | 421 | { |
422 | struct cfi_private *cfi = map->fldrv_priv; | 422 | struct cfi_private *cfi = map->fldrv_priv; |
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 64a248556d29..58329d2dacd1 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -113,12 +113,12 @@ config MTD_SST25L | |||
113 | if you want to specify device partitioning. | 113 | if you want to specify device partitioning. |
114 | 114 | ||
115 | config MTD_BCM47XXSFLASH | 115 | config MTD_BCM47XXSFLASH |
116 | tristate "R/O support for serial flash on BCMA bus" | 116 | tristate "Support for serial flash on BCMA bus" |
117 | depends on BCMA_SFLASH && (MIPS || ARM) | 117 | depends on BCMA_SFLASH && (MIPS || ARM) |
118 | help | 118 | help |
119 | BCMA bus can have various flash memories attached, they are | 119 | BCMA bus can have various flash memories attached, they are |
120 | registered by bcma as platform devices. This enables driver for | 120 | registered by bcma as platform devices. This enables driver for |
121 | serial flash memories (only read-only mode is implemented). | 121 | serial flash memories. |
122 | 122 | ||
123 | config MTD_SLRAM | 123 | config MTD_SLRAM |
124 | tristate "Uncached system RAM" | 124 | tristate "Uncached system RAM" |
@@ -171,18 +171,6 @@ config MTDRAM_ERASE_SIZE | |||
171 | as a module, it is also possible to specify this as a parameter when | 171 | as a module, it is also possible to specify this as a parameter when |
172 | loading the module. | 172 | loading the module. |
173 | 173 | ||
174 | #If not a module (I don't want to test it as a module) | ||
175 | config MTDRAM_ABS_POS | ||
176 | hex "SRAM Hexadecimal Absolute position or 0" | ||
177 | depends on MTD_MTDRAM=y | ||
178 | default "0" | ||
179 | help | ||
180 | If you have system RAM accessible by the CPU but not used by Linux | ||
181 | in normal operation, you can give the physical address at which the | ||
182 | available RAM starts, and the MTDRAM driver will use it instead of | ||
183 | allocating space from Linux's available memory. Otherwise, leave | ||
184 | this set to zero. Most people will want to leave this as zero. | ||
185 | |||
186 | config MTD_BLOCK2MTD | 174 | config MTD_BLOCK2MTD |
187 | tristate "MTD using block device" | 175 | tristate "MTD using block device" |
188 | depends on BLOCK | 176 | depends on BLOCK |
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 9d6854467651..9cf7fcd28034 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -73,14 +73,15 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
73 | return spi_write(spi, flash->command, len + 1); | 73 | return spi_write(spi, flash->command, len + 1); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len, | 76 | static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, |
77 | size_t *retlen, const u_char *buf) | 77 | const u_char *buf) |
78 | { | 78 | { |
79 | struct m25p *flash = nor->priv; | 79 | struct m25p *flash = nor->priv; |
80 | struct spi_device *spi = flash->spi; | 80 | struct spi_device *spi = flash->spi; |
81 | struct spi_transfer t[2] = {}; | 81 | struct spi_transfer t[2] = {}; |
82 | struct spi_message m; | 82 | struct spi_message m; |
83 | int cmd_sz = m25p_cmdsz(nor); | 83 | int cmd_sz = m25p_cmdsz(nor); |
84 | ssize_t ret; | ||
84 | 85 | ||
85 | spi_message_init(&m); | 86 | spi_message_init(&m); |
86 | 87 | ||
@@ -98,9 +99,14 @@ static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len, | |||
98 | t[1].len = len; | 99 | t[1].len = len; |
99 | spi_message_add_tail(&t[1], &m); | 100 | spi_message_add_tail(&t[1], &m); |
100 | 101 | ||
101 | spi_sync(spi, &m); | 102 | ret = spi_sync(spi, &m); |
103 | if (ret) | ||
104 | return ret; | ||
102 | 105 | ||
103 | *retlen += m.actual_length - cmd_sz; | 106 | ret = m.actual_length - cmd_sz; |
107 | if (ret < 0) | ||
108 | return -EIO; | ||
109 | return ret; | ||
104 | } | 110 | } |
105 | 111 | ||
106 | static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) | 112 | static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) |
@@ -119,21 +125,21 @@ static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) | |||
119 | * Read an address range from the nor chip. The address range | 125 | * Read an address range from the nor chip. The address range |
120 | * may be any size provided it is within the physical boundaries. | 126 | * may be any size provided it is within the physical boundaries. |
121 | */ | 127 | */ |
122 | static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, | 128 | static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, |
123 | size_t *retlen, u_char *buf) | 129 | u_char *buf) |
124 | { | 130 | { |
125 | struct m25p *flash = nor->priv; | 131 | struct m25p *flash = nor->priv; |
126 | struct spi_device *spi = flash->spi; | 132 | struct spi_device *spi = flash->spi; |
127 | struct spi_transfer t[2]; | 133 | struct spi_transfer t[2]; |
128 | struct spi_message m; | 134 | struct spi_message m; |
129 | unsigned int dummy = nor->read_dummy; | 135 | unsigned int dummy = nor->read_dummy; |
136 | ssize_t ret; | ||
130 | 137 | ||
131 | /* convert the dummy cycles to the number of bytes */ | 138 | /* convert the dummy cycles to the number of bytes */ |
132 | dummy /= 8; | 139 | dummy /= 8; |
133 | 140 | ||
134 | if (spi_flash_read_supported(spi)) { | 141 | if (spi_flash_read_supported(spi)) { |
135 | struct spi_flash_read_message msg; | 142 | struct spi_flash_read_message msg; |
136 | int ret; | ||
137 | 143 | ||
138 | memset(&msg, 0, sizeof(msg)); | 144 | memset(&msg, 0, sizeof(msg)); |
139 | 145 | ||
@@ -149,8 +155,9 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, | |||
149 | msg.data_nbits = m25p80_rx_nbits(nor); | 155 | msg.data_nbits = m25p80_rx_nbits(nor); |
150 | 156 | ||
151 | ret = spi_flash_read(spi, &msg); | 157 | ret = spi_flash_read(spi, &msg); |
152 | *retlen = msg.retlen; | 158 | if (ret < 0) |
153 | return ret; | 159 | return ret; |
160 | return msg.retlen; | ||
154 | } | 161 | } |
155 | 162 | ||
156 | spi_message_init(&m); | 163 | spi_message_init(&m); |
@@ -165,13 +172,17 @@ static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, | |||
165 | 172 | ||
166 | t[1].rx_buf = buf; | 173 | t[1].rx_buf = buf; |
167 | t[1].rx_nbits = m25p80_rx_nbits(nor); | 174 | t[1].rx_nbits = m25p80_rx_nbits(nor); |
168 | t[1].len = len; | 175 | t[1].len = min(len, spi_max_transfer_size(spi)); |
169 | spi_message_add_tail(&t[1], &m); | 176 | spi_message_add_tail(&t[1], &m); |
170 | 177 | ||
171 | spi_sync(spi, &m); | 178 | ret = spi_sync(spi, &m); |
179 | if (ret) | ||
180 | return ret; | ||
172 | 181 | ||
173 | *retlen = m.actual_length - m25p_cmdsz(nor) - dummy; | 182 | ret = m.actual_length - m25p_cmdsz(nor) - dummy; |
174 | return 0; | 183 | if (ret < 0) |
184 | return -EIO; | ||
185 | return ret; | ||
175 | } | 186 | } |
176 | 187 | ||
177 | /* | 188 | /* |
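With these hooks now returning a ssize_t count of bytes actually transferred (and m25p80_read() clamping each transfer to spi_max_transfer_size()), the caller is expected to loop until the whole request completes. A minimal sketch of such a loop against the updated nor->read() prototype (illustrative only; the spi-nor core's real loop in this series may differ):

static int spi_nor_read_all(struct spi_nor *nor, loff_t from, size_t len,
                            size_t *retlen, u_char *buf)
{
    *retlen = 0;

    while (len) {
        ssize_t ret = nor->read(nor, from, len, buf);

        if (ret == 0)
            return -EIO; /* no progress, avoid spinning forever */
        if (ret < 0)
            return ret;

        from += ret;
        buf += ret;
        len -= ret;
        *retlen += ret;
    }

    return 0;
}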
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 22f3858c0364..3fad35942895 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -186,7 +186,7 @@ static int of_flash_probe(struct platform_device *dev) | |||
186 | * consists internally of 2 non-identical NOR chips on one die. | 186 | * consists internally of 2 non-identical NOR chips on one die. |
187 | */ | 187 | */ |
188 | p = of_get_property(dp, "reg", &count); | 188 | p = of_get_property(dp, "reg", &count); |
189 | if (count % reg_tuple_size != 0) { | 189 | if (!p || count % reg_tuple_size != 0) { |
190 | dev_err(&dev->dev, "Malformed reg property on %s\n", | 190 | dev_err(&dev->dev, "Malformed reg property on %s\n", |
191 | dev->dev.of_node->full_name); | 191 | dev->dev.of_node->full_name); |
192 | err = -EINVAL; | 192 | err = -EINVAL; |
diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
index 744ca5cacc9b..f9fa3fad728e 100644
--- a/drivers/mtd/maps/pmcmsp-flash.c
+++ b/drivers/mtd/maps/pmcmsp-flash.c
@@ -75,15 +75,15 @@ static int __init init_msp_flash(void) | |||
75 | 75 | ||
76 | printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); | 76 | printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); |
77 | 77 | ||
78 | msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); | 78 | msp_flash = kcalloc(fcnt, sizeof(*msp_flash), GFP_KERNEL); |
79 | if (!msp_flash) | 79 | if (!msp_flash) |
80 | return -ENOMEM; | 80 | return -ENOMEM; |
81 | 81 | ||
82 | msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); | 82 | msp_parts = kcalloc(fcnt, sizeof(*msp_parts), GFP_KERNEL); |
83 | if (!msp_parts) | 83 | if (!msp_parts) |
84 | goto free_msp_flash; | 84 | goto free_msp_flash; |
85 | 85 | ||
86 | msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); | 86 | msp_maps = kcalloc(fcnt, sizeof(*msp_maps), GFP_KERNEL); |
87 | if (!msp_maps) | 87 | if (!msp_maps) |
88 | goto free_msp_parts; | 88 | goto free_msp_parts; |
89 | 89 | ||
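The kcalloc(n, sizeof(*ptr), ...) form used above zeroes the arrays, lets the allocator check the n * size multiplication for overflow, and keeps the element size tied to the pointer's type; a mismatched explicit sizeof is the over-allocation the commit title ("Allocating too much in init_msp_flash()") refers to. A generic illustration of the pattern (not code from this driver):

#include <linux/slab.h>
#include <linux/mtd/map.h>

/* Allocate a zeroed array of n map_info structures.  sizeof(*maps)
 * always matches the pointed-to type, and kcalloc() guards the
 * n * size multiplication against overflow. */
static struct map_info *alloc_maps(size_t n)
{
    struct map_info *maps;

    maps = kcalloc(n, sizeof(*maps), GFP_KERNEL);
    return maps; /* NULL on allocation failure */
}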
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index 142fc3d79463..784c6e1a0391 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -230,8 +230,10 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, | |||
230 | 230 | ||
231 | info->mtd = mtd_concat_create(cdev, info->num_subdev, | 231 | info->mtd = mtd_concat_create(cdev, info->num_subdev, |
232 | plat->name); | 232 | plat->name); |
233 | if (info->mtd == NULL) | 233 | if (info->mtd == NULL) { |
234 | ret = -ENXIO; | 234 | ret = -ENXIO; |
235 | goto err; | ||
236 | } | ||
235 | } | 237 | } |
236 | info->mtd->dev.parent = &pdev->dev; | 238 | info->mtd->dev.parent = &pdev->dev; |
237 | 239 | ||
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index f05e0e9eb2f7..21ff58099f3b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -438,7 +438,7 @@ config MTD_NAND_FSL_ELBC | |||
438 | 438 | ||
439 | config MTD_NAND_FSL_IFC | 439 | config MTD_NAND_FSL_IFC |
440 | tristate "NAND support for Freescale IFC controller" | 440 | tristate "NAND support for Freescale IFC controller" |
441 | depends on MTD_NAND && FSL_SOC | 441 | depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE) |
442 | select FSL_IFC | 442 | select FSL_IFC |
443 | select MEMORY | 443 | select MEMORY |
444 | help | 444 | help |
@@ -539,7 +539,6 @@ config MTD_NAND_FSMC | |||
539 | config MTD_NAND_XWAY | 539 | config MTD_NAND_XWAY |
540 | tristate "Support for NAND on Lantiq XWAY SoC" | 540 | tristate "Support for NAND on Lantiq XWAY SoC" |
541 | depends on LANTIQ && SOC_TYPE_XWAY | 541 | depends on LANTIQ && SOC_TYPE_XWAY |
542 | select MTD_NAND_PLATFORM | ||
543 | help | 542 | help |
544 | Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached | 543 | Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached |
545 | to the External Bus Unit (EBU). | 544 | to the External Bus Unit (EBU). |
@@ -563,4 +562,11 @@ config MTD_NAND_QCOM | |||
563 | Enables support for NAND flash chips on SoCs containing the EBI2 NAND | 562 | Enables support for NAND flash chips on SoCs containing the EBI2 NAND |
564 | controller. This controller is found on IPQ806x SoC. | 563 | controller. This controller is found on IPQ806x SoC. |
565 | 564 | ||
565 | config MTD_NAND_MTK | ||
566 | tristate "Support for NAND controller on MTK SoCs" | ||
567 | depends on HAS_DMA | ||
568 | help | ||
569 | Enables support for NAND controller on MTK SoCs. | ||
570 | This controller is found on mt27xx, mt81xx, mt65xx SoCs. | ||
571 | |||
566 | endif # MTD_NAND | 572 | endif # MTD_NAND |
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index f55335373f7c..cafde6f3d957 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -57,5 +57,6 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o | |||
57 | obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o | 57 | obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o |
58 | obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ | 58 | obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ |
59 | obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o | 59 | obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o |
60 | obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o | ||
60 | 61 | ||
61 | nand-objs := nand_base.o nand_bbt.o nand_timings.o | 62 | nand-objs := nand_base.o nand_bbt.o nand_timings.o |
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index b76ad7c0144f..8eb2c64df38c 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -340,6 +340,36 @@ static const u16 brcmnand_regs_v71[] = { | |||
340 | [BRCMNAND_FC_BASE] = 0x400, | 340 | [BRCMNAND_FC_BASE] = 0x400, |
341 | }; | 341 | }; |
342 | 342 | ||
343 | /* BRCMNAND v7.2 */ | ||
344 | static const u16 brcmnand_regs_v72[] = { | ||
345 | [BRCMNAND_CMD_START] = 0x04, | ||
346 | [BRCMNAND_CMD_EXT_ADDRESS] = 0x08, | ||
347 | [BRCMNAND_CMD_ADDRESS] = 0x0c, | ||
348 | [BRCMNAND_INTFC_STATUS] = 0x14, | ||
349 | [BRCMNAND_CS_SELECT] = 0x18, | ||
350 | [BRCMNAND_CS_XOR] = 0x1c, | ||
351 | [BRCMNAND_LL_OP] = 0x20, | ||
352 | [BRCMNAND_CS0_BASE] = 0x50, | ||
353 | [BRCMNAND_CS1_BASE] = 0, | ||
354 | [BRCMNAND_CORR_THRESHOLD] = 0xdc, | ||
355 | [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0, | ||
356 | [BRCMNAND_UNCORR_COUNT] = 0xfc, | ||
357 | [BRCMNAND_CORR_COUNT] = 0x100, | ||
358 | [BRCMNAND_CORR_EXT_ADDR] = 0x10c, | ||
359 | [BRCMNAND_CORR_ADDR] = 0x110, | ||
360 | [BRCMNAND_UNCORR_EXT_ADDR] = 0x114, | ||
361 | [BRCMNAND_UNCORR_ADDR] = 0x118, | ||
362 | [BRCMNAND_SEMAPHORE] = 0x150, | ||
363 | [BRCMNAND_ID] = 0x194, | ||
364 | [BRCMNAND_ID_EXT] = 0x198, | ||
365 | [BRCMNAND_LL_RDATA] = 0x19c, | ||
366 | [BRCMNAND_OOB_READ_BASE] = 0x200, | ||
367 | [BRCMNAND_OOB_READ_10_BASE] = 0, | ||
368 | [BRCMNAND_OOB_WRITE_BASE] = 0x400, | ||
369 | [BRCMNAND_OOB_WRITE_10_BASE] = 0, | ||
370 | [BRCMNAND_FC_BASE] = 0x600, | ||
371 | }; | ||
372 | |||
343 | enum brcmnand_cs_reg { | 373 | enum brcmnand_cs_reg { |
344 | BRCMNAND_CS_CFG_EXT = 0, | 374 | BRCMNAND_CS_CFG_EXT = 0, |
345 | BRCMNAND_CS_CFG, | 375 | BRCMNAND_CS_CFG, |
@@ -435,7 +465,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) | |||
435 | } | 465 | } |
436 | 466 | ||
437 | /* Register offsets */ | 467 | /* Register offsets */ |
438 | if (ctrl->nand_version >= 0x0701) | 468 | if (ctrl->nand_version >= 0x0702) |
469 | ctrl->reg_offsets = brcmnand_regs_v72; | ||
470 | else if (ctrl->nand_version >= 0x0701) | ||
439 | ctrl->reg_offsets = brcmnand_regs_v71; | 471 | ctrl->reg_offsets = brcmnand_regs_v71; |
440 | else if (ctrl->nand_version >= 0x0600) | 472 | else if (ctrl->nand_version >= 0x0600) |
441 | ctrl->reg_offsets = brcmnand_regs_v60; | 473 | ctrl->reg_offsets = brcmnand_regs_v60; |
@@ -480,7 +512,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl) | |||
480 | } | 512 | } |
481 | 513 | ||
482 | /* Maximum spare area sector size (per 512B) */ | 514 | /* Maximum spare area sector size (per 512B) */ |
483 | if (ctrl->nand_version >= 0x0600) | 515 | if (ctrl->nand_version >= 0x0702) |
516 | ctrl->max_oob = 128; | ||
517 | else if (ctrl->nand_version >= 0x0600) | ||
484 | ctrl->max_oob = 64; | 518 | ctrl->max_oob = 64; |
485 | else if (ctrl->nand_version >= 0x0500) | 519 | else if (ctrl->nand_version >= 0x0500) |
486 | ctrl->max_oob = 32; | 520 | ctrl->max_oob = 32; |
@@ -583,14 +617,20 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val) | |||
583 | enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; | 617 | enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD; |
584 | int cs = host->cs; | 618 | int cs = host->cs; |
585 | 619 | ||
586 | if (ctrl->nand_version >= 0x0600) | 620 | if (ctrl->nand_version >= 0x0702) |
621 | bits = 7; | ||
622 | else if (ctrl->nand_version >= 0x0600) | ||
587 | bits = 6; | 623 | bits = 6; |
588 | else if (ctrl->nand_version >= 0x0500) | 624 | else if (ctrl->nand_version >= 0x0500) |
589 | bits = 5; | 625 | bits = 5; |
590 | else | 626 | else |
591 | bits = 4; | 627 | bits = 4; |
592 | 628 | ||
593 | if (ctrl->nand_version >= 0x0600) { | 629 | if (ctrl->nand_version >= 0x0702) { |
630 | if (cs >= 4) | ||
631 | reg = BRCMNAND_CORR_THRESHOLD_EXT; | ||
632 | shift = (cs % 4) * bits; | ||
633 | } else if (ctrl->nand_version >= 0x0600) { | ||
594 | if (cs >= 5) | 634 | if (cs >= 5) |
595 | reg = BRCMNAND_CORR_THRESHOLD_EXT; | 635 | reg = BRCMNAND_CORR_THRESHOLD_EXT; |
596 | shift = (cs % 5) * bits; | 636 | shift = (cs % 5) * bits; |
@@ -631,19 +671,28 @@ enum { | |||
631 | 671 | ||
632 | static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) | 672 | static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) |
633 | { | 673 | { |
634 | if (ctrl->nand_version >= 0x0600) | 674 | if (ctrl->nand_version >= 0x0702) |
675 | return GENMASK(7, 0); | ||
676 | else if (ctrl->nand_version >= 0x0600) | ||
635 | return GENMASK(6, 0); | 677 | return GENMASK(6, 0); |
636 | else | 678 | else |
637 | return GENMASK(5, 0); | 679 | return GENMASK(5, 0); |
638 | } | 680 | } |
639 | 681 | ||
640 | #define NAND_ACC_CONTROL_ECC_SHIFT 16 | 682 | #define NAND_ACC_CONTROL_ECC_SHIFT 16 |
683 | #define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13 | ||
641 | 684 | ||
642 | static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) | 685 | static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) |
643 | { | 686 | { |
644 | u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; | 687 | u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; |
645 | 688 | ||
646 | return mask << NAND_ACC_CONTROL_ECC_SHIFT; | 689 | mask <<= NAND_ACC_CONTROL_ECC_SHIFT; |
690 | |||
691 | /* v7.2 includes additional ECC levels */ | ||
692 | if (ctrl->nand_version >= 0x0702) | ||
693 | mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT; | ||
694 | |||
695 | return mask; | ||
647 | } | 696 | } |
648 | 697 | ||
649 | static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) | 698 | static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) |
@@ -667,7 +716,9 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en) | |||
667 | 716 | ||
668 | static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) | 717 | static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) |
669 | { | 718 | { |
670 | if (ctrl->nand_version >= 0x0600) | 719 | if (ctrl->nand_version >= 0x0702) |
720 | return 9; | ||
721 | else if (ctrl->nand_version >= 0x0600) | ||
671 | return 7; | 722 | return 7; |
672 | else if (ctrl->nand_version >= 0x0500) | 723 | else if (ctrl->nand_version >= 0x0500) |
673 | return 6; | 724 | return 6; |
@@ -773,10 +824,16 @@ enum brcmnand_llop_type { | |||
773 | * Internal support functions | 824 | * Internal support functions |
774 | ***********************************************************************/ | 825 | ***********************************************************************/ |
775 | 826 | ||
776 | static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg) | 827 | static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl, |
828 | struct brcmnand_cfg *cfg) | ||
777 | { | 829 | { |
778 | return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && | 830 | if (ctrl->nand_version <= 0x0701) |
779 | cfg->ecc_level == 15; | 831 | return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && |
832 | cfg->ecc_level == 15; | ||
833 | else | ||
834 | return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 && | ||
835 | cfg->ecc_level == 15) || | ||
836 | (cfg->spare_area_size == 28 && cfg->ecc_level == 16)); | ||
780 | } | 837 | } |
781 | 838 | ||
782 | /* | 839 | /* |
@@ -931,7 +988,7 @@ static int brcmstb_choose_ecc_layout(struct brcmnand_host *host) | |||
931 | if (p->sector_size_1k) | 988 | if (p->sector_size_1k) |
932 | ecc_level <<= 1; | 989 | ecc_level <<= 1; |
933 | 990 | ||
934 | if (is_hamming_ecc(p)) { | 991 | if (is_hamming_ecc(host->ctrl, p)) { |
935 | ecc->bytes = 3 * sectors; | 992 | ecc->bytes = 3 * sectors; |
936 | mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); | 993 | mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops); |
937 | return 0; | 994 | return 0; |
@@ -1108,7 +1165,7 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd) | |||
1108 | ctrl->cmd_pending = cmd; | 1165 | ctrl->cmd_pending = cmd; |
1109 | 1166 | ||
1110 | intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); | 1167 | intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); |
1111 | BUG_ON(!(intfc & INTFC_CTLR_READY)); | 1168 | WARN_ON(!(intfc & INTFC_CTLR_READY)); |
1112 | 1169 | ||
1113 | mb(); /* flush previous writes */ | 1170 | mb(); /* flush previous writes */ |
1114 | brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, | 1171 | brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, |
@@ -1545,6 +1602,56 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip, | |||
1545 | return ret; | 1602 | return ret; |
1546 | } | 1603 | } |
1547 | 1604 | ||
1605 | /* | ||
1606 | * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC | ||
1607 | * error | ||
1608 | * | ||
1609 | * Because the HW ECC signals an ECC error if an erased page has even a single | ||
1610 | * bitflip, we must check each ECC error to see if it is actually an erased | ||
1611 | * page with bitflips, not a truly corrupted page. | ||
1612 | * | ||
1613 | * On a real error, return a negative error code (-EBADMSG for ECC error), and | ||
1614 | * buf will contain raw data. | ||
1615 | * Otherwise, buf gets filled with 0xffs and return the maximum number of | ||
1616 | * bitflips-per-ECC-sector to the caller. | ||
1617 | * | ||
1618 | */ | ||
1619 | static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, | ||
1620 | struct nand_chip *chip, void *buf, u64 addr) | ||
1621 | { | ||
1622 | int i, sas; | ||
1623 | void *oob = chip->oob_poi; | ||
1624 | int bitflips = 0; | ||
1625 | int page = addr >> chip->page_shift; | ||
1626 | int ret; | ||
1627 | |||
1628 | if (!buf) { | ||
1629 | buf = chip->buffers->databuf; | ||
1630 | /* Invalidate page cache */ | ||
1631 | chip->pagebuf = -1; | ||
1632 | } | ||
1633 | |||
1634 | sas = mtd->oobsize / chip->ecc.steps; | ||
1635 | |||
1636 | /* read without ecc for verification */ | ||
1637 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); | ||
1638 | ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page); | ||
1639 | if (ret) | ||
1640 | return ret; | ||
1641 | |||
1642 | for (i = 0; i < chip->ecc.steps; i++, oob += sas) { | ||
1643 | ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, | ||
1644 | oob, sas, NULL, 0, | ||
1645 | chip->ecc.strength); | ||
1646 | if (ret < 0) | ||
1647 | return ret; | ||
1648 | |||
1649 | bitflips = max(bitflips, ret); | ||
1650 | } | ||
1651 | |||
1652 | return bitflips; | ||
1653 | } | ||
1654 | |||
1548 | static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, | 1655 | static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, |
1549 | u64 addr, unsigned int trans, u32 *buf, u8 *oob) | 1656 | u64 addr, unsigned int trans, u32 *buf, u8 *oob) |
1550 | { | 1657 | { |
@@ -1552,9 +1659,11 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, | |||
1552 | struct brcmnand_controller *ctrl = host->ctrl; | 1659 | struct brcmnand_controller *ctrl = host->ctrl; |
1553 | u64 err_addr = 0; | 1660 | u64 err_addr = 0; |
1554 | int err; | 1661 | int err; |
1662 | bool retry = true; | ||
1555 | 1663 | ||
1556 | dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); | 1664 | dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); |
1557 | 1665 | ||
1666 | try_dmaread: | ||
1558 | brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); | 1667 | brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); |
1559 | 1668 | ||
1560 | if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { | 1669 | if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { |
@@ -1575,6 +1684,34 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, | |||
1575 | } | 1684 | } |
1576 | 1685 | ||
1577 | if (mtd_is_eccerr(err)) { | 1686 | if (mtd_is_eccerr(err)) { |
1687 | /* | ||
1688 | * On controller versions 7.0 and 7.1, a DMA read issued after | ||
1689 | * a prior PIO read that reported an uncorrectable error may | ||
1690 | * have that error latched by the DMA engine; it is cleared | ||
1691 | * only on a subsequent DMA read. So just retry once to clear | ||
1692 | * a possible false error reported for the current | ||
1693 | * DMA read. | ||
1694 | */ | ||
1695 | if ((ctrl->nand_version == 0x0700) || | ||
1696 | (ctrl->nand_version == 0x0701)) { | ||
1697 | if (retry) { | ||
1698 | retry = false; | ||
1699 | goto try_dmaread; | ||
1700 | } | ||
1701 | } | ||
1702 | |||
1703 | /* | ||
1704 | * Controller version 7.2 has hw encoder to detect erased page | ||
1705 | * bitflips, apply sw verification for older controllers only | ||
1706 | */ | ||
1707 | if (ctrl->nand_version < 0x0702) { | ||
1708 | err = brcmstb_nand_verify_erased_page(mtd, chip, buf, | ||
1709 | addr); | ||
1710 | /* erased page bitflips corrected */ | ||
1711 | if (err > 0) | ||
1712 | return err; | ||
1713 | } | ||
1714 | |||
1578 | dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", | 1715 | dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", |
1579 | (unsigned long long)err_addr); | 1716 | (unsigned long long)err_addr); |
1580 | mtd->ecc_stats.failed++; | 1717 | mtd->ecc_stats.failed++; |
@@ -1857,7 +1994,8 @@ static int brcmnand_set_cfg(struct brcmnand_host *host, | |||
1857 | return 0; | 1994 | return 0; |
1858 | } | 1995 | } |
1859 | 1996 | ||
1860 | static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg) | 1997 | static void brcmnand_print_cfg(struct brcmnand_host *host, |
1998 | char *buf, struct brcmnand_cfg *cfg) | ||
1861 | { | 1999 | { |
1862 | buf += sprintf(buf, | 2000 | buf += sprintf(buf, |
1863 | "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", | 2001 | "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", |
@@ -1868,7 +2006,7 @@ static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg) | |||
1868 | cfg->spare_area_size, cfg->device_width); | 2006 | cfg->spare_area_size, cfg->device_width); |
1869 | 2007 | ||
1870 | /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ | 2008 | /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ |
1871 | if (is_hamming_ecc(cfg)) | 2009 | if (is_hamming_ecc(host->ctrl, cfg)) |
1872 | sprintf(buf, ", Hamming ECC"); | 2010 | sprintf(buf, ", Hamming ECC"); |
1873 | else if (cfg->sector_size_1k) | 2011 | else if (cfg->sector_size_1k) |
1874 | sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); | 2012 | sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); |
@@ -1987,7 +2125,7 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) | |||
1987 | 2125 | ||
1988 | brcmnand_set_ecc_enabled(host, 1); | 2126 | brcmnand_set_ecc_enabled(host, 1); |
1989 | 2127 | ||
1990 | brcmnand_print_cfg(msg, cfg); | 2128 | brcmnand_print_cfg(host, msg, cfg); |
1991 | dev_info(ctrl->dev, "detected %s\n", msg); | 2129 | dev_info(ctrl->dev, "detected %s\n", msg); |
1992 | 2130 | ||
1993 | /* Configure ACC_CONTROL */ | 2131 | /* Configure ACC_CONTROL */ |
@@ -1995,6 +2133,10 @@ static int brcmnand_setup_dev(struct brcmnand_host *host) | |||
1995 | tmp = nand_readreg(ctrl, offs); | 2133 | tmp = nand_readreg(ctrl, offs); |
1996 | tmp &= ~ACC_CONTROL_PARTIAL_PAGE; | 2134 | tmp &= ~ACC_CONTROL_PARTIAL_PAGE; |
1997 | tmp &= ~ACC_CONTROL_RD_ERASED; | 2135 | tmp &= ~ACC_CONTROL_RD_ERASED; |
2136 | |||
2137 | /* We need to turn on Read from erased pages protected by ECC */ | ||
2138 | if (ctrl->nand_version >= 0x0702) | ||
2139 | tmp |= ACC_CONTROL_RD_ERASED; | ||
1998 | tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; | 2140 | tmp &= ~ACC_CONTROL_FAST_PGM_RDIN; |
1999 | if (ctrl->features & BRCMNAND_HAS_PREFETCH) { | 2141 | if (ctrl->features & BRCMNAND_HAS_PREFETCH) { |
2000 | /* | 2142 | /* |
@@ -2195,6 +2337,7 @@ static const struct of_device_id brcmnand_of_match[] = { | |||
2195 | { .compatible = "brcm,brcmnand-v6.2" }, | 2337 | { .compatible = "brcm,brcmnand-v6.2" }, |
2196 | { .compatible = "brcm,brcmnand-v7.0" }, | 2338 | { .compatible = "brcm,brcmnand-v7.0" }, |
2197 | { .compatible = "brcm,brcmnand-v7.1" }, | 2339 | { .compatible = "brcm,brcmnand-v7.1" }, |
2340 | { .compatible = "brcm,brcmnand-v7.2" }, | ||
2198 | {}, | 2341 | {}, |
2199 | }; | 2342 | }; |
2200 | MODULE_DEVICE_TABLE(of, brcmnand_of_match); | 2343 | MODULE_DEVICE_TABLE(of, brcmnand_of_match); |
diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c
index d74f4ba4a6f4..731c6051d91e 100644
--- a/drivers/mtd/nand/jz4780_bch.c
+++ b/drivers/mtd/nand/jz4780_bch.c
@@ -375,6 +375,6 @@ static struct platform_driver jz4780_bch_driver = { | |||
375 | module_platform_driver(jz4780_bch_driver); | 375 | module_platform_driver(jz4780_bch_driver); |
376 | 376 | ||
377 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); | 377 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); |
378 | MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); | 378 | MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); |
379 | MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver"); | 379 | MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver"); |
380 | MODULE_LICENSE("GPL v2"); | 380 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
index daf3c4217f4d..175f67da25af 100644
--- a/drivers/mtd/nand/jz4780_nand.c
+++ b/drivers/mtd/nand/jz4780_nand.c
@@ -412,6 +412,6 @@ static struct platform_driver jz4780_nand_driver = { | |||
412 | module_platform_driver(jz4780_nand_driver); | 412 | module_platform_driver(jz4780_nand_driver); |
413 | 413 | ||
414 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); | 414 | MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>"); |
415 | MODULE_AUTHOR("Harvey Hunt <harvey.hunt@imgtec.com>"); | 415 | MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>"); |
416 | MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver"); | 416 | MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver"); |
417 | MODULE_LICENSE("GPL v2"); | 417 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
new file mode 100644
index 000000000000..25a4fbd4d24a
--- /dev/null
+++ b/drivers/mtd/nand/mtk_ecc.c
@@ -0,0 +1,530 @@ | |||
1 | /* | ||
2 | * MTK ECC controller driver. | ||
3 | * Copyright (C) 2016 MediaTek Inc. | ||
4 | * Authors: Xiaolei Li <xiaolei.li@mediatek.com> | ||
5 | * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/module.h> | ||
22 | #include <linux/iopoll.h> | ||
23 | #include <linux/of.h> | ||
24 | #include <linux/of_platform.h> | ||
25 | #include <linux/mutex.h> | ||
26 | |||
27 | #include "mtk_ecc.h" | ||
28 | |||
29 | #define ECC_IDLE_MASK BIT(0) | ||
30 | #define ECC_IRQ_EN BIT(0) | ||
31 | #define ECC_OP_ENABLE (1) | ||
32 | #define ECC_OP_DISABLE (0) | ||
33 | |||
34 | #define ECC_ENCCON (0x00) | ||
35 | #define ECC_ENCCNFG (0x04) | ||
36 | #define ECC_CNFG_4BIT (0) | ||
37 | #define ECC_CNFG_6BIT (1) | ||
38 | #define ECC_CNFG_8BIT (2) | ||
39 | #define ECC_CNFG_10BIT (3) | ||
40 | #define ECC_CNFG_12BIT (4) | ||
41 | #define ECC_CNFG_14BIT (5) | ||
42 | #define ECC_CNFG_16BIT (6) | ||
43 | #define ECC_CNFG_18BIT (7) | ||
44 | #define ECC_CNFG_20BIT (8) | ||
45 | #define ECC_CNFG_22BIT (9) | ||
46 | #define ECC_CNFG_24BIT (0xa) | ||
47 | #define ECC_CNFG_28BIT (0xb) | ||
48 | #define ECC_CNFG_32BIT (0xc) | ||
49 | #define ECC_CNFG_36BIT (0xd) | ||
50 | #define ECC_CNFG_40BIT (0xe) | ||
51 | #define ECC_CNFG_44BIT (0xf) | ||
52 | #define ECC_CNFG_48BIT (0x10) | ||
53 | #define ECC_CNFG_52BIT (0x11) | ||
54 | #define ECC_CNFG_56BIT (0x12) | ||
55 | #define ECC_CNFG_60BIT (0x13) | ||
56 | #define ECC_MODE_SHIFT (5) | ||
57 | #define ECC_MS_SHIFT (16) | ||
58 | #define ECC_ENCDIADDR (0x08) | ||
59 | #define ECC_ENCIDLE (0x0C) | ||
60 | #define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32)) | ||
61 | #define ECC_ENCIRQ_EN (0x80) | ||
62 | #define ECC_ENCIRQ_STA (0x84) | ||
63 | #define ECC_DECCON (0x100) | ||
64 | #define ECC_DECCNFG (0x104) | ||
65 | #define DEC_EMPTY_EN BIT(31) | ||
66 | #define DEC_CNFG_CORRECT (0x3 << 12) | ||
67 | #define ECC_DECIDLE (0x10C) | ||
68 | #define ECC_DECENUM0 (0x114) | ||
69 | #define ERR_MASK (0x3f) | ||
70 | #define ECC_DECDONE (0x124) | ||
71 | #define ECC_DECIRQ_EN (0x200) | ||
72 | #define ECC_DECIRQ_STA (0x204) | ||
73 | |||
74 | #define ECC_TIMEOUT (500000) | ||
75 | |||
76 | #define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) | ||
77 | #define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) | ||
78 | #define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \ | ||
79 | ECC_ENCIRQ_EN : ECC_DECIRQ_EN) | ||
80 | |||
81 | struct mtk_ecc { | ||
82 | struct device *dev; | ||
83 | void __iomem *regs; | ||
84 | struct clk *clk; | ||
85 | |||
86 | struct completion done; | ||
87 | struct mutex lock; | ||
88 | u32 sectors; | ||
89 | }; | ||
90 | |||
91 | static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, | ||
92 | enum mtk_ecc_operation op) | ||
93 | { | ||
94 | struct device *dev = ecc->dev; | ||
95 | u32 val; | ||
96 | int ret; | ||
97 | |||
98 | ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, | ||
99 | val & ECC_IDLE_MASK, | ||
100 | 10, ECC_TIMEOUT); | ||
101 | if (ret) | ||
102 | dev_warn(dev, "%s NOT idle\n", | ||
103 | op == ECC_ENCODE ? "encoder" : "decoder"); | ||
104 | } | ||
105 | |||
106 | static irqreturn_t mtk_ecc_irq(int irq, void *id) | ||
107 | { | ||
108 | struct mtk_ecc *ecc = id; | ||
109 | enum mtk_ecc_operation op; | ||
110 | u32 dec, enc; | ||
111 | |||
112 | dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN; | ||
113 | if (dec) { | ||
114 | op = ECC_DECODE; | ||
115 | dec = readw(ecc->regs + ECC_DECDONE); | ||
116 | if (dec & ecc->sectors) { | ||
117 | ecc->sectors = 0; | ||
118 | complete(&ecc->done); | ||
119 | } else { | ||
120 | return IRQ_HANDLED; | ||
121 | } | ||
122 | } else { | ||
123 | enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN; | ||
124 | if (enc) { | ||
125 | op = ECC_ENCODE; | ||
126 | complete(&ecc->done); | ||
127 | } else { | ||
128 | return IRQ_NONE; | ||
129 | } | ||
130 | } | ||
131 | |||
132 | writel(0, ecc->regs + ECC_IRQ_REG(op)); | ||
133 | |||
134 | return IRQ_HANDLED; | ||
135 | } | ||
136 | |||
137 | static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) | ||
138 | { | ||
139 | u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz; | ||
140 | u32 reg; | ||
141 | |||
142 | switch (config->strength) { | ||
143 | case 4: | ||
144 | ecc_bit = ECC_CNFG_4BIT; | ||
145 | break; | ||
146 | case 6: | ||
147 | ecc_bit = ECC_CNFG_6BIT; | ||
148 | break; | ||
149 | case 8: | ||
150 | ecc_bit = ECC_CNFG_8BIT; | ||
151 | break; | ||
152 | case 10: | ||
153 | ecc_bit = ECC_CNFG_10BIT; | ||
154 | break; | ||
155 | case 12: | ||
156 | ecc_bit = ECC_CNFG_12BIT; | ||
157 | break; | ||
158 | case 14: | ||
159 | ecc_bit = ECC_CNFG_14BIT; | ||
160 | break; | ||
161 | case 16: | ||
162 | ecc_bit = ECC_CNFG_16BIT; | ||
163 | break; | ||
164 | case 18: | ||
165 | ecc_bit = ECC_CNFG_18BIT; | ||
166 | break; | ||
167 | case 20: | ||
168 | ecc_bit = ECC_CNFG_20BIT; | ||
169 | break; | ||
170 | case 22: | ||
171 | ecc_bit = ECC_CNFG_22BIT; | ||
172 | break; | ||
173 | case 24: | ||
174 | ecc_bit = ECC_CNFG_24BIT; | ||
175 | break; | ||
176 | case 28: | ||
177 | ecc_bit = ECC_CNFG_28BIT; | ||
178 | break; | ||
179 | case 32: | ||
180 | ecc_bit = ECC_CNFG_32BIT; | ||
181 | break; | ||
182 | case 36: | ||
183 | ecc_bit = ECC_CNFG_36BIT; | ||
184 | break; | ||
185 | case 40: | ||
186 | ecc_bit = ECC_CNFG_40BIT; | ||
187 | break; | ||
188 | case 44: | ||
189 | ecc_bit = ECC_CNFG_44BIT; | ||
190 | break; | ||
191 | case 48: | ||
192 | ecc_bit = ECC_CNFG_48BIT; | ||
193 | break; | ||
194 | case 52: | ||
195 | ecc_bit = ECC_CNFG_52BIT; | ||
196 | break; | ||
197 | case 56: | ||
198 | ecc_bit = ECC_CNFG_56BIT; | ||
199 | break; | ||
200 | case 60: | ||
201 | ecc_bit = ECC_CNFG_60BIT; | ||
202 | break; | ||
203 | default: | ||
204 | dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n", | ||
205 | config->strength); | ||
206 | } | ||
207 | |||
208 | if (config->op == ECC_ENCODE) { | ||
209 | /* configure ECC encoder (in bits) */ | ||
210 | enc_sz = config->len << 3; | ||
211 | |||
212 | reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); | ||
213 | reg |= (enc_sz << ECC_MS_SHIFT); | ||
214 | writel(reg, ecc->regs + ECC_ENCCNFG); | ||
215 | |||
216 | if (config->mode != ECC_NFI_MODE) | ||
217 | writel(lower_32_bits(config->addr), | ||
218 | ecc->regs + ECC_ENCDIADDR); | ||
219 | |||
220 | } else { | ||
221 | /* configure ECC decoder (in bits) */ | ||
222 | dec_sz = (config->len << 3) + | ||
223 | config->strength * ECC_PARITY_BITS; | ||
224 | |||
225 | reg = ecc_bit | (config->mode << ECC_MODE_SHIFT); | ||
226 | reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; | ||
227 | reg |= DEC_EMPTY_EN; | ||
228 | writel(reg, ecc->regs + ECC_DECCNFG); | ||
229 | |||
230 | if (config->sectors) | ||
231 | ecc->sectors = 1 << (config->sectors - 1); | ||
232 | } | ||
233 | } | ||
234 | |||
235 | void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, | ||
236 | int sectors) | ||
237 | { | ||
238 | u32 offset, i, err; | ||
239 | u32 bitflips = 0; | ||
240 | |||
241 | stats->corrected = 0; | ||
242 | stats->failed = 0; | ||
243 | |||
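	/*
	 * Each ECC_DECENUMx register packs the error counts of four sectors,
	 * one byte per sector; a count equal to ERR_MASK marks the sector as
	 * uncorrectable.
	 */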
244 | for (i = 0; i < sectors; i++) { | ||
245 | offset = (i >> 2) << 2; | ||
246 | err = readl(ecc->regs + ECC_DECENUM0 + offset); | ||
247 | err = err >> ((i % 4) * 8); | ||
248 | err &= ERR_MASK; | ||
249 | if (err == ERR_MASK) { | ||
250 | /* uncorrectable errors */ | ||
251 | stats->failed++; | ||
252 | continue; | ||
253 | } | ||
254 | |||
255 | stats->corrected += err; | ||
256 | bitflips = max_t(u32, bitflips, err); | ||
257 | } | ||
258 | |||
259 | stats->bitflips = bitflips; | ||
260 | } | ||
261 | EXPORT_SYMBOL(mtk_ecc_get_stats); | ||
262 | |||
263 | void mtk_ecc_release(struct mtk_ecc *ecc) | ||
264 | { | ||
265 | clk_disable_unprepare(ecc->clk); | ||
266 | put_device(ecc->dev); | ||
267 | } | ||
268 | EXPORT_SYMBOL(mtk_ecc_release); | ||
269 | |||
270 | static void mtk_ecc_hw_init(struct mtk_ecc *ecc) | ||
271 | { | ||
272 | mtk_ecc_wait_idle(ecc, ECC_ENCODE); | ||
273 | writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); | ||
274 | |||
275 | mtk_ecc_wait_idle(ecc, ECC_DECODE); | ||
276 | writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); | ||
277 | } | ||
278 | |||
279 | static struct mtk_ecc *mtk_ecc_get(struct device_node *np) | ||
280 | { | ||
281 | struct platform_device *pdev; | ||
282 | struct mtk_ecc *ecc; | ||
283 | |||
284 | pdev = of_find_device_by_node(np); | ||
285 | if (!pdev || !platform_get_drvdata(pdev)) | ||
286 | return ERR_PTR(-EPROBE_DEFER); | ||
287 | |||
288 | get_device(&pdev->dev); | ||
289 | ecc = platform_get_drvdata(pdev); | ||
290 | clk_prepare_enable(ecc->clk); | ||
291 | mtk_ecc_hw_init(ecc); | ||
292 | |||
293 | return ecc; | ||
294 | } | ||
295 | |||
296 | struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) | ||
297 | { | ||
298 | struct mtk_ecc *ecc = NULL; | ||
299 | struct device_node *np; | ||
300 | |||
301 | np = of_parse_phandle(of_node, "ecc-engine", 0); | ||
302 | if (np) { | ||
303 | ecc = mtk_ecc_get(np); | ||
304 | of_node_put(np); | ||
305 | } | ||
306 | |||
307 | return ecc; | ||
308 | } | ||
309 | EXPORT_SYMBOL(of_mtk_ecc_get); | ||
310 | |||
311 | int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) | ||
312 | { | ||
313 | enum mtk_ecc_operation op = config->op; | ||
314 | int ret; | ||
315 | |||
316 | ret = mutex_lock_interruptible(&ecc->lock); | ||
317 | if (ret) { | ||
318 | dev_err(ecc->dev, "interrupted when attempting to lock\n"); | ||
319 | return ret; | ||
320 | } | ||
321 | |||
322 | mtk_ecc_wait_idle(ecc, op); | ||
323 | mtk_ecc_config(ecc, config); | ||
324 | writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); | ||
325 | |||
326 | init_completion(&ecc->done); | ||
327 | writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op)); | ||
328 | |||
329 | return 0; | ||
330 | } | ||
331 | EXPORT_SYMBOL(mtk_ecc_enable); | ||
332 | |||
333 | void mtk_ecc_disable(struct mtk_ecc *ecc) | ||
334 | { | ||
335 | enum mtk_ecc_operation op = ECC_ENCODE; | ||
336 | |||
337 | /* find out the running operation */ | ||
338 | if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) | ||
339 | op = ECC_DECODE; | ||
340 | |||
341 | /* disable it */ | ||
342 | mtk_ecc_wait_idle(ecc, op); | ||
343 | writew(0, ecc->regs + ECC_IRQ_REG(op)); | ||
344 | writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); | ||
345 | |||
346 | mutex_unlock(&ecc->lock); | ||
347 | } | ||
348 | EXPORT_SYMBOL(mtk_ecc_disable); | ||
349 | |||
350 | int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) | ||
351 | { | ||
352 | int ret; | ||
353 | |||
354 | ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); | ||
355 | if (!ret) { | ||
356 | dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n", | ||
357 | (op == ECC_ENCODE) ? "encoder" : "decoder"); | ||
358 | return -ETIMEDOUT; | ||
359 | } | ||
360 | |||
361 | return 0; | ||
362 | } | ||
363 | EXPORT_SYMBOL(mtk_ecc_wait_done); | ||
364 | |||
365 | int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, | ||
366 | u8 *data, u32 bytes) | ||
367 | { | ||
368 | dma_addr_t addr; | ||
369 | u32 *p, len, i; | ||
370 | int ret = 0; | ||
371 | |||
372 | addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); | ||
373 | ret = dma_mapping_error(ecc->dev, addr); | ||
374 | if (ret) { | ||
375 | dev_err(ecc->dev, "dma mapping error\n"); | ||
376 | return -EINVAL; | ||
377 | } | ||
378 | |||
379 | config->op = ECC_ENCODE; | ||
380 | config->addr = addr; | ||
381 | ret = mtk_ecc_enable(ecc, config); | ||
382 | if (ret) { | ||
383 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); | ||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); | ||
388 | if (ret) | ||
389 | goto timeout; | ||
390 | |||
391 | mtk_ecc_wait_idle(ecc, ECC_ENCODE); | ||
392 | |||
393 | /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ | ||
394 | len = (config->strength * ECC_PARITY_BITS + 7) >> 3; | ||
395 | p = (u32 *)(data + bytes); | ||
396 | |||
397 | /* write the parity bytes generated by the ECC back to the OOB region */ | ||
398 | for (i = 0; i < len; i++) | ||
399 | p[i] = readl(ecc->regs + ECC_ENCPAR(i)); | ||
400 | timeout: | ||
401 | |||
402 | dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); | ||
403 | mtk_ecc_disable(ecc); | ||
404 | |||
405 | return ret; | ||
406 | } | ||
407 | EXPORT_SYMBOL(mtk_ecc_encode); | ||
408 | |||
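/*
 * Round a requested strength down to the closest value the ECC engine
 * supports, or up to the 4-bit minimum (e.g. 30 -> 28, 3 -> 4, >60 -> 60).
 */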
409 | void mtk_ecc_adjust_strength(u32 *p) | ||
410 | { | ||
411 | u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, | ||
412 | 40, 44, 48, 52, 56, 60}; | ||
413 | int i; | ||
414 | |||
415 | for (i = 0; i < ARRAY_SIZE(ecc); i++) { | ||
416 | if (*p <= ecc[i]) { | ||
417 | if (!i) | ||
418 | *p = ecc[i]; | ||
419 | else if (*p != ecc[i]) | ||
420 | *p = ecc[i - 1]; | ||
421 | return; | ||
422 | } | ||
423 | } | ||
424 | |||
425 | *p = ecc[ARRAY_SIZE(ecc) - 1]; | ||
426 | } | ||
427 | EXPORT_SYMBOL(mtk_ecc_adjust_strength); | ||
428 | |||
429 | static int mtk_ecc_probe(struct platform_device *pdev) | ||
430 | { | ||
431 | struct device *dev = &pdev->dev; | ||
432 | struct mtk_ecc *ecc; | ||
433 | struct resource *res; | ||
434 | int irq, ret; | ||
435 | |||
436 | ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); | ||
437 | if (!ecc) | ||
438 | return -ENOMEM; | ||
439 | |||
440 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
441 | ecc->regs = devm_ioremap_resource(dev, res); | ||
442 | if (IS_ERR(ecc->regs)) { | ||
443 | dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs)); | ||
444 | return PTR_ERR(ecc->regs); | ||
445 | } | ||
446 | |||
447 | ecc->clk = devm_clk_get(dev, NULL); | ||
448 | if (IS_ERR(ecc->clk)) { | ||
449 | dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); | ||
450 | return PTR_ERR(ecc->clk); | ||
451 | } | ||
452 | |||
453 | irq = platform_get_irq(pdev, 0); | ||
454 | if (irq < 0) { | ||
455 | dev_err(dev, "failed to get irq\n"); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | |||
459 | ret = dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
460 | if (ret) { | ||
461 | dev_err(dev, "failed to set DMA mask\n"); | ||
462 | return ret; | ||
463 | } | ||
464 | |||
465 | ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); | ||
466 | if (ret) { | ||
467 | dev_err(dev, "failed to request irq\n"); | ||
468 | return -EINVAL; | ||
469 | } | ||
470 | |||
471 | ecc->dev = dev; | ||
472 | mutex_init(&ecc->lock); | ||
473 | platform_set_drvdata(pdev, ecc); | ||
474 | dev_info(dev, "probed\n"); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | #ifdef CONFIG_PM_SLEEP | ||
480 | static int mtk_ecc_suspend(struct device *dev) | ||
481 | { | ||
482 | struct mtk_ecc *ecc = dev_get_drvdata(dev); | ||
483 | |||
484 | clk_disable_unprepare(ecc->clk); | ||
485 | |||
486 | return 0; | ||
487 | } | ||
488 | |||
489 | static int mtk_ecc_resume(struct device *dev) | ||
490 | { | ||
491 | struct mtk_ecc *ecc = dev_get_drvdata(dev); | ||
492 | int ret; | ||
493 | |||
494 | ret = clk_prepare_enable(ecc->clk); | ||
495 | if (ret) { | ||
496 | dev_err(dev, "failed to enable clk\n"); | ||
497 | return ret; | ||
498 | } | ||
499 | |||
500 | mtk_ecc_hw_init(ecc); | ||
501 | |||
502 | return 0; | ||
503 | } | ||
504 | |||
505 | static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); | ||
506 | #endif | ||
507 | |||
508 | static const struct of_device_id mtk_ecc_dt_match[] = { | ||
509 | { .compatible = "mediatek,mt2701-ecc" }, | ||
510 | {}, | ||
511 | }; | ||
512 | |||
513 | MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); | ||
514 | |||
515 | static struct platform_driver mtk_ecc_driver = { | ||
516 | .probe = mtk_ecc_probe, | ||
517 | .driver = { | ||
518 | .name = "mtk-ecc", | ||
519 | .of_match_table = of_match_ptr(mtk_ecc_dt_match), | ||
520 | #ifdef CONFIG_PM_SLEEP | ||
521 | .pm = &mtk_ecc_pm_ops, | ||
522 | #endif | ||
523 | }, | ||
524 | }; | ||
525 | |||
526 | module_platform_driver(mtk_ecc_driver); | ||
527 | |||
528 | MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); | ||
529 | MODULE_DESCRIPTION("MTK NAND ECC Driver"); | ||
530 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h new file mode 100644 index 000000000000..cbeba5cd1c13 --- /dev/null +++ b/drivers/mtd/nand/mtk_ecc.h | |||
@@ -0,0 +1,50 @@ | |||
1 | /* | ||
2 | * MTK SDG1 ECC controller | ||
3 | * | ||
4 | * Copyright (c) 2016 Mediatek | ||
5 | * Authors: Xiaolei Li <xiaolei.li@mediatek.com> | ||
6 | * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License version 2 as published | ||
9 | * by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ | ||
13 | #define __DRIVERS_MTD_NAND_MTK_ECC_H__ | ||
14 | |||
15 | #include <linux/types.h> | ||
16 | |||
17 | #define ECC_PARITY_BITS (14) | ||
18 | |||
19 | enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; | ||
20 | enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; | ||
21 | |||
22 | struct device_node; | ||
23 | struct mtk_ecc; | ||
24 | |||
25 | struct mtk_ecc_stats { | ||
26 | u32 corrected; | ||
27 | u32 bitflips; | ||
28 | u32 failed; | ||
29 | }; | ||
30 | |||
31 | struct mtk_ecc_config { | ||
32 | enum mtk_ecc_operation op; | ||
33 | enum mtk_ecc_mode mode; | ||
34 | dma_addr_t addr; | ||
35 | u32 strength; | ||
36 | u32 sectors; | ||
37 | u32 len; | ||
38 | }; | ||
39 | |||
40 | int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); | ||
41 | void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); | ||
42 | int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); | ||
43 | int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); | ||
44 | void mtk_ecc_disable(struct mtk_ecc *); | ||
45 | void mtk_ecc_adjust_strength(u32 *); | ||
46 | |||
47 | struct mtk_ecc *of_mtk_ecc_get(struct device_node *); | ||
48 | void mtk_ecc_release(struct mtk_ecc *); | ||
49 | |||
50 | #endif | ||
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c new file mode 100644 index 000000000000..ddaa2acb9dd7 --- /dev/null +++ b/drivers/mtd/nand/mtk_nand.c | |||
@@ -0,0 +1,1526 @@ | |||
1 | /* | ||
2 | * MTK NAND Flash controller driver. | ||
3 | * Copyright (C) 2016 MediaTek Inc. | ||
4 | * Authors: Xiaolei Li <xiaolei.li@mediatek.com> | ||
5 | * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/dma-mapping.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/mtd/nand.h> | ||
23 | #include <linux/mtd/mtd.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/iopoll.h> | ||
26 | #include <linux/of.h> | ||
27 | #include "mtk_ecc.h" | ||
28 | |||
29 | /* NAND controller register definition */ | ||
30 | #define NFI_CNFG (0x00) | ||
31 | #define CNFG_AHB BIT(0) | ||
32 | #define CNFG_READ_EN BIT(1) | ||
33 | #define CNFG_DMA_BURST_EN BIT(2) | ||
34 | #define CNFG_BYTE_RW BIT(6) | ||
35 | #define CNFG_HW_ECC_EN BIT(8) | ||
36 | #define CNFG_AUTO_FMT_EN BIT(9) | ||
37 | #define CNFG_OP_CUST (6 << 12) | ||
38 | #define NFI_PAGEFMT (0x04) | ||
39 | #define PAGEFMT_FDM_ECC_SHIFT (12) | ||
40 | #define PAGEFMT_FDM_SHIFT (8) | ||
41 | #define PAGEFMT_SPARE_16 (0) | ||
42 | #define PAGEFMT_SPARE_26 (1) | ||
43 | #define PAGEFMT_SPARE_27 (2) | ||
44 | #define PAGEFMT_SPARE_28 (3) | ||
45 | #define PAGEFMT_SPARE_32 (4) | ||
46 | #define PAGEFMT_SPARE_36 (5) | ||
47 | #define PAGEFMT_SPARE_40 (6) | ||
48 | #define PAGEFMT_SPARE_44 (7) | ||
49 | #define PAGEFMT_SPARE_48 (8) | ||
50 | #define PAGEFMT_SPARE_49 (9) | ||
51 | #define PAGEFMT_SPARE_50 (0xa) | ||
52 | #define PAGEFMT_SPARE_51 (0xb) | ||
53 | #define PAGEFMT_SPARE_52 (0xc) | ||
54 | #define PAGEFMT_SPARE_62 (0xd) | ||
55 | #define PAGEFMT_SPARE_63 (0xe) | ||
56 | #define PAGEFMT_SPARE_64 (0xf) | ||
57 | #define PAGEFMT_SPARE_SHIFT (4) | ||
58 | #define PAGEFMT_SEC_SEL_512 BIT(2) | ||
59 | #define PAGEFMT_512_2K (0) | ||
60 | #define PAGEFMT_2K_4K (1) | ||
61 | #define PAGEFMT_4K_8K (2) | ||
62 | #define PAGEFMT_8K_16K (3) | ||
63 | /* NFI control */ | ||
64 | #define NFI_CON (0x08) | ||
65 | #define CON_FIFO_FLUSH BIT(0) | ||
66 | #define CON_NFI_RST BIT(1) | ||
67 | #define CON_BRD BIT(8) /* burst read */ | ||
68 | #define CON_BWR BIT(9) /* burst write */ | ||
69 | #define CON_SEC_SHIFT (12) | ||
70 | /* Timing control register */ | ||
71 | #define NFI_ACCCON (0x0C) | ||
72 | #define NFI_INTR_EN (0x10) | ||
73 | #define INTR_AHB_DONE_EN BIT(6) | ||
74 | #define NFI_INTR_STA (0x14) | ||
75 | #define NFI_CMD (0x20) | ||
76 | #define NFI_ADDRNOB (0x30) | ||
77 | #define NFI_COLADDR (0x34) | ||
78 | #define NFI_ROWADDR (0x38) | ||
79 | #define NFI_STRDATA (0x40) | ||
80 | #define STAR_EN (1) | ||
81 | #define STAR_DE (0) | ||
82 | #define NFI_CNRNB (0x44) | ||
83 | #define NFI_DATAW (0x50) | ||
84 | #define NFI_DATAR (0x54) | ||
85 | #define NFI_PIO_DIRDY (0x58) | ||
86 | #define PIO_DI_RDY (0x01) | ||
87 | #define NFI_STA (0x60) | ||
88 | #define STA_CMD BIT(0) | ||
89 | #define STA_ADDR BIT(1) | ||
90 | #define STA_BUSY BIT(8) | ||
91 | #define STA_EMP_PAGE BIT(12) | ||
92 | #define NFI_FSM_CUSTDATA (0xe << 16) | ||
93 | #define NFI_FSM_MASK (0xf << 16) | ||
94 | #define NFI_ADDRCNTR (0x70) | ||
95 | #define CNTR_MASK GENMASK(16, 12) | ||
96 | #define NFI_STRADDR (0x80) | ||
97 | #define NFI_BYTELEN (0x84) | ||
98 | #define NFI_CSEL (0x90) | ||
99 | #define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2) | ||
100 | #define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2) | ||
101 | #define NFI_FDM_MAX_SIZE (8) | ||
102 | #define NFI_FDM_MIN_SIZE (1) | ||
103 | #define NFI_MASTER_STA (0x224) | ||
104 | #define MASTER_STA_MASK (0x0FFF) | ||
105 | #define NFI_EMPTY_THRESH (0x23C) | ||
106 | |||
107 | #define MTK_NAME "mtk-nand" | ||
108 | #define KB(x) ((x) * 1024UL) | ||
109 | #define MB(x) (KB(x) * 1024UL) | ||
110 | |||
111 | #define MTK_TIMEOUT (500000) | ||
112 | #define MTK_RESET_TIMEOUT (1000000) | ||
113 | #define MTK_MAX_SECTOR (16) | ||
114 | #define MTK_NAND_MAX_NSELS (2) | ||
115 | |||
116 | struct mtk_nfc_bad_mark_ctl { | ||
117 | void (*bm_swap)(struct mtd_info *, u8 *buf, int raw); | ||
118 | u32 sec; | ||
119 | u32 pos; | ||
120 | }; | ||
121 | |||
122 | /* | ||
123 | * FDM: region used to store free OOB data | ||
124 | */ | ||
125 | struct mtk_nfc_fdm { | ||
126 | u32 reg_size; | ||
127 | u32 ecc_size; | ||
128 | }; | ||
129 | |||
130 | struct mtk_nfc_nand_chip { | ||
131 | struct list_head node; | ||
132 | struct nand_chip nand; | ||
133 | |||
134 | struct mtk_nfc_bad_mark_ctl bad_mark; | ||
135 | struct mtk_nfc_fdm fdm; | ||
136 | u32 spare_per_sector; | ||
137 | |||
138 | int nsels; | ||
139 | u8 sels[0]; | ||
140 | /* nothing after this field */ | ||
141 | }; | ||
142 | |||
143 | struct mtk_nfc_clk { | ||
144 | struct clk *nfi_clk; | ||
145 | struct clk *pad_clk; | ||
146 | }; | ||
147 | |||
148 | struct mtk_nfc { | ||
149 | struct nand_hw_control controller; | ||
150 | struct mtk_ecc_config ecc_cfg; | ||
151 | struct mtk_nfc_clk clk; | ||
152 | struct mtk_ecc *ecc; | ||
153 | |||
154 | struct device *dev; | ||
155 | void __iomem *regs; | ||
156 | |||
157 | struct completion done; | ||
158 | struct list_head chips; | ||
159 | |||
160 | u8 *buffer; | ||
161 | }; | ||
162 | |||
163 | static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand) | ||
164 | { | ||
165 | return container_of(nand, struct mtk_nfc_nand_chip, nand); | ||
166 | } | ||
167 | |||
168 | static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i) | ||
169 | { | ||
170 | return (u8 *)p + i * chip->ecc.size; | ||
171 | } | ||
172 | |||
173 | static inline u8 *oob_ptr(struct nand_chip *chip, int i) | ||
174 | { | ||
175 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
176 | u8 *poi; | ||
177 | |||
178 | /* map the sector's FDM data to free oob: | ||
179 | * the beginning of the oob area stores the FDM data of bad mark sectors | ||
180 | */ | ||
181 | |||
182 | if (i < mtk_nand->bad_mark.sec) | ||
183 | poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size; | ||
184 | else if (i == mtk_nand->bad_mark.sec) | ||
185 | poi = chip->oob_poi; | ||
186 | else | ||
187 | poi = chip->oob_poi + i * mtk_nand->fdm.reg_size; | ||
188 | |||
189 | return poi; | ||
190 | } | ||
191 | |||
192 | static inline int mtk_data_len(struct nand_chip *chip) | ||
193 | { | ||
194 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
195 | |||
196 | return chip->ecc.size + mtk_nand->spare_per_sector; | ||
197 | } | ||
198 | |||
199 | static inline u8 *mtk_data_ptr(struct nand_chip *chip, int i) | ||
200 | { | ||
201 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
202 | |||
203 | return nfc->buffer + i * mtk_data_len(chip); | ||
204 | } | ||
205 | |||
206 | static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i) | ||
207 | { | ||
208 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
209 | |||
210 | return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size; | ||
211 | } | ||
212 | |||
213 | static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg) | ||
214 | { | ||
215 | writel(val, nfc->regs + reg); | ||
216 | } | ||
217 | |||
218 | static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg) | ||
219 | { | ||
220 | writew(val, nfc->regs + reg); | ||
221 | } | ||
222 | |||
223 | static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg) | ||
224 | { | ||
225 | writeb(val, nfc->regs + reg); | ||
226 | } | ||
227 | |||
228 | static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg) | ||
229 | { | ||
230 | return readl_relaxed(nfc->regs + reg); | ||
231 | } | ||
232 | |||
233 | static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg) | ||
234 | { | ||
235 | return readw_relaxed(nfc->regs + reg); | ||
236 | } | ||
237 | |||
238 | static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg) | ||
239 | { | ||
240 | return readb_relaxed(nfc->regs + reg); | ||
241 | } | ||
242 | |||
243 | static void mtk_nfc_hw_reset(struct mtk_nfc *nfc) | ||
244 | { | ||
245 | struct device *dev = nfc->dev; | ||
246 | u32 val; | ||
247 | int ret; | ||
248 | |||
249 | /* reset all registers and force the NFI master to terminate */ | ||
250 | nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); | ||
251 | |||
252 | /* wait for the master to finish the last transaction */ | ||
253 | ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val, | ||
254 | !(val & MASTER_STA_MASK), 50, | ||
255 | MTK_RESET_TIMEOUT); | ||
256 | if (ret) | ||
257 | dev_warn(dev, "master active in reset [0x%x] = 0x%x\n", | ||
258 | NFI_MASTER_STA, val); | ||
259 | |||
260 | /* ensure any status register affected by the NFI master is reset */ | ||
261 | nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON); | ||
262 | nfi_writew(nfc, STAR_DE, NFI_STRDATA); | ||
263 | } | ||
264 | |||
265 | static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command) | ||
266 | { | ||
267 | struct device *dev = nfc->dev; | ||
268 | u32 val; | ||
269 | int ret; | ||
270 | |||
271 | nfi_writel(nfc, command, NFI_CMD); | ||
272 | |||
273 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, | ||
274 | !(val & STA_CMD), 10, MTK_TIMEOUT); | ||
275 | if (ret) { | ||
276 | dev_warn(dev, "nfi core timed out entering command mode\n"); | ||
277 | return -EIO; | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr) | ||
284 | { | ||
285 | struct device *dev = nfc->dev; | ||
286 | u32 val; | ||
287 | int ret; | ||
288 | |||
289 | nfi_writel(nfc, addr, NFI_COLADDR); | ||
290 | nfi_writel(nfc, 0, NFI_ROWADDR); | ||
291 | nfi_writew(nfc, 1, NFI_ADDRNOB); | ||
292 | |||
293 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val, | ||
294 | !(val & STA_ADDR), 10, MTK_TIMEOUT); | ||
295 | if (ret) { | ||
296 | dev_warn(dev, "nfi core timed out entering address mode\n"); | ||
297 | return -EIO; | ||
298 | } | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd) | ||
304 | { | ||
305 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
306 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
307 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
308 | u32 fmt, spare; | ||
309 | |||
310 | if (!mtd->writesize) | ||
311 | return 0; | ||
312 | |||
313 | spare = mtk_nand->spare_per_sector; | ||
314 | |||
315 | switch (mtd->writesize) { | ||
316 | case 512: | ||
317 | fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512; | ||
318 | break; | ||
319 | case KB(2): | ||
320 | if (chip->ecc.size == 512) | ||
321 | fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512; | ||
322 | else | ||
323 | fmt = PAGEFMT_512_2K; | ||
324 | break; | ||
325 | case KB(4): | ||
326 | if (chip->ecc.size == 512) | ||
327 | fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512; | ||
328 | else | ||
329 | fmt = PAGEFMT_2K_4K; | ||
330 | break; | ||
331 | case KB(8): | ||
332 | if (chip->ecc.size == 512) | ||
333 | fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512; | ||
334 | else | ||
335 | fmt = PAGEFMT_4K_8K; | ||
336 | break; | ||
337 | case KB(16): | ||
338 | fmt = PAGEFMT_8K_16K; | ||
339 | break; | ||
340 | default: | ||
341 | dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize); | ||
342 | return -EINVAL; | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * the hardware will double the value for this eccsize, so we need to | ||
347 | * halve it | ||
348 | */ | ||
349 | if (chip->ecc.size == 1024) | ||
350 | spare >>= 1; | ||
351 | |||
352 | switch (spare) { | ||
353 | case 16: | ||
354 | fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT); | ||
355 | break; | ||
356 | case 26: | ||
357 | fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT); | ||
358 | break; | ||
359 | case 27: | ||
360 | fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT); | ||
361 | break; | ||
362 | case 28: | ||
363 | fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT); | ||
364 | break; | ||
365 | case 32: | ||
366 | fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT); | ||
367 | break; | ||
368 | case 36: | ||
369 | fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT); | ||
370 | break; | ||
371 | case 40: | ||
372 | fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT); | ||
373 | break; | ||
374 | case 44: | ||
375 | fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT); | ||
376 | break; | ||
377 | case 48: | ||
378 | fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT); | ||
379 | break; | ||
380 | case 49: | ||
381 | fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT); | ||
382 | break; | ||
383 | case 50: | ||
384 | fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT); | ||
385 | break; | ||
386 | case 51: | ||
387 | fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT); | ||
388 | break; | ||
389 | case 52: | ||
390 | fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT); | ||
391 | break; | ||
392 | case 62: | ||
393 | fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT); | ||
394 | break; | ||
395 | case 63: | ||
396 | fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT); | ||
397 | break; | ||
398 | case 64: | ||
399 | fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT); | ||
400 | break; | ||
401 | default: | ||
402 | dev_err(nfc->dev, "invalid spare per sector %d\n", spare); | ||
403 | return -EINVAL; | ||
404 | } | ||
405 | |||
406 | fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT; | ||
407 | fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT; | ||
408 | nfi_writew(nfc, fmt, NFI_PAGEFMT); | ||
409 | |||
410 | nfc->ecc_cfg.strength = chip->ecc.strength; | ||
411 | nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size; | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip) | ||
417 | { | ||
418 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
419 | struct mtk_nfc *nfc = nand_get_controller_data(nand); | ||
420 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand); | ||
421 | |||
422 | if (chip < 0) | ||
423 | return; | ||
424 | |||
425 | mtk_nfc_hw_runtime_config(mtd); | ||
426 | |||
427 | nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL); | ||
428 | } | ||
429 | |||
430 | static int mtk_nfc_dev_ready(struct mtd_info *mtd) | ||
431 | { | ||
432 | struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); | ||
433 | |||
434 | if (nfi_readl(nfc, NFI_STA) & STA_BUSY) | ||
435 | return 0; | ||
436 | |||
437 | return 1; | ||
438 | } | ||
439 | |||
440 | static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) | ||
441 | { | ||
442 | struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); | ||
443 | |||
444 | if (ctrl & NAND_ALE) { | ||
445 | mtk_nfc_send_address(nfc, dat); | ||
446 | } else if (ctrl & NAND_CLE) { | ||
447 | mtk_nfc_hw_reset(nfc); | ||
448 | |||
449 | nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG); | ||
450 | mtk_nfc_send_command(nfc, dat); | ||
451 | } | ||
452 | } | ||
453 | |||
454 | static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc) | ||
455 | { | ||
456 | int rc; | ||
457 | u8 val; | ||
458 | |||
459 | rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val, | ||
460 | val & PIO_DI_RDY, 10, MTK_TIMEOUT); | ||
461 | if (rc < 0) | ||
462 | dev_err(nfc->dev, "data not ready\n"); | ||
463 | } | ||
464 | |||
465 | static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd) | ||
466 | { | ||
467 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
468 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
469 | u32 reg; | ||
470 | |||
471 | /* after each byte read, the NFI_STA reg is reset by the hardware */ | ||
472 | reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; | ||
473 | if (reg != NFI_FSM_CUSTDATA) { | ||
474 | reg = nfi_readw(nfc, NFI_CNFG); | ||
475 | reg |= CNFG_BYTE_RW | CNFG_READ_EN; | ||
476 | nfi_writew(nfc, reg, NFI_CNFG); | ||
477 | |||
478 | /* | ||
479 | * set to max sector to allow the HW to continue reading over | ||
480 | * unaligned accesses | ||
481 | */ | ||
482 | reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD; | ||
483 | nfi_writel(nfc, reg, NFI_CON); | ||
484 | |||
485 | /* trigger to fetch data */ | ||
486 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
487 | } | ||
488 | |||
489 | mtk_nfc_wait_ioready(nfc); | ||
490 | |||
491 | return nfi_readb(nfc, NFI_DATAR); | ||
492 | } | ||
493 | |||
494 | static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len) | ||
495 | { | ||
496 | int i; | ||
497 | |||
498 | for (i = 0; i < len; i++) | ||
499 | buf[i] = mtk_nfc_read_byte(mtd); | ||
500 | } | ||
501 | |||
502 | static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte) | ||
503 | { | ||
504 | struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); | ||
505 | u32 reg; | ||
506 | |||
507 | reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK; | ||
508 | |||
509 | if (reg != NFI_FSM_CUSTDATA) { | ||
510 | reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW; | ||
511 | nfi_writew(nfc, reg, NFI_CNFG); | ||
512 | |||
513 | reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR; | ||
514 | nfi_writel(nfc, reg, NFI_CON); | ||
515 | |||
516 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
517 | } | ||
518 | |||
519 | mtk_nfc_wait_ioready(nfc); | ||
520 | nfi_writeb(nfc, byte, NFI_DATAW); | ||
521 | } | ||
522 | |||
523 | static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) | ||
524 | { | ||
525 | int i; | ||
526 | |||
527 | for (i = 0; i < len; i++) | ||
528 | mtk_nfc_write_byte(mtd, buf[i]); | ||
529 | } | ||
530 | |||
531 | static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data) | ||
532 | { | ||
533 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
534 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
535 | int size = chip->ecc.size + mtk_nand->fdm.reg_size; | ||
536 | |||
537 | nfc->ecc_cfg.mode = ECC_DMA_MODE; | ||
538 | nfc->ecc_cfg.op = ECC_ENCODE; | ||
539 | |||
540 | return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size); | ||
541 | } | ||
542 | |||
543 | static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c) | ||
544 | { | ||
545 | /* nop */ | ||
546 | } | ||
547 | |||
548 | static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw) | ||
549 | { | ||
550 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
551 | struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip); | ||
552 | u32 bad_pos = nand->bad_mark.pos; | ||
553 | |||
554 | if (raw) | ||
555 | bad_pos += nand->bad_mark.sec * mtk_data_len(chip); | ||
556 | else | ||
557 | bad_pos += nand->bad_mark.sec * chip->ecc.size; | ||
558 | |||
559 | swap(chip->oob_poi[0], buf[bad_pos]); | ||
560 | } | ||
561 | |||
562 | static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset, | ||
563 | u32 len, const u8 *buf) | ||
564 | { | ||
565 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
566 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
567 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
568 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
569 | u32 start, end; | ||
570 | int i, ret; | ||
571 | |||
572 | start = offset / chip->ecc.size; | ||
573 | end = DIV_ROUND_UP(offset + len, chip->ecc.size); | ||
574 | |||
575 | memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); | ||
576 | for (i = 0; i < chip->ecc.steps; i++) { | ||
577 | memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), | ||
578 | chip->ecc.size); | ||
579 | |||
580 | if (start > i || i >= end) | ||
581 | continue; | ||
582 | |||
583 | if (i == mtk_nand->bad_mark.sec) | ||
584 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); | ||
585 | |||
586 | memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); | ||
587 | |||
588 | /* program the ECC parity data back to the OOB */ | ||
589 | ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i)); | ||
590 | if (ret < 0) | ||
591 | return ret; | ||
592 | } | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf) | ||
598 | { | ||
599 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
600 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
601 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
602 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
603 | u32 i; | ||
604 | |||
605 | memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); | ||
606 | for (i = 0; i < chip->ecc.steps; i++) { | ||
607 | if (buf) | ||
608 | memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i), | ||
609 | chip->ecc.size); | ||
610 | |||
611 | if (i == mtk_nand->bad_mark.sec) | ||
612 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); | ||
613 | |||
614 | memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start, | ||
619 | u32 sectors) | ||
620 | { | ||
621 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
622 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
623 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
624 | u32 vall, valm; | ||
625 | u8 *oobptr; | ||
626 | int i, j; | ||
627 | |||
628 | for (i = 0; i < sectors; i++) { | ||
629 | oobptr = oob_ptr(chip, start + i); | ||
630 | vall = nfi_readl(nfc, NFI_FDML(i)); | ||
631 | valm = nfi_readl(nfc, NFI_FDMM(i)); | ||
632 | |||
633 | for (j = 0; j < fdm->reg_size; j++) | ||
634 | oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8); | ||
635 | } | ||
636 | } | ||
637 | |||
638 | static inline void mtk_nfc_write_fdm(struct nand_chip *chip) | ||
639 | { | ||
640 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
641 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
642 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
643 | u32 vall, valm; | ||
644 | u8 *oobptr; | ||
645 | int i, j; | ||
646 | |||
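	/*
	 * Pack up to 8 FDM bytes per sector into the FDML/FDMM register pair,
	 * padding the bytes beyond fdm->reg_size with 0xff.
	 */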
647 | for (i = 0; i < chip->ecc.steps; i++) { | ||
648 | oobptr = oob_ptr(chip, i); | ||
649 | vall = 0; | ||
650 | valm = 0; | ||
651 | for (j = 0; j < 8; j++) { | ||
652 | if (j < 4) | ||
653 | vall |= (j < fdm->reg_size ? oobptr[j] : 0xff) | ||
654 | << (j * 8); | ||
655 | else | ||
656 | valm |= (j < fdm->reg_size ? oobptr[j] : 0xff) | ||
657 | << ((j - 4) * 8); | ||
658 | } | ||
659 | nfi_writel(nfc, vall, NFI_FDML(i)); | ||
660 | nfi_writel(nfc, valm, NFI_FDMM(i)); | ||
661 | } | ||
662 | } | ||
663 | |||
664 | static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip, | ||
665 | const u8 *buf, int page, int len) | ||
666 | { | ||
667 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
668 | struct device *dev = nfc->dev; | ||
669 | dma_addr_t addr; | ||
670 | u32 reg; | ||
671 | int ret; | ||
672 | |||
673 | addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE); | ||
674 | ret = dma_mapping_error(nfc->dev, addr); | ||
675 | if (ret) { | ||
676 | dev_err(nfc->dev, "dma mapping error\n"); | ||
677 | return -EINVAL; | ||
678 | } | ||
679 | |||
680 | reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN; | ||
681 | nfi_writew(nfc, reg, NFI_CNFG); | ||
682 | |||
683 | nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON); | ||
684 | nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); | ||
685 | nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); | ||
686 | |||
687 | init_completion(&nfc->done); | ||
688 | |||
689 | reg = nfi_readl(nfc, NFI_CON) | CON_BWR; | ||
690 | nfi_writel(nfc, reg, NFI_CON); | ||
691 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
692 | |||
693 | ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); | ||
694 | if (!ret) { | ||
695 | dev_err(dev, "program ahb done timeout\n"); | ||
696 | nfi_writew(nfc, 0, NFI_INTR_EN); | ||
697 | ret = -ETIMEDOUT; | ||
698 | goto timeout; | ||
699 | } | ||
700 | |||
701 | ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, | ||
702 | (reg & CNTR_MASK) >= chip->ecc.steps, | ||
703 | 10, MTK_TIMEOUT); | ||
704 | if (ret) | ||
705 | dev_err(dev, "hwecc write timeout\n"); | ||
706 | |||
707 | timeout: | ||
708 | |||
709 | dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE); | ||
710 | nfi_writel(nfc, 0, NFI_CON); | ||
711 | |||
712 | return ret; | ||
713 | } | ||
714 | |||
715 | static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip, | ||
716 | const u8 *buf, int page, int raw) | ||
717 | { | ||
718 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
719 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
720 | size_t len; | ||
721 | const u8 *bufpoi; | ||
722 | u32 reg; | ||
723 | int ret; | ||
724 | |||
725 | if (!raw) { | ||
726 | /* OOB => FDM: from register, ECC: from HW */ | ||
727 | reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN; | ||
728 | nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG); | ||
729 | |||
730 | nfc->ecc_cfg.op = ECC_ENCODE; | ||
731 | nfc->ecc_cfg.mode = ECC_NFI_MODE; | ||
732 | ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); | ||
733 | if (ret) { | ||
734 | /* clear NFI config */ | ||
735 | reg = nfi_readw(nfc, NFI_CNFG); | ||
736 | reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); | ||
737 | nfi_writew(nfc, reg, NFI_CNFG); | ||
738 | |||
739 | return ret; | ||
740 | } | ||
741 | |||
742 | memcpy(nfc->buffer, buf, mtd->writesize); | ||
743 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw); | ||
744 | bufpoi = nfc->buffer; | ||
745 | |||
746 | /* write OOB into the FDM registers (OOB area in MTK NAND) */ | ||
747 | mtk_nfc_write_fdm(chip); | ||
748 | } else { | ||
749 | bufpoi = buf; | ||
750 | } | ||
751 | |||
752 | len = mtd->writesize + (raw ? mtd->oobsize : 0); | ||
753 | ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len); | ||
754 | |||
755 | if (!raw) | ||
756 | mtk_ecc_disable(nfc->ecc); | ||
757 | |||
758 | return ret; | ||
759 | } | ||
760 | |||
761 | static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd, | ||
762 | struct nand_chip *chip, const u8 *buf, | ||
763 | int oob_on, int page) | ||
764 | { | ||
765 | return mtk_nfc_write_page(mtd, chip, buf, page, 0); | ||
766 | } | ||
767 | |||
768 | static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | ||
769 | const u8 *buf, int oob_on, int pg) | ||
770 | { | ||
771 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
772 | |||
773 | mtk_nfc_format_page(mtd, buf); | ||
774 | return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1); | ||
775 | } | ||
776 | |||
777 | static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd, | ||
778 | struct nand_chip *chip, u32 offset, | ||
779 | u32 data_len, const u8 *buf, | ||
780 | int oob_on, int page) | ||
781 | { | ||
782 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
783 | int ret; | ||
784 | |||
785 | ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf); | ||
786 | if (ret < 0) | ||
787 | return ret; | ||
788 | |||
789 | /* use the data in the private buffer (now with FDM and ECC parity) */ | ||
790 | return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1); | ||
791 | } | ||
792 | |||
793 | static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | ||
794 | int page) | ||
795 | { | ||
796 | int ret; | ||
797 | |||
798 | chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); | ||
799 | |||
800 | ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page); | ||
801 | if (ret < 0) | ||
802 | return -EIO; | ||
803 | |||
804 | chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); | ||
805 | ret = chip->waitfunc(mtd, chip); | ||
806 | |||
807 | return ret & NAND_STATUS_FAIL ? -EIO : 0; | ||
808 | } | ||
809 | |||
810 | static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) | ||
811 | { | ||
812 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
813 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
814 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
815 | struct mtk_ecc_stats stats; | ||
816 | int rc, i; | ||
817 | |||
818 | rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; | ||
819 | if (rc) { | ||
820 | memset(buf, 0xff, sectors * chip->ecc.size); | ||
821 | for (i = 0; i < sectors; i++) | ||
822 | memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); | ||
823 | return 0; | ||
824 | } | ||
825 | |||
826 | mtk_ecc_get_stats(nfc->ecc, &stats, sectors); | ||
827 | mtd->ecc_stats.corrected += stats.corrected; | ||
828 | mtd->ecc_stats.failed += stats.failed; | ||
829 | |||
830 | return stats.bitflips; | ||
831 | } | ||
832 | |||
833 | static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, | ||
834 | u32 data_offs, u32 readlen, | ||
835 | u8 *bufpoi, int page, int raw) | ||
836 | { | ||
837 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
838 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
839 | u32 spare = mtk_nand->spare_per_sector; | ||
840 | u32 column, sectors, start, end, reg; | ||
841 | dma_addr_t addr; | ||
842 | int bitflips; | ||
843 | size_t len; | ||
844 | u8 *buf; | ||
845 | int rc; | ||
846 | |||
847 | start = data_offs / chip->ecc.size; | ||
848 | end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); | ||
849 | |||
850 | sectors = end - start; | ||
851 | column = start * (chip->ecc.size + spare); | ||
852 | |||
853 | len = sectors * chip->ecc.size + (raw ? sectors * spare : 0); | ||
854 | buf = bufpoi + start * chip->ecc.size; | ||
855 | |||
856 | if (column != 0) | ||
857 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1); | ||
858 | |||
859 | addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE); | ||
860 | rc = dma_mapping_error(nfc->dev, addr); | ||
861 | if (rc) { | ||
862 | dev_err(nfc->dev, "dma mapping error\n"); | ||
863 | |||
864 | return -EINVAL; | ||
865 | } | ||
866 | |||
867 | reg = nfi_readw(nfc, NFI_CNFG); | ||
868 | reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB; | ||
869 | if (!raw) { | ||
870 | reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN; | ||
871 | nfi_writew(nfc, reg, NFI_CNFG); | ||
872 | |||
873 | nfc->ecc_cfg.mode = ECC_NFI_MODE; | ||
874 | nfc->ecc_cfg.sectors = sectors; | ||
875 | nfc->ecc_cfg.op = ECC_DECODE; | ||
876 | rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg); | ||
877 | if (rc) { | ||
878 | dev_err(nfc->dev, "ecc enable\n"); | ||
879 | /* clear NFI_CNFG */ | ||
880 | reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN | | ||
881 | CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN); | ||
882 | nfi_writew(nfc, reg, NFI_CNFG); | ||
883 | dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); | ||
884 | |||
885 | return rc; | ||
886 | } | ||
887 | } else { | ||
888 | nfi_writew(nfc, reg, NFI_CNFG); | ||
889 | } | ||
890 | |||
891 | nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON); | ||
892 | nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN); | ||
893 | nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR); | ||
894 | |||
895 | init_completion(&nfc->done); | ||
896 | reg = nfi_readl(nfc, NFI_CON) | CON_BRD; | ||
897 | nfi_writel(nfc, reg, NFI_CON); | ||
898 | nfi_writew(nfc, STAR_EN, NFI_STRDATA); | ||
899 | |||
900 | rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500)); | ||
901 | if (!rc) | ||
902 | dev_warn(nfc->dev, "read ahb/dma done timeout\n"); | ||
903 | |||
904 | rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, | ||
905 | (reg & CNTR_MASK) >= sectors, 10, | ||
906 | MTK_TIMEOUT); | ||
907 | if (rc < 0) { | ||
908 | dev_err(nfc->dev, "subpage done timeout\n"); | ||
909 | bitflips = -EIO; | ||
910 | } else { | ||
911 | bitflips = 0; | ||
912 | if (!raw) { | ||
913 | rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); | ||
914 | bitflips = rc < 0 ? -ETIMEDOUT : | ||
915 | mtk_nfc_update_ecc_stats(mtd, buf, sectors); | ||
916 | mtk_nfc_read_fdm(chip, start, sectors); | ||
917 | } | ||
918 | } | ||
919 | |||
920 | dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); | ||
921 | |||
922 | if (raw) | ||
923 | goto done; | ||
924 | |||
925 | mtk_ecc_disable(nfc->ecc); | ||
926 | |||
927 | if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec) | ||
928 | mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw); | ||
929 | done: | ||
930 | nfi_writel(nfc, 0, NFI_CON); | ||
931 | |||
932 | return bitflips; | ||
933 | } | ||
934 | |||
935 | static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd, | ||
936 | struct nand_chip *chip, u32 off, | ||
937 | u32 len, u8 *p, int pg) | ||
938 | { | ||
939 | return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0); | ||
940 | } | ||
941 | |||
942 | static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd, | ||
943 | struct nand_chip *chip, u8 *p, | ||
944 | int oob_on, int pg) | ||
945 | { | ||
946 | return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0); | ||
947 | } | ||
948 | |||
949 | static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | ||
950 | u8 *buf, int oob_on, int page) | ||
951 | { | ||
952 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
953 | struct mtk_nfc *nfc = nand_get_controller_data(chip); | ||
954 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
955 | int i, ret; | ||
956 | |||
957 | memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize); | ||
958 | ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer, | ||
959 | page, 1); | ||
960 | if (ret < 0) | ||
961 | return ret; | ||
962 | |||
963 | for (i = 0; i < chip->ecc.steps; i++) { | ||
964 | memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size); | ||
965 | |||
966 | if (i == mtk_nand->bad_mark.sec) | ||
967 | mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1); | ||
968 | |||
969 | if (buf) | ||
970 | memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i), | ||
971 | chip->ecc.size); | ||
972 | } | ||
973 | |||
974 | return ret; | ||
975 | } | ||
976 | |||
977 | static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, | ||
978 | int page) | ||
979 | { | ||
980 | chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); | ||
981 | |||
982 | return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page); | ||
983 | } | ||
984 | |||
985 | static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc) | ||
986 | { | ||
987 | /* | ||
988 | * ACCON: access timing control register | ||
989 | * ------------------------------------- | ||
990 | * 31:28: minimum required time for CS post pulling down after accessing | ||
991 | * the device | ||
992 | * 27:22: minimum required time for CS pre pulling down before accessing | ||
993 | * the device | ||
994 | * 21:16: minimum required time from NCEB low to NREB low | ||
995 | * 15:12: minimum required time from NWEB high to NREB low. | ||
996 | * 11:08: write enable hold time | ||
997 | * 07:04: write wait states | ||
998 | * 03:00: read wait states | ||
999 | */ | ||
1000 | nfi_writel(nfc, 0x10804211, NFI_ACCCON); | ||
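	/*
	 * Illustrative decode of 0x10804211 against the layout above:
	 * [31:28]=0x1, [27:22]=0x02, [21:16]=0x00, [15:12]=0x4,
	 * [11:8]=0x2, [7:4]=0x1, [3:0]=0x1.
	 */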
1001 | |||
1002 | /* | ||
1003 | * CNRNB: nand ready/busy register | ||
1004 | * ------------------------------- | ||
1005 | * 7:4: timeout register for polling the NAND busy/ready signal | ||
1006 | * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles. | ||
1007 | */ | ||
1008 | nfi_writew(nfc, 0xf1, NFI_CNRNB); | ||
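	/* 0xf1: timeout field [7:4] = 0xf (poll after 15 * 16 cycles), bit 0 enables polling */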
1009 | nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT); | ||
1010 | |||
1011 | mtk_nfc_hw_reset(nfc); | ||
1012 | |||
1013 | nfi_readl(nfc, NFI_INTR_STA); | ||
1014 | nfi_writel(nfc, 0, NFI_INTR_EN); | ||
1015 | } | ||
1016 | |||
1017 | static irqreturn_t mtk_nfc_irq(int irq, void *id) | ||
1018 | { | ||
1019 | struct mtk_nfc *nfc = id; | ||
1020 | u16 sta, ien; | ||
1021 | |||
1022 | sta = nfi_readw(nfc, NFI_INTR_STA); | ||
1023 | ien = nfi_readw(nfc, NFI_INTR_EN); | ||
1024 | |||
1025 | if (!(sta & ien)) | ||
1026 | return IRQ_NONE; | ||
1027 | |||
1028 | nfi_writew(nfc, ~sta & ien, NFI_INTR_EN); | ||
1029 | complete(&nfc->done); | ||
1030 | |||
1031 | return IRQ_HANDLED; | ||
1032 | } | ||
1033 | |||
1034 | static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk) | ||
1035 | { | ||
1036 | int ret; | ||
1037 | |||
1038 | ret = clk_prepare_enable(clk->nfi_clk); | ||
1039 | if (ret) { | ||
1040 | dev_err(dev, "failed to enable nfi clk\n"); | ||
1041 | return ret; | ||
1042 | } | ||
1043 | |||
1044 | ret = clk_prepare_enable(clk->pad_clk); | ||
1045 | if (ret) { | ||
1046 | dev_err(dev, "failed to enable pad clk\n"); | ||
1047 | clk_disable_unprepare(clk->nfi_clk); | ||
1048 | return ret; | ||
1049 | } | ||
1050 | |||
1051 | return 0; | ||
1052 | } | ||
1053 | |||
1054 | static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk) | ||
1055 | { | ||
1056 | clk_disable_unprepare(clk->nfi_clk); | ||
1057 | clk_disable_unprepare(clk->pad_clk); | ||
1058 | } | ||
1059 | |||
1060 | static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section, | ||
1061 | struct mtd_oob_region *oob_region) | ||
1062 | { | ||
1063 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
1064 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
1065 | struct mtk_nfc_fdm *fdm = &mtk_nand->fdm; | ||
1066 | u32 eccsteps; | ||
1067 | |||
1068 | eccsteps = mtd->writesize / chip->ecc.size; | ||
1069 | |||
1070 | if (section >= eccsteps) | ||
1071 | return -ERANGE; | ||
1072 | |||
1073 | oob_region->length = fdm->reg_size - fdm->ecc_size; | ||
1074 | oob_region->offset = section * fdm->reg_size + fdm->ecc_size; | ||
1075 | |||
1076 | return 0; | ||
1077 | } | ||
1078 | |||
1079 | static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section, | ||
1080 | struct mtd_oob_region *oob_region) | ||
1081 | { | ||
1082 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
1083 | struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); | ||
1084 | u32 eccsteps; | ||
1085 | |||
1086 | if (section) | ||
1087 | return -ERANGE; | ||
1088 | |||
1089 | eccsteps = mtd->writesize / chip->ecc.size; | ||
1090 | oob_region->offset = mtk_nand->fdm.reg_size * eccsteps; | ||
1091 | oob_region->length = mtd->oobsize - oob_region->offset; | ||
1092 | |||
1093 | return 0; | ||
1094 | } | ||
1095 | |||
1096 | static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = { | ||
1097 | .free = mtk_nfc_ooblayout_free, | ||
1098 | .ecc = mtk_nfc_ooblayout_ecc, | ||
1099 | }; | ||
1100 | |||
1101 | static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd) | ||
1102 | { | ||
1103 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1104 | struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand); | ||
1105 | u32 ecc_bytes; | ||
1106 | |||
1107 | ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8); | ||
1108 | |||
1109 | fdm->reg_size = chip->spare_per_sector - ecc_bytes; | ||
1110 | if (fdm->reg_size > NFI_FDM_MAX_SIZE) | ||
1111 | fdm->reg_size = NFI_FDM_MAX_SIZE; | ||
1112 | |||
1113 | /* bad block mark storage */ | ||
1114 | fdm->ecc_size = 1; | ||
1115 | } | ||
1116 | |||
1117 | static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl, | ||
1118 | struct mtd_info *mtd) | ||
1119 | { | ||
1120 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1121 | |||
1122 | if (mtd->writesize == 512) { | ||
1123 | bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap; | ||
1124 | } else { | ||
1125 | bm_ctl->bm_swap = mtk_nfc_bad_mark_swap; | ||
1126 | bm_ctl->sec = mtd->writesize / mtk_data_len(nand); | ||
1127 | bm_ctl->pos = mtd->writesize % mtk_data_len(nand); | ||
1128 | } | ||
1129 | } | ||
1130 | |||
1131 | static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd) | ||
1132 | { | ||
1133 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1134 | u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44, | ||
1135 | 48, 49, 50, 51, 52, 62, 63, 64}; | ||
1136 | u32 eccsteps, i; | ||
1137 | |||
1138 | eccsteps = mtd->writesize / nand->ecc.size; | ||
1139 | *sps = mtd->oobsize / eccsteps; | ||
1140 | |||
1141 | if (nand->ecc.size == 1024) | ||
1142 | *sps >>= 1; | ||
1143 | |||
1144 | for (i = 0; i < ARRAY_SIZE(spare); i++) { | ||
1145 | if (*sps <= spare[i]) { | ||
1146 | if (!i) | ||
1147 | *sps = spare[i]; | ||
1148 | else if (*sps != spare[i]) | ||
1149 | *sps = spare[i - 1]; | ||
1150 | break; | ||
1151 | } | ||
1152 | } | ||
1153 | |||
1154 | if (i >= ARRAY_SIZE(spare)) | ||
1155 | *sps = spare[ARRAY_SIZE(spare) - 1]; | ||
1156 | |||
1157 | if (nand->ecc.size == 1024) | ||
1158 | *sps <<= 1; | ||
1159 | } | ||
1160 | |||
1161 | static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) | ||
1162 | { | ||
1163 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1164 | u32 spare; | ||
1165 | int free; | ||
1166 | |||
1167 | /* support only ecc hw mode */ | ||
1168 | if (nand->ecc.mode != NAND_ECC_HW) { | ||
1169 | dev_err(dev, "ecc.mode not supported\n"); | ||
1170 | return -EINVAL; | ||
1171 | } | ||
1172 | |||
1173 | /* if optional dt settings not present */ | ||
1174 | if (!nand->ecc.size || !nand->ecc.strength) { | ||
1175 | /* use datasheet requirements */ | ||
1176 | nand->ecc.strength = nand->ecc_strength_ds; | ||
1177 | nand->ecc.size = nand->ecc_step_ds; | ||
1178 | |||
1179 | /* | ||
1180 | * align eccstrength and eccsize | ||
1181 | * this controller only supports 512 and 1024 sizes | ||
1182 | */ | ||
1183 | if (nand->ecc.size < 1024) { | ||
1184 | if (mtd->writesize > 512) { | ||
1185 | nand->ecc.size = 1024; | ||
1186 | nand->ecc.strength <<= 1; | ||
1187 | } else { | ||
1188 | nand->ecc.size = 512; | ||
1189 | } | ||
1190 | } else { | ||
1191 | nand->ecc.size = 1024; | ||
1192 | } | ||
1193 | |||
1194 | mtk_nfc_set_spare_per_sector(&spare, mtd); | ||
1195 | |||
1196 | /* calculate oob bytes except ecc parity data */ | ||
1197 | free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3; | ||
1198 | free = spare - free; | ||
1199 | |||
1200 | /* | ||
1201 | * increase the ECC strength if the remaining OOB is bigger than the | ||
1202 | * max FDM size, or reduce it if the OOB size is not enough for the | ||
1203 | * ECC parity data. | ||
1204 | */ | ||
1205 | if (free > NFI_FDM_MAX_SIZE) { | ||
1206 | spare -= NFI_FDM_MAX_SIZE; | ||
1207 | nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; | ||
1208 | } else if (free < 0) { | ||
1209 | spare -= NFI_FDM_MIN_SIZE; | ||
1210 | nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS; | ||
1211 | } | ||
1212 | } | ||
1213 | |||
1214 | mtk_ecc_adjust_strength(&nand->ecc.strength); | ||
1215 | |||
1216 | dev_info(dev, "eccsize %d eccstrength %d\n", | ||
1217 | nand->ecc.size, nand->ecc.strength); | ||
1218 | |||
1219 | return 0; | ||
1220 | } | ||
1221 | |||
1222 | static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, | ||
1223 | struct device_node *np) | ||
1224 | { | ||
1225 | struct mtk_nfc_nand_chip *chip; | ||
1226 | struct nand_chip *nand; | ||
1227 | struct mtd_info *mtd; | ||
1228 | int nsels, len; | ||
1229 | u32 tmp; | ||
1230 | int ret; | ||
1231 | int i; | ||
1232 | |||
1233 | if (!of_get_property(np, "reg", &nsels)) | ||
1234 | return -ENODEV; | ||
1235 | |||
1236 | nsels /= sizeof(u32); | ||
1237 | if (!nsels || nsels > MTK_NAND_MAX_NSELS) { | ||
1238 | dev_err(dev, "invalid reg property size %d\n", nsels); | ||
1239 | return -EINVAL; | ||
1240 | } | ||
1241 | |||
1242 | chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8), | ||
1243 | GFP_KERNEL); | ||
1244 | if (!chip) | ||
1245 | return -ENOMEM; | ||
1246 | |||
1247 | chip->nsels = nsels; | ||
1248 | for (i = 0; i < nsels; i++) { | ||
1249 | ret = of_property_read_u32_index(np, "reg", i, &tmp); | ||
1250 | if (ret) { | ||
1251 | dev_err(dev, "reg property failure: %d\n", ret); | ||
1252 | return ret; | ||
1253 | } | ||
1254 | chip->sels[i] = tmp; | ||
1255 | } | ||
1256 | |||
1257 | nand = &chip->nand; | ||
1258 | nand->controller = &nfc->controller; | ||
1259 | |||
1260 | nand_set_flash_node(nand, np); | ||
1261 | nand_set_controller_data(nand, nfc); | ||
1262 | |||
1263 | nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ; | ||
1264 | nand->dev_ready = mtk_nfc_dev_ready; | ||
1265 | nand->select_chip = mtk_nfc_select_chip; | ||
1266 | nand->write_byte = mtk_nfc_write_byte; | ||
1267 | nand->write_buf = mtk_nfc_write_buf; | ||
1268 | nand->read_byte = mtk_nfc_read_byte; | ||
1269 | nand->read_buf = mtk_nfc_read_buf; | ||
1270 | nand->cmd_ctrl = mtk_nfc_cmd_ctrl; | ||
1271 | |||
1272 | /* set default mode in case dt entry is missing */ | ||
1273 | nand->ecc.mode = NAND_ECC_HW; | ||
1274 | |||
1275 | nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc; | ||
1276 | nand->ecc.write_page_raw = mtk_nfc_write_page_raw; | ||
1277 | nand->ecc.write_page = mtk_nfc_write_page_hwecc; | ||
1278 | nand->ecc.write_oob_raw = mtk_nfc_write_oob_std; | ||
1279 | nand->ecc.write_oob = mtk_nfc_write_oob_std; | ||
1280 | |||
1281 | nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc; | ||
1282 | nand->ecc.read_page_raw = mtk_nfc_read_page_raw; | ||
1283 | nand->ecc.read_page = mtk_nfc_read_page_hwecc; | ||
1284 | nand->ecc.read_oob_raw = mtk_nfc_read_oob_std; | ||
1285 | nand->ecc.read_oob = mtk_nfc_read_oob_std; | ||
1286 | |||
1287 | mtd = nand_to_mtd(nand); | ||
1288 | mtd->owner = THIS_MODULE; | ||
1289 | mtd->dev.parent = dev; | ||
1290 | mtd->name = MTK_NAME; | ||
1291 | mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops); | ||
1292 | |||
1293 | mtk_nfc_hw_init(nfc); | ||
1294 | |||
1295 | ret = nand_scan_ident(mtd, nsels, NULL); | ||
1296 | if (ret) | ||
1297 | return -ENODEV; | ||
1298 | |||
1299 | /* store the BBT magic in the page, because the OOB is not protected */ | ||
1300 | if (nand->bbt_options & NAND_BBT_USE_FLASH) | ||
1301 | nand->bbt_options |= NAND_BBT_NO_OOB; | ||
1302 | |||
1303 | ret = mtk_nfc_ecc_init(dev, mtd); | ||
1304 | if (ret) | ||
1305 | return -EINVAL; | ||
1306 | |||
1307 | if (nand->options & NAND_BUSWIDTH_16) { | ||
1308 | dev_err(dev, "16-bit bus width not supported\n"); | ||
1309 | return -EINVAL; | ||
1310 | } | ||
1311 | |||
1312 | mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd); | ||
1313 | mtk_nfc_set_fdm(&chip->fdm, mtd); | ||
1314 | mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd); | ||
1315 | |||
1316 | len = mtd->writesize + mtd->oobsize; | ||
1317 | nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL); | ||
1318 | if (!nfc->buffer) | ||
1319 | return -ENOMEM; | ||
1320 | |||
1321 | ret = nand_scan_tail(mtd); | ||
1322 | if (ret) | ||
1323 | return -ENODEV; | ||
1324 | |||
1325 | ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0); | ||
1326 | if (ret) { | ||
1327 | dev_err(dev, "mtd parse partition error\n"); | ||
1328 | nand_release(mtd); | ||
1329 | return ret; | ||
1330 | } | ||
1331 | |||
1332 | list_add_tail(&chip->node, &nfc->chips); | ||
1333 | |||
1334 | return 0; | ||
1335 | } | ||
1336 | |||
1337 | static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) | ||
1338 | { | ||
1339 | struct device_node *np = dev->of_node; | ||
1340 | struct device_node *nand_np; | ||
1341 | int ret; | ||
1342 | |||
1343 | for_each_child_of_node(np, nand_np) { | ||
1344 | ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np); | ||
1345 | if (ret) { | ||
1346 | of_node_put(nand_np); | ||
1347 | return ret; | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | return 0; | ||
1352 | } | ||
1353 | |||
1354 | static int mtk_nfc_probe(struct platform_device *pdev) | ||
1355 | { | ||
1356 | struct device *dev = &pdev->dev; | ||
1357 | struct device_node *np = dev->of_node; | ||
1358 | struct mtk_nfc *nfc; | ||
1359 | struct resource *res; | ||
1360 | int ret, irq; | ||
1361 | |||
1362 | nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); | ||
1363 | if (!nfc) | ||
1364 | return -ENOMEM; | ||
1365 | |||
1366 | spin_lock_init(&nfc->controller.lock); | ||
1367 | init_waitqueue_head(&nfc->controller.wq); | ||
1368 | INIT_LIST_HEAD(&nfc->chips); | ||
1369 | |||
1370 | /* defer probe if the ECC engine is not ready */ | ||
1371 | nfc->ecc = of_mtk_ecc_get(np); | ||
1372 | if (IS_ERR(nfc->ecc)) | ||
1373 | return PTR_ERR(nfc->ecc); | ||
1374 | else if (!nfc->ecc) | ||
1375 | return -ENODEV; | ||
1376 | |||
1377 | nfc->dev = dev; | ||
1378 | |||
1379 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1380 | nfc->regs = devm_ioremap_resource(dev, res); | ||
1381 | if (IS_ERR(nfc->regs)) { | ||
1382 | ret = PTR_ERR(nfc->regs); | ||
1383 | dev_err(dev, "no nfi base\n"); | ||
1384 | goto release_ecc; | ||
1385 | } | ||
1386 | |||
1387 | nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk"); | ||
1388 | if (IS_ERR(nfc->clk.nfi_clk)) { | ||
1389 | dev_err(dev, "no clk\n"); | ||
1390 | ret = PTR_ERR(nfc->clk.nfi_clk); | ||
1391 | goto release_ecc; | ||
1392 | } | ||
1393 | |||
1394 | nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk"); | ||
1395 | if (IS_ERR(nfc->clk.pad_clk)) { | ||
1396 | dev_err(dev, "no pad clk\n"); | ||
1397 | ret = PTR_ERR(nfc->clk.pad_clk); | ||
1398 | goto release_ecc; | ||
1399 | } | ||
1400 | |||
1401 | ret = mtk_nfc_enable_clk(dev, &nfc->clk); | ||
1402 | if (ret) | ||
1403 | goto release_ecc; | ||
1404 | |||
1405 | irq = platform_get_irq(pdev, 0); | ||
1406 | if (irq < 0) { | ||
1407 | dev_err(dev, "no nfi irq resource\n"); | ||
1408 | ret = -EINVAL; | ||
1409 | goto clk_disable; | ||
1410 | } | ||
1411 | |||
1412 | ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc); | ||
1413 | if (ret) { | ||
1414 | dev_err(dev, "failed to request nfi irq\n"); | ||
1415 | goto clk_disable; | ||
1416 | } | ||
1417 | |||
1418 | ret = dma_set_mask(dev, DMA_BIT_MASK(32)); | ||
1419 | if (ret) { | ||
1420 | dev_err(dev, "failed to set dma mask\n"); | ||
1421 | goto clk_disable; | ||
1422 | } | ||
1423 | |||
1424 | platform_set_drvdata(pdev, nfc); | ||
1425 | |||
1426 | ret = mtk_nfc_nand_chips_init(dev, nfc); | ||
1427 | if (ret) { | ||
1428 | dev_err(dev, "failed to init nand chips\n"); | ||
1429 | goto clk_disable; | ||
1430 | } | ||
1431 | |||
1432 | return 0; | ||
1433 | |||
1434 | clk_disable: | ||
1435 | mtk_nfc_disable_clk(&nfc->clk); | ||
1436 | |||
1437 | release_ecc: | ||
1438 | mtk_ecc_release(nfc->ecc); | ||
1439 | |||
1440 | return ret; | ||
1441 | } | ||
1442 | |||
1443 | static int mtk_nfc_remove(struct platform_device *pdev) | ||
1444 | { | ||
1445 | struct mtk_nfc *nfc = platform_get_drvdata(pdev); | ||
1446 | struct mtk_nfc_nand_chip *chip; | ||
1447 | |||
1448 | while (!list_empty(&nfc->chips)) { | ||
1449 | chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip, | ||
1450 | node); | ||
1451 | nand_release(nand_to_mtd(&chip->nand)); | ||
1452 | list_del(&chip->node); | ||
1453 | } | ||
1454 | |||
1455 | mtk_ecc_release(nfc->ecc); | ||
1456 | mtk_nfc_disable_clk(&nfc->clk); | ||
1457 | |||
1458 | return 0; | ||
1459 | } | ||
1460 | |||
1461 | #ifdef CONFIG_PM_SLEEP | ||
1462 | static int mtk_nfc_suspend(struct device *dev) | ||
1463 | { | ||
1464 | struct mtk_nfc *nfc = dev_get_drvdata(dev); | ||
1465 | |||
1466 | mtk_nfc_disable_clk(&nfc->clk); | ||
1467 | |||
1468 | return 0; | ||
1469 | } | ||
1470 | |||
1471 | static int mtk_nfc_resume(struct device *dev) | ||
1472 | { | ||
1473 | struct mtk_nfc *nfc = dev_get_drvdata(dev); | ||
1474 | struct mtk_nfc_nand_chip *chip; | ||
1475 | struct nand_chip *nand; | ||
1476 | struct mtd_info *mtd; | ||
1477 | int ret; | ||
1478 | u32 i; | ||
1479 | |||
1480 | udelay(200); | ||
1481 | |||
1482 | ret = mtk_nfc_enable_clk(dev, &nfc->clk); | ||
1483 | if (ret) | ||
1484 | return ret; | ||
1485 | |||
1486 | mtk_nfc_hw_init(nfc); | ||
1487 | |||
1488 | /* reset NAND chip if VCC was powered off */ | ||
1489 | list_for_each_entry(chip, &nfc->chips, node) { | ||
1490 | nand = &chip->nand; | ||
1491 | mtd = nand_to_mtd(nand); | ||
1492 | for (i = 0; i < chip->nsels; i++) { | ||
1493 | nand->select_chip(mtd, i); | ||
1494 | nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); | ||
1495 | } | ||
1496 | } | ||
1497 | |||
1498 | return 0; | ||
1499 | } | ||
1500 | |||
1501 | static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume); | ||
1502 | #endif | ||
1503 | |||
1504 | static const struct of_device_id mtk_nfc_id_table[] = { | ||
1505 | { .compatible = "mediatek,mt2701-nfc" }, | ||
1506 | {} | ||
1507 | }; | ||
1508 | MODULE_DEVICE_TABLE(of, mtk_nfc_id_table); | ||
1509 | |||
1510 | static struct platform_driver mtk_nfc_driver = { | ||
1511 | .probe = mtk_nfc_probe, | ||
1512 | .remove = mtk_nfc_remove, | ||
1513 | .driver = { | ||
1514 | .name = MTK_NAME, | ||
1515 | .of_match_table = mtk_nfc_id_table, | ||
1516 | #ifdef CONFIG_PM_SLEEP | ||
1517 | .pm = &mtk_nfc_pm_ops, | ||
1518 | #endif | ||
1519 | }, | ||
1520 | }; | ||
1521 | |||
1522 | module_platform_driver(mtk_nfc_driver); | ||
1523 | |||
1524 | MODULE_LICENSE("GPL"); | ||
1525 | MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); | ||
1526 | MODULE_DESCRIPTION("MTK NAND Flash Controller Driver"); | ||
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 0b0dc29d2af7..77533f7f2429 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -2610,7 +2610,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, | |||
2610 | int cached = writelen > bytes && page != blockmask; | 2610 | int cached = writelen > bytes && page != blockmask; |
2611 | uint8_t *wbuf = buf; | 2611 | uint8_t *wbuf = buf; |
2612 | int use_bufpoi; | 2612 | int use_bufpoi; |
2613 | int part_pagewr = (column || writelen < (mtd->writesize - 1)); | 2613 | int part_pagewr = (column || writelen < mtd->writesize); |
2614 | 2614 | ||
2615 | if (part_pagewr) | 2615 | if (part_pagewr) |
2616 | use_bufpoi = 1; | 2616 | use_bufpoi = 1; |
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index ccc05f5b2695..2af9869a115e 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
@@ -168,6 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = { | |||
168 | /* Manufacturer IDs */ | 168 | /* Manufacturer IDs */ |
169 | struct nand_manufacturers nand_manuf_ids[] = { | 169 | struct nand_manufacturers nand_manuf_ids[] = { |
170 | {NAND_MFR_TOSHIBA, "Toshiba"}, | 170 | {NAND_MFR_TOSHIBA, "Toshiba"}, |
171 | {NAND_MFR_ESMT, "ESMT"}, | ||
171 | {NAND_MFR_SAMSUNG, "Samsung"}, | 172 | {NAND_MFR_SAMSUNG, "Samsung"}, |
172 | {NAND_MFR_FUJITSU, "Fujitsu"}, | 173 | {NAND_MFR_FUJITSU, "Fujitsu"}, |
173 | {NAND_MFR_NATIONAL, "National"}, | 174 | {NAND_MFR_NATIONAL, "National"}, |
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index a136da8df6fe..a59361c36f40 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -118,8 +118,6 @@ | |||
118 | #define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) | 118 | #define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F) |
119 | #define STATUS_BUFF_EMPTY 0x00000001 | 119 | #define STATUS_BUFF_EMPTY 0x00000001 |
120 | 120 | ||
121 | #define OMAP24XX_DMA_GPMC 4 | ||
122 | |||
123 | #define SECTOR_BYTES 512 | 121 | #define SECTOR_BYTES 512 |
124 | /* 4 bit padding to make byte aligned, 56 = 52 + 4 */ | 122 | /* 4 bit padding to make byte aligned, 56 = 52 + 4 */ |
125 | #define BCH4_BIT_PAD 4 | 123 | #define BCH4_BIT_PAD 4 |
@@ -1811,7 +1809,6 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
1811 | struct nand_chip *nand_chip; | 1809 | struct nand_chip *nand_chip; |
1812 | int err; | 1810 | int err; |
1813 | dma_cap_mask_t mask; | 1811 | dma_cap_mask_t mask; |
1814 | unsigned sig; | ||
1815 | struct resource *res; | 1812 | struct resource *res; |
1816 | struct device *dev = &pdev->dev; | 1813 | struct device *dev = &pdev->dev; |
1817 | int min_oobbytes = BADBLOCK_MARKER_LENGTH; | 1814 | int min_oobbytes = BADBLOCK_MARKER_LENGTH; |
@@ -1924,11 +1921,11 @@ static int omap_nand_probe(struct platform_device *pdev) | |||
1924 | case NAND_OMAP_PREFETCH_DMA: | 1921 | case NAND_OMAP_PREFETCH_DMA: |
1925 | dma_cap_zero(mask); | 1922 | dma_cap_zero(mask); |
1926 | dma_cap_set(DMA_SLAVE, mask); | 1923 | dma_cap_set(DMA_SLAVE, mask); |
1927 | sig = OMAP24XX_DMA_GPMC; | 1924 | info->dma = dma_request_chan(pdev->dev.parent, "rxtx"); |
1928 | info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); | 1925 | |
1929 | if (!info->dma) { | 1926 | if (IS_ERR(info->dma)) { |
1930 | dev_err(&pdev->dev, "DMA engine request failed\n"); | 1927 | dev_err(&pdev->dev, "DMA engine request failed\n"); |
1931 | err = -ENXIO; | 1928 | err = PTR_ERR(info->dma); |
1932 | goto return_error; | 1929 | goto return_error; |
1933 | } else { | 1930 | } else { |
1934 | struct dma_slave_config cfg; | 1931 | struct dma_slave_config cfg; |
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index a83a690688b4..e414b31b71c1 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/gpio.h> | 39 | #include <linux/gpio.h> |
40 | #include <linux/interrupt.h> | 40 | #include <linux/interrupt.h> |
41 | #include <linux/iopoll.h> | 41 | #include <linux/iopoll.h> |
42 | #include <linux/reset.h> | ||
42 | 43 | ||
43 | #define NFC_REG_CTL 0x0000 | 44 | #define NFC_REG_CTL 0x0000 |
44 | #define NFC_REG_ST 0x0004 | 45 | #define NFC_REG_ST 0x0004 |
@@ -153,6 +154,7 @@ | |||
153 | 154 | ||
154 | /* define bit use in NFC_ECC_ST */ | 155 | /* define bit use in NFC_ECC_ST */ |
155 | #define NFC_ECC_ERR(x) BIT(x) | 156 | #define NFC_ECC_ERR(x) BIT(x) |
157 | #define NFC_ECC_ERR_MSK GENMASK(15, 0) | ||
156 | #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) | 158 | #define NFC_ECC_PAT_FOUND(x) BIT(x + 16) |
157 | #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) | 159 | #define NFC_ECC_ERR_CNT(b, x) (((x) >> (((b) % 4) * 8)) & 0xff) |
158 | 160 | ||
@@ -269,10 +271,12 @@ struct sunxi_nfc { | |||
269 | void __iomem *regs; | 271 | void __iomem *regs; |
270 | struct clk *ahb_clk; | 272 | struct clk *ahb_clk; |
271 | struct clk *mod_clk; | 273 | struct clk *mod_clk; |
274 | struct reset_control *reset; | ||
272 | unsigned long assigned_cs; | 275 | unsigned long assigned_cs; |
273 | unsigned long clk_rate; | 276 | unsigned long clk_rate; |
274 | struct list_head chips; | 277 | struct list_head chips; |
275 | struct completion complete; | 278 | struct completion complete; |
279 | struct dma_chan *dmac; | ||
276 | }; | 280 | }; |
277 | 281 | ||
278 | static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl) | 282 | static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl) |
@@ -365,6 +369,67 @@ static int sunxi_nfc_rst(struct sunxi_nfc *nfc) | |||
365 | return ret; | 369 | return ret; |
366 | } | 370 | } |
367 | 371 | ||
372 | static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf, | ||
373 | int chunksize, int nchunks, | ||
374 | enum dma_data_direction ddir, | ||
375 | struct scatterlist *sg) | ||
376 | { | ||
377 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
378 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
379 | struct dma_async_tx_descriptor *dmad; | ||
380 | enum dma_transfer_direction tdir; | ||
381 | dma_cookie_t dmat; | ||
382 | int ret; | ||
383 | |||
384 | if (ddir == DMA_FROM_DEVICE) | ||
385 | tdir = DMA_DEV_TO_MEM; | ||
386 | else | ||
387 | tdir = DMA_MEM_TO_DEV; | ||
388 | |||
389 | sg_init_one(sg, buf, nchunks * chunksize); | ||
390 | ret = dma_map_sg(nfc->dev, sg, 1, ddir); | ||
391 | if (!ret) | ||
392 | return -ENOMEM; | ||
393 | |||
394 | dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK); | ||
395 | if (!dmad) { | ||
396 | ret = -EINVAL; | ||
397 | goto err_unmap_buf; | ||
398 | } | ||
399 | |||
400 | writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD, | ||
401 | nfc->regs + NFC_REG_CTL); | ||
402 | writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM); | ||
403 | writel(chunksize, nfc->regs + NFC_REG_CNT); | ||
404 | dmat = dmaengine_submit(dmad); | ||
405 | |||
406 | ret = dma_submit_error(dmat); | ||
407 | if (ret) | ||
408 | goto err_clr_dma_flag; | ||
409 | |||
410 | return 0; | ||
411 | |||
412 | err_clr_dma_flag: | ||
413 | writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, | ||
414 | nfc->regs + NFC_REG_CTL); | ||
415 | |||
416 | err_unmap_buf: | ||
417 | dma_unmap_sg(nfc->dev, sg, 1, ddir); | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd, | ||
422 | enum dma_data_direction ddir, | ||
423 | struct scatterlist *sg) | ||
424 | { | ||
425 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
426 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
427 | |||
428 | dma_unmap_sg(nfc->dev, sg, 1, ddir); | ||
429 | writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD, | ||
430 | nfc->regs + NFC_REG_CTL); | ||
431 | } | ||
432 | |||
368 | static int sunxi_nfc_dev_ready(struct mtd_info *mtd) | 433 | static int sunxi_nfc_dev_ready(struct mtd_info *mtd) |
369 | { | 434 | { |
370 | struct nand_chip *nand = mtd_to_nand(mtd); | 435 | struct nand_chip *nand = mtd_to_nand(mtd); |
@@ -822,17 +887,15 @@ static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd, | |||
822 | } | 887 | } |
823 | 888 | ||
824 | static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, | 889 | static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob, |
825 | int step, bool *erased) | 890 | int step, u32 status, bool *erased) |
826 | { | 891 | { |
827 | struct nand_chip *nand = mtd_to_nand(mtd); | 892 | struct nand_chip *nand = mtd_to_nand(mtd); |
828 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | 893 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); |
829 | struct nand_ecc_ctrl *ecc = &nand->ecc; | 894 | struct nand_ecc_ctrl *ecc = &nand->ecc; |
830 | u32 status, tmp; | 895 | u32 tmp; |
831 | 896 | ||
832 | *erased = false; | 897 | *erased = false; |
833 | 898 | ||
834 | status = readl(nfc->regs + NFC_REG_ECC_ST); | ||
835 | |||
836 | if (status & NFC_ECC_ERR(step)) | 899 | if (status & NFC_ECC_ERR(step)) |
837 | return -EBADMSG; | 900 | return -EBADMSG; |
838 | 901 | ||
@@ -898,6 +961,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd, | |||
898 | *cur_off = oob_off + ecc->bytes + 4; | 961 | *cur_off = oob_off + ecc->bytes + 4; |
899 | 962 | ||
900 | ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, | 963 | ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0, |
964 | readl(nfc->regs + NFC_REG_ECC_ST), | ||
901 | &erased); | 965 | &erased); |
902 | if (erased) | 966 | if (erased) |
903 | return 1; | 967 | return 1; |
@@ -967,6 +1031,130 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd, | |||
967 | *cur_off = mtd->oobsize + mtd->writesize; | 1031 | *cur_off = mtd->oobsize + mtd->writesize; |
968 | } | 1032 | } |
969 | 1033 | ||
1034 | static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf, | ||
1035 | int oob_required, int page, | ||
1036 | int nchunks) | ||
1037 | { | ||
1038 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1039 | bool randomized = nand->options & NAND_NEED_SCRAMBLING; | ||
1040 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
1041 | struct nand_ecc_ctrl *ecc = &nand->ecc; | ||
1042 | unsigned int max_bitflips = 0; | ||
1043 | int ret, i, raw_mode = 0; | ||
1044 | struct scatterlist sg; | ||
1045 | u32 status; | ||
1046 | |||
1047 | ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); | ||
1048 | if (ret) | ||
1049 | return ret; | ||
1050 | |||
1051 | ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks, | ||
1052 | DMA_FROM_DEVICE, &sg); | ||
1053 | if (ret) | ||
1054 | return ret; | ||
1055 | |||
1056 | sunxi_nfc_hw_ecc_enable(mtd); | ||
1057 | sunxi_nfc_randomizer_config(mtd, page, false); | ||
1058 | sunxi_nfc_randomizer_enable(mtd); | ||
1059 | |||
1060 | writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) | | ||
1061 | NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET); | ||
1062 | |||
1063 | dma_async_issue_pending(nfc->dmac); | ||
1064 | |||
1065 | writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS, | ||
1066 | nfc->regs + NFC_REG_CMD); | ||
1067 | |||
1068 | ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); | ||
1069 | if (ret) | ||
1070 | dmaengine_terminate_all(nfc->dmac); | ||
1071 | |||
1072 | sunxi_nfc_randomizer_disable(mtd); | ||
1073 | sunxi_nfc_hw_ecc_disable(mtd); | ||
1074 | |||
1075 | sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg); | ||
1076 | |||
1077 | if (ret) | ||
1078 | return ret; | ||
1079 | |||
1080 | status = readl(nfc->regs + NFC_REG_ECC_ST); | ||
1081 | |||
1082 | for (i = 0; i < nchunks; i++) { | ||
1083 | int data_off = i * ecc->size; | ||
1084 | int oob_off = i * (ecc->bytes + 4); | ||
1085 | u8 *data = buf + data_off; | ||
1086 | u8 *oob = nand->oob_poi + oob_off; | ||
1087 | bool erased; | ||
1088 | |||
1089 | ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL, | ||
1090 | oob_required ? oob : NULL, | ||
1091 | i, status, &erased); | ||
1092 | |||
1093 | /* ECC errors are handled in the second loop. */ | ||
1094 | if (ret < 0) | ||
1095 | continue; | ||
1096 | |||
1097 | if (oob_required && !erased) { | ||
1098 | /* TODO: use DMA to retrieve OOB */ | ||
1099 | nand->cmdfunc(mtd, NAND_CMD_RNDOUT, | ||
1100 | mtd->writesize + oob_off, -1); | ||
1101 | nand->read_buf(mtd, oob, ecc->bytes + 4); | ||
1102 | |||
1103 | sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i, | ||
1104 | !i, page); | ||
1105 | } | ||
1106 | |||
1107 | if (erased) | ||
1108 | raw_mode = 1; | ||
1109 | |||
1110 | sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); | ||
1111 | } | ||
1112 | |||
1113 | if (status & NFC_ECC_ERR_MSK) { | ||
1114 | for (i = 0; i < nchunks; i++) { | ||
1115 | int data_off = i * ecc->size; | ||
1116 | int oob_off = i * (ecc->bytes + 4); | ||
1117 | u8 *data = buf + data_off; | ||
1118 | u8 *oob = nand->oob_poi + oob_off; | ||
1119 | |||
1120 | if (!(status & NFC_ECC_ERR(i))) | ||
1121 | continue; | ||
1122 | |||
1123 | /* | ||
1124 | * Re-read the data with the randomizer disabled to | ||
1125 | * identify bitflips in erased pages. | ||
1126 | */ | ||
1127 | if (randomized) { | ||
1128 | /* TODO: use DMA to read page in raw mode */ | ||
1129 | nand->cmdfunc(mtd, NAND_CMD_RNDOUT, | ||
1130 | data_off, -1); | ||
1131 | nand->read_buf(mtd, data, ecc->size); | ||
1132 | } | ||
1133 | |||
1134 | /* TODO: use DMA to retrieve OOB */ | ||
1135 | nand->cmdfunc(mtd, NAND_CMD_RNDOUT, | ||
1136 | mtd->writesize + oob_off, -1); | ||
1137 | nand->read_buf(mtd, oob, ecc->bytes + 4); | ||
1138 | |||
1139 | ret = nand_check_erased_ecc_chunk(data, ecc->size, | ||
1140 | oob, ecc->bytes + 4, | ||
1141 | NULL, 0, | ||
1142 | ecc->strength); | ||
1143 | if (ret >= 0) | ||
1144 | raw_mode = 1; | ||
1145 | |||
1146 | sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret); | ||
1147 | } | ||
1148 | } | ||
1149 | |||
1150 | if (oob_required) | ||
1151 | sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi, | ||
1152 | NULL, !raw_mode, | ||
1153 | page); | ||
1154 | |||
1155 | return max_bitflips; | ||
1156 | } | ||
1157 | |||
970 | static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, | 1158 | static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd, |
971 | const u8 *data, int data_off, | 1159 | const u8 *data, int data_off, |
972 | const u8 *oob, int oob_off, | 1160 | const u8 *oob, int oob_off, |
@@ -1065,6 +1253,23 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd, | |||
1065 | return max_bitflips; | 1253 | return max_bitflips; |
1066 | } | 1254 | } |
1067 | 1255 | ||
1256 | static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd, | ||
1257 | struct nand_chip *chip, u8 *buf, | ||
1258 | int oob_required, int page) | ||
1259 | { | ||
1260 | int ret; | ||
1261 | |||
1262 | ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page, | ||
1263 | chip->ecc.steps); | ||
1264 | if (ret >= 0) | ||
1265 | return ret; | ||
1266 | |||
1267 | /* Fallback to PIO mode */ | ||
1268 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); | ||
1269 | |||
1270 | return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page); | ||
1271 | } | ||
1272 | |||
1068 | static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, | 1273 | static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, |
1069 | struct nand_chip *chip, | 1274 | struct nand_chip *chip, |
1070 | u32 data_offs, u32 readlen, | 1275 | u32 data_offs, u32 readlen, |
@@ -1098,6 +1303,25 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, | |||
1098 | return max_bitflips; | 1303 | return max_bitflips; |
1099 | } | 1304 | } |
1100 | 1305 | ||
1306 | static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd, | ||
1307 | struct nand_chip *chip, | ||
1308 | u32 data_offs, u32 readlen, | ||
1309 | u8 *buf, int page) | ||
1310 | { | ||
1311 | int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size); | ||
1312 | int ret; | ||
1313 | |||
1314 | ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks); | ||
1315 | if (ret >= 0) | ||
1316 | return ret; | ||
1317 | |||
1318 | /* Fallback to PIO mode */ | ||
1319 | chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1); | ||
1320 | |||
1321 | return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen, | ||
1322 | buf, page); | ||
1323 | } | ||
1324 | |||
1101 | static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, | 1325 | static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, |
1102 | struct nand_chip *chip, | 1326 | struct nand_chip *chip, |
1103 | const uint8_t *buf, int oob_required, | 1327 | const uint8_t *buf, int oob_required, |
@@ -1130,6 +1354,99 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd, | |||
1130 | return 0; | 1354 | return 0; |
1131 | } | 1355 | } |
1132 | 1356 | ||
1357 | static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd, | ||
1358 | struct nand_chip *chip, | ||
1359 | u32 data_offs, u32 data_len, | ||
1360 | const u8 *buf, int oob_required, | ||
1361 | int page) | ||
1362 | { | ||
1363 | struct nand_ecc_ctrl *ecc = &chip->ecc; | ||
1364 | int ret, i, cur_off = 0; | ||
1365 | |||
1366 | sunxi_nfc_hw_ecc_enable(mtd); | ||
1367 | |||
1368 | for (i = data_offs / ecc->size; | ||
1369 | i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) { | ||
1370 | int data_off = i * ecc->size; | ||
1371 | int oob_off = i * (ecc->bytes + 4); | ||
1372 | const u8 *data = buf + data_off; | ||
1373 | const u8 *oob = chip->oob_poi + oob_off; | ||
1374 | |||
1375 | ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob, | ||
1376 | oob_off + mtd->writesize, | ||
1377 | &cur_off, !i, page); | ||
1378 | if (ret) | ||
1379 | return ret; | ||
1380 | } | ||
1381 | |||
1382 | sunxi_nfc_hw_ecc_disable(mtd); | ||
1383 | |||
1384 | return 0; | ||
1385 | } | ||
1386 | |||
1387 | static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd, | ||
1388 | struct nand_chip *chip, | ||
1389 | const u8 *buf, | ||
1390 | int oob_required, | ||
1391 | int page) | ||
1392 | { | ||
1393 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1394 | struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller); | ||
1395 | struct nand_ecc_ctrl *ecc = &nand->ecc; | ||
1396 | struct scatterlist sg; | ||
1397 | int ret, i; | ||
1398 | |||
1399 | ret = sunxi_nfc_wait_cmd_fifo_empty(nfc); | ||
1400 | if (ret) | ||
1401 | return ret; | ||
1402 | |||
1403 | ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps, | ||
1404 | DMA_TO_DEVICE, &sg); | ||
1405 | if (ret) | ||
1406 | goto pio_fallback; | ||
1407 | |||
1408 | for (i = 0; i < ecc->steps; i++) { | ||
1409 | const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4)); | ||
1410 | |||
1411 | sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page); | ||
1412 | } | ||
1413 | |||
1414 | sunxi_nfc_hw_ecc_enable(mtd); | ||
1415 | sunxi_nfc_randomizer_config(mtd, page, false); | ||
1416 | sunxi_nfc_randomizer_enable(mtd); | ||
1417 | |||
1418 | writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG, | ||
1419 | nfc->regs + NFC_REG_RCMD_SET); | ||
1420 | |||
1421 | dma_async_issue_pending(nfc->dmac); | ||
1422 | |||
1423 | writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | | ||
1424 | NFC_DATA_TRANS | NFC_ACCESS_DIR, | ||
1425 | nfc->regs + NFC_REG_CMD); | ||
1426 | |||
1427 | ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0); | ||
1428 | if (ret) | ||
1429 | dmaengine_terminate_all(nfc->dmac); | ||
1430 | |||
1431 | sunxi_nfc_randomizer_disable(mtd); | ||
1432 | sunxi_nfc_hw_ecc_disable(mtd); | ||
1433 | |||
1434 | sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg); | ||
1435 | |||
1436 | if (ret) | ||
1437 | return ret; | ||
1438 | |||
1439 | if (oob_required || (chip->options & NAND_NEED_SCRAMBLING)) | ||
1440 | /* TODO: use DMA to transfer extra OOB bytes ? */ | ||
1441 | sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, | ||
1442 | NULL, page); | ||
1443 | |||
1444 | return 0; | ||
1445 | |||
1446 | pio_fallback: | ||
1447 | return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page); | ||
1448 | } | ||
1449 | |||
1133 | static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, | 1450 | static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd, |
1134 | struct nand_chip *chip, | 1451 | struct nand_chip *chip, |
1135 | uint8_t *buf, int oob_required, | 1452 | uint8_t *buf, int oob_required, |
@@ -1497,10 +1814,19 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd, | |||
1497 | int ret; | 1814 | int ret; |
1498 | int i; | 1815 | int i; |
1499 | 1816 | ||
1817 | if (ecc->size != 512 && ecc->size != 1024) | ||
1818 | return -EINVAL; | ||
1819 | |||
1500 | data = kzalloc(sizeof(*data), GFP_KERNEL); | 1820 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
1501 | if (!data) | 1821 | if (!data) |
1502 | return -ENOMEM; | 1822 | return -ENOMEM; |
1503 | 1823 | ||
1824 | /* Prefer 1k ECC chunks over 512 byte ones */ | ||
1825 | if (ecc->size == 512 && mtd->writesize > 512) { | ||
1826 | ecc->size = 1024; | ||
1827 | ecc->strength *= 2; | ||
1828 | } | ||
1829 | |||
1504 | /* Add ECC info retrieval from DT */ | 1830 | /* Add ECC info retrieval from DT */ |
1505 | for (i = 0; i < ARRAY_SIZE(strengths); i++) { | 1831 | for (i = 0; i < ARRAY_SIZE(strengths); i++) { |
1506 | if (ecc->strength <= strengths[i]) | 1832 | if (ecc->strength <= strengths[i]) |
@@ -1550,14 +1876,28 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, | |||
1550 | struct nand_ecc_ctrl *ecc, | 1876 | struct nand_ecc_ctrl *ecc, |
1551 | struct device_node *np) | 1877 | struct device_node *np) |
1552 | { | 1878 | { |
1879 | struct nand_chip *nand = mtd_to_nand(mtd); | ||
1880 | struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); | ||
1881 | struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); | ||
1553 | int ret; | 1882 | int ret; |
1554 | 1883 | ||
1555 | ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); | 1884 | ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np); |
1556 | if (ret) | 1885 | if (ret) |
1557 | return ret; | 1886 | return ret; |
1558 | 1887 | ||
1559 | ecc->read_page = sunxi_nfc_hw_ecc_read_page; | 1888 | if (nfc->dmac) { |
1560 | ecc->write_page = sunxi_nfc_hw_ecc_write_page; | 1889 | ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma; |
1890 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma; | ||
1891 | ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma; | ||
1892 | nand->options |= NAND_USE_BOUNCE_BUFFER; | ||
1893 | } else { | ||
1894 | ecc->read_page = sunxi_nfc_hw_ecc_read_page; | ||
1895 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; | ||
1896 | ecc->write_page = sunxi_nfc_hw_ecc_write_page; | ||
1897 | } | ||
1898 | |||
1899 | /* TODO: support DMA for raw accesses and subpage write */ | ||
1900 | ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage; | ||
1561 | ecc->read_oob_raw = nand_read_oob_std; | 1901 | ecc->read_oob_raw = nand_read_oob_std; |
1562 | ecc->write_oob_raw = nand_write_oob_std; | 1902 | ecc->write_oob_raw = nand_write_oob_std; |
1563 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; | 1903 | ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; |
@@ -1871,26 +2211,59 @@ static int sunxi_nfc_probe(struct platform_device *pdev) | |||
1871 | if (ret) | 2211 | if (ret) |
1872 | goto out_ahb_clk_unprepare; | 2212 | goto out_ahb_clk_unprepare; |
1873 | 2213 | ||
2214 | nfc->reset = devm_reset_control_get_optional(dev, "ahb"); | ||
2215 | if (!IS_ERR(nfc->reset)) { | ||
2216 | ret = reset_control_deassert(nfc->reset); | ||
2217 | if (ret) { | ||
2218 | dev_err(dev, "reset err %d\n", ret); | ||
2219 | goto out_mod_clk_unprepare; | ||
2220 | } | ||
2221 | } else if (PTR_ERR(nfc->reset) != -ENOENT) { | ||
2222 | ret = PTR_ERR(nfc->reset); | ||
2223 | goto out_mod_clk_unprepare; | ||
2224 | } | ||
2225 | |||
1874 | ret = sunxi_nfc_rst(nfc); | 2226 | ret = sunxi_nfc_rst(nfc); |
1875 | if (ret) | 2227 | if (ret) |
1876 | goto out_mod_clk_unprepare; | 2228 | goto out_ahb_reset_reassert; |
1877 | 2229 | ||
1878 | writel(0, nfc->regs + NFC_REG_INT); | 2230 | writel(0, nfc->regs + NFC_REG_INT); |
1879 | ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt, | 2231 | ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt, |
1880 | 0, "sunxi-nand", nfc); | 2232 | 0, "sunxi-nand", nfc); |
1881 | if (ret) | 2233 | if (ret) |
1882 | goto out_mod_clk_unprepare; | 2234 | goto out_ahb_reset_reassert; |
2235 | |||
2236 | nfc->dmac = dma_request_slave_channel(dev, "rxtx"); | ||
2237 | if (nfc->dmac) { | ||
2238 | struct dma_slave_config dmac_cfg = { }; | ||
2239 | |||
2240 | dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA; | ||
2241 | dmac_cfg.dst_addr = dmac_cfg.src_addr; | ||
2242 | dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
2243 | dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width; | ||
2244 | dmac_cfg.src_maxburst = 4; | ||
2245 | dmac_cfg.dst_maxburst = 4; | ||
2246 | dmaengine_slave_config(nfc->dmac, &dmac_cfg); | ||
2247 | } else { | ||
2248 | dev_warn(dev, "failed to request rxtx DMA channel\n"); | ||
2249 | } | ||
1883 | 2250 | ||
1884 | platform_set_drvdata(pdev, nfc); | 2251 | platform_set_drvdata(pdev, nfc); |
1885 | 2252 | ||
1886 | ret = sunxi_nand_chips_init(dev, nfc); | 2253 | ret = sunxi_nand_chips_init(dev, nfc); |
1887 | if (ret) { | 2254 | if (ret) { |
1888 | dev_err(dev, "failed to init nand chips\n"); | 2255 | dev_err(dev, "failed to init nand chips\n"); |
1889 | goto out_mod_clk_unprepare; | 2256 | goto out_release_dmac; |
1890 | } | 2257 | } |
1891 | 2258 | ||
1892 | return 0; | 2259 | return 0; |
1893 | 2260 | ||
2261 | out_release_dmac: | ||
2262 | if (nfc->dmac) | ||
2263 | dma_release_channel(nfc->dmac); | ||
2264 | out_ahb_reset_reassert: | ||
2265 | if (!IS_ERR(nfc->reset)) | ||
2266 | reset_control_assert(nfc->reset); | ||
1894 | out_mod_clk_unprepare: | 2267 | out_mod_clk_unprepare: |
1895 | clk_disable_unprepare(nfc->mod_clk); | 2268 | clk_disable_unprepare(nfc->mod_clk); |
1896 | out_ahb_clk_unprepare: | 2269 | out_ahb_clk_unprepare: |
@@ -1904,6 +2277,12 @@ static int sunxi_nfc_remove(struct platform_device *pdev) | |||
1904 | struct sunxi_nfc *nfc = platform_get_drvdata(pdev); | 2277 | struct sunxi_nfc *nfc = platform_get_drvdata(pdev); |
1905 | 2278 | ||
1906 | sunxi_nand_chips_cleanup(nfc); | 2279 | sunxi_nand_chips_cleanup(nfc); |
2280 | |||
2281 | if (!IS_ERR(nfc->reset)) | ||
2282 | reset_control_assert(nfc->reset); | ||
2283 | |||
2284 | if (nfc->dmac) | ||
2285 | dma_release_channel(nfc->dmac); | ||
1907 | clk_disable_unprepare(nfc->mod_clk); | 2286 | clk_disable_unprepare(nfc->mod_clk); |
1908 | clk_disable_unprepare(nfc->ahb_clk); | 2287 | clk_disable_unprepare(nfc->ahb_clk); |
1909 | 2288 | ||
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c index 0cf0ac07a8c2..1f2948c0c458 100644 --- a/drivers/mtd/nand/xway_nand.c +++ b/drivers/mtd/nand/xway_nand.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * by the Free Software Foundation. | 4 | * by the Free Software Foundation. |
5 | * | 5 | * |
6 | * Copyright © 2012 John Crispin <blogic@openwrt.org> | 6 | * Copyright © 2012 John Crispin <blogic@openwrt.org> |
7 | * Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de> | ||
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/mtd/nand.h> | 10 | #include <linux/mtd/nand.h> |
@@ -16,20 +17,28 @@ | |||
16 | #define EBU_ADDSEL1 0x24 | 17 | #define EBU_ADDSEL1 0x24 |
17 | #define EBU_NAND_CON 0xB0 | 18 | #define EBU_NAND_CON 0xB0 |
18 | #define EBU_NAND_WAIT 0xB4 | 19 | #define EBU_NAND_WAIT 0xB4 |
20 | #define NAND_WAIT_RD BIT(0) /* NAND flash status output */ | ||
21 | #define NAND_WAIT_WR_C BIT(3) /* NAND Write/Read complete */ | ||
19 | #define EBU_NAND_ECC0 0xB8 | 22 | #define EBU_NAND_ECC0 0xB8 |
20 | #define EBU_NAND_ECC_AC 0xBC | 23 | #define EBU_NAND_ECC_AC 0xBC |
21 | 24 | ||
22 | /* nand commands */ | 25 | /* |
23 | #define NAND_CMD_ALE (1 << 2) | 26 | * nand commands |
24 | #define NAND_CMD_CLE (1 << 3) | 27 | * The pins of the NAND chip are selected based on the address bits of the |
25 | #define NAND_CMD_CS (1 << 4) | 28 | * "register" read and write. There are no special registers, but an |
26 | #define NAND_WRITE_CMD_RESET 0xff | 29 | * address range and the lower address bits are used to activate the |
30 | * correct line. For example, when bit (1 << 2) is set in the address, | ||
31 | * the ALE pin will be activated. | ||
32 | */ | ||
33 | #define NAND_CMD_ALE BIT(2) /* address latch enable */ | ||
34 | #define NAND_CMD_CLE BIT(3) /* command latch enable */ | ||
35 | #define NAND_CMD_CS BIT(4) /* chip select */ | ||
36 | #define NAND_CMD_SE BIT(5) /* spare area access latch */ | ||
37 | #define NAND_CMD_WP BIT(6) /* write protect */ | ||
27 | #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) | 38 | #define NAND_WRITE_CMD (NAND_CMD_CS | NAND_CMD_CLE) |
28 | #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) | 39 | #define NAND_WRITE_ADDR (NAND_CMD_CS | NAND_CMD_ALE) |
29 | #define NAND_WRITE_DATA (NAND_CMD_CS) | 40 | #define NAND_WRITE_DATA (NAND_CMD_CS) |
30 | #define NAND_READ_DATA (NAND_CMD_CS) | 41 | #define NAND_READ_DATA (NAND_CMD_CS) |
31 | #define NAND_WAIT_WR_C (1 << 3) | ||
32 | #define NAND_WAIT_RD (0x1) | ||
33 | 42 | ||
34 | /* we need to tel the ebu which addr we mapped the nand to */ | 43 | /* we need to tel the ebu which addr we mapped the nand to */ |
35 | #define ADDSEL1_MASK(x) (x << 4) | 44 | #define ADDSEL1_MASK(x) (x << 4) |
@@ -54,31 +63,41 @@ | |||
54 | #define NAND_CON_CSMUX (1 << 1) | 63 | #define NAND_CON_CSMUX (1 << 1) |
55 | #define NAND_CON_NANDM 1 | 64 | #define NAND_CON_NANDM 1 |
56 | 65 | ||
57 | static void xway_reset_chip(struct nand_chip *chip) | 66 | struct xway_nand_data { |
67 | struct nand_chip chip; | ||
68 | unsigned long csflags; | ||
69 | void __iomem *nandaddr; | ||
70 | }; | ||
71 | |||
72 | static u8 xway_readb(struct mtd_info *mtd, int op) | ||
58 | { | 73 | { |
59 | unsigned long nandaddr = (unsigned long) chip->IO_ADDR_W; | 74 | struct nand_chip *chip = mtd_to_nand(mtd); |
60 | unsigned long flags; | 75 | struct xway_nand_data *data = nand_get_controller_data(chip); |
61 | 76 | ||
62 | nandaddr &= ~NAND_WRITE_ADDR; | 77 | return readb(data->nandaddr + op); |
63 | nandaddr |= NAND_WRITE_CMD; | 78 | } |
64 | 79 | ||
65 | /* finish with a reset */ | 80 | static void xway_writeb(struct mtd_info *mtd, int op, u8 value) |
66 | spin_lock_irqsave(&ebu_lock, flags); | 81 | { |
67 | writeb(NAND_WRITE_CMD_RESET, (void __iomem *) nandaddr); | 82 | struct nand_chip *chip = mtd_to_nand(mtd); |
68 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) | 83 | struct xway_nand_data *data = nand_get_controller_data(chip); |
69 | ; | 84 | |
70 | spin_unlock_irqrestore(&ebu_lock, flags); | 85 | writeb(value, data->nandaddr + op); |
71 | } | 86 | } |
72 | 87 | ||
73 | static void xway_select_chip(struct mtd_info *mtd, int chip) | 88 | static void xway_select_chip(struct mtd_info *mtd, int select) |
74 | { | 89 | { |
90 | struct nand_chip *chip = mtd_to_nand(mtd); | ||
91 | struct xway_nand_data *data = nand_get_controller_data(chip); | ||
75 | 92 | ||
76 | switch (chip) { | 93 | switch (select) { |
77 | case -1: | 94 | case -1: |
78 | ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); | 95 | ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON); |
79 | ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); | 96 | ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON); |
97 | spin_unlock_irqrestore(&ebu_lock, data->csflags); | ||
80 | break; | 98 | break; |
81 | case 0: | 99 | case 0: |
100 | spin_lock_irqsave(&ebu_lock, data->csflags); | ||
82 | ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); | 101 | ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON); |
83 | ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); | 102 | ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON); |
84 | break; | 103 | break; |
@@ -89,26 +108,16 @@ static void xway_select_chip(struct mtd_info *mtd, int chip) | |||
89 | 108 | ||
90 | static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) | 109 | static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
91 | { | 110 | { |
92 | struct nand_chip *this = mtd_to_nand(mtd); | 111 | if (cmd == NAND_CMD_NONE) |
93 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; | 112 | return; |
94 | unsigned long flags; | ||
95 | |||
96 | if (ctrl & NAND_CTRL_CHANGE) { | ||
97 | nandaddr &= ~(NAND_WRITE_CMD | NAND_WRITE_ADDR); | ||
98 | if (ctrl & NAND_CLE) | ||
99 | nandaddr |= NAND_WRITE_CMD; | ||
100 | else | ||
101 | nandaddr |= NAND_WRITE_ADDR; | ||
102 | this->IO_ADDR_W = (void __iomem *) nandaddr; | ||
103 | } | ||
104 | 113 | ||
105 | if (cmd != NAND_CMD_NONE) { | 114 | if (ctrl & NAND_CLE) |
106 | spin_lock_irqsave(&ebu_lock, flags); | 115 | xway_writeb(mtd, NAND_WRITE_CMD, cmd); |
107 | writeb(cmd, this->IO_ADDR_W); | 116 | else if (ctrl & NAND_ALE) |
108 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) | 117 | xway_writeb(mtd, NAND_WRITE_ADDR, cmd); |
109 | ; | 118 | |
110 | spin_unlock_irqrestore(&ebu_lock, flags); | 119 | while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0) |
111 | } | 120 | ; |
112 | } | 121 | } |
113 | 122 | ||
114 | static int xway_dev_ready(struct mtd_info *mtd) | 123 | static int xway_dev_ready(struct mtd_info *mtd) |
@@ -118,80 +127,122 @@ static int xway_dev_ready(struct mtd_info *mtd) | |||
118 | 127 | ||
119 | static unsigned char xway_read_byte(struct mtd_info *mtd) | 128 | static unsigned char xway_read_byte(struct mtd_info *mtd) |
120 | { | 129 | { |
121 | struct nand_chip *this = mtd_to_nand(mtd); | 130 | return xway_readb(mtd, NAND_READ_DATA); |
122 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_R; | 131 | } |
123 | unsigned long flags; | 132 | |
124 | int ret; | 133 | static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len) |
134 | { | ||
135 | int i; | ||
125 | 136 | ||
126 | spin_lock_irqsave(&ebu_lock, flags); | 137 | for (i = 0; i < len; i++) |
127 | ret = ltq_r8((void __iomem *)(nandaddr + NAND_READ_DATA)); | 138 | buf[i] = xway_readb(mtd, NAND_WRITE_DATA); |
128 | spin_unlock_irqrestore(&ebu_lock, flags); | 139 | } |
129 | 140 | ||
130 | return ret; | 141 | static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len) |
142 | { | ||
143 | int i; | ||
144 | |||
145 | for (i = 0; i < len; i++) | ||
146 | xway_writeb(mtd, NAND_WRITE_DATA, buf[i]); | ||
131 | } | 147 | } |
132 | 148 | ||
149 | /* | ||
150 | * Probe for the NAND device. | ||
151 | */ | ||
133 | static int xway_nand_probe(struct platform_device *pdev) | 152 | static int xway_nand_probe(struct platform_device *pdev) |
134 | { | 153 | { |
135 | struct nand_chip *this = platform_get_drvdata(pdev); | 154 | struct xway_nand_data *data; |
136 | unsigned long nandaddr = (unsigned long) this->IO_ADDR_W; | 155 | struct mtd_info *mtd; |
137 | const __be32 *cs = of_get_property(pdev->dev.of_node, | 156 | struct resource *res; |
138 | "lantiq,cs", NULL); | 157 | int err; |
158 | u32 cs; | ||
139 | u32 cs_flag = 0; | 159 | u32 cs_flag = 0; |
140 | 160 | ||
161 | /* Allocate memory for the device structure (and zero it) */ | ||
162 | data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data), | ||
163 | GFP_KERNEL); | ||
164 | if (!data) | ||
165 | return -ENOMEM; | ||
166 | |||
167 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
168 | data->nandaddr = devm_ioremap_resource(&pdev->dev, res); | ||
169 | if (IS_ERR(data->nandaddr)) | ||
170 | return PTR_ERR(data->nandaddr); | ||
171 | |||
172 | nand_set_flash_node(&data->chip, pdev->dev.of_node); | ||
173 | mtd = nand_to_mtd(&data->chip); | ||
174 | mtd->dev.parent = &pdev->dev; | ||
175 | |||
176 | data->chip.cmd_ctrl = xway_cmd_ctrl; | ||
177 | data->chip.dev_ready = xway_dev_ready; | ||
178 | data->chip.select_chip = xway_select_chip; | ||
179 | data->chip.write_buf = xway_write_buf; | ||
180 | data->chip.read_buf = xway_read_buf; | ||
181 | data->chip.read_byte = xway_read_byte; | ||
182 | data->chip.chip_delay = 30; | ||
183 | |||
184 | data->chip.ecc.mode = NAND_ECC_SOFT; | ||
185 | data->chip.ecc.algo = NAND_ECC_HAMMING; | ||
186 | |||
187 | platform_set_drvdata(pdev, data); | ||
188 | nand_set_controller_data(&data->chip, data); | ||
189 | |||
141 | /* load our CS from the DT. Either we find a valid 1 or default to 0 */ | 190 | /* load our CS from the DT. Either we find a valid 1 or default to 0 */ |
142 | if (cs && (*cs == 1)) | 191 | err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs); |
192 | if (!err && cs == 1) | ||
143 | cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; | 193 | cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1; |
144 | 194 | ||
145 | /* setup the EBU to run in NAND mode on our base addr */ | 195 | /* setup the EBU to run in NAND mode on our base addr */ |
146 | ltq_ebu_w32(CPHYSADDR(nandaddr) | 196 | ltq_ebu_w32(CPHYSADDR(data->nandaddr) |
147 | | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); | 197 | | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1); |
148 | 198 | ||
149 | ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 | 199 | ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2 |
150 | | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 | 200 | | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1 |
151 | | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); | 201 | | BUSCON1_CMULT4, LTQ_EBU_BUSCON1); |
152 | 202 | ||
153 | ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P | 203 | ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P |
154 | | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P | 204 | | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P |
155 | | cs_flag, EBU_NAND_CON); | 205 | | cs_flag, EBU_NAND_CON); |
156 | 206 | ||
157 | /* finish with a reset */ | 207 | /* Scan to find existence of the device */ |
158 | xway_reset_chip(this); | 208 | err = nand_scan(mtd, 1); |
209 | if (err) | ||
210 | return err; | ||
159 | 211 | ||
160 | return 0; | 212 | err = mtd_device_register(mtd, NULL, 0); |
161 | } | 213 | if (err) |
214 | nand_release(mtd); | ||
162 | 215 | ||
163 | static struct platform_nand_data xway_nand_data = { | 216 | return err; |
164 | .chip = { | 217 | } |
165 | .nr_chips = 1, | ||
166 | .chip_delay = 30, | ||
167 | }, | ||
168 | .ctrl = { | ||
169 | .probe = xway_nand_probe, | ||
170 | .cmd_ctrl = xway_cmd_ctrl, | ||
171 | .dev_ready = xway_dev_ready, | ||
172 | .select_chip = xway_select_chip, | ||
173 | .read_byte = xway_read_byte, | ||
174 | } | ||
175 | }; | ||
176 | 218 | ||
177 | /* | 219 | /* |
178 | * Try to find the node inside the DT. If it is available attach our | 220 | * Remove a NAND device.
179 | * platform_nand_data | ||
180 | */ | 221 | */ |
181 | static int __init xway_register_nand(void) | 222 | static int xway_nand_remove(struct platform_device *pdev) |
182 | { | 223 | { |
183 | struct device_node *node; | 224 | struct xway_nand_data *data = platform_get_drvdata(pdev); |
184 | struct platform_device *pdev; | 225 | |
185 | 226 | nand_release(nand_to_mtd(&data->chip)); | |
186 | node = of_find_compatible_node(NULL, NULL, "lantiq,nand-xway"); | 227 | |
187 | if (!node) | ||
188 | return -ENOENT; | ||
189 | pdev = of_find_device_by_node(node); | ||
190 | if (!pdev) | ||
191 | return -EINVAL; | ||
192 | pdev->dev.platform_data = &xway_nand_data; | ||
193 | of_node_put(node); | ||
194 | return 0; | 228 | return 0; |
195 | } | 229 | } |
196 | 230 | ||
197 | subsys_initcall(xway_register_nand); | 231 | static const struct of_device_id xway_nand_match[] = { |
232 | { .compatible = "lantiq,nand-xway" }, | ||
233 | {}, | ||
234 | }; | ||
235 | MODULE_DEVICE_TABLE(of, xway_nand_match); | ||
236 | |||
237 | static struct platform_driver xway_nand_driver = { | ||
238 | .probe = xway_nand_probe, | ||
239 | .remove = xway_nand_remove, | ||
240 | .driver = { | ||
241 | .name = "lantiq,nand-xway", | ||
242 | .of_match_table = xway_nand_match, | ||
243 | }, | ||
244 | }; | ||
245 | |||
246 | module_platform_driver(xway_nand_driver); | ||
247 | |||
248 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index a4b029a417f0..1a6d0e367b89 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c | |||
@@ -3188,13 +3188,13 @@ static int onenand_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, | |||
3188 | size_t tmp_retlen; | 3188 | size_t tmp_retlen; |
3189 | 3189 | ||
3190 | ret = action(mtd, from, len, &tmp_retlen, buf); | 3190 | ret = action(mtd, from, len, &tmp_retlen, buf); |
3191 | if (ret) | ||
3192 | break; | ||
3191 | 3193 | ||
3192 | buf += tmp_retlen; | 3194 | buf += tmp_retlen; |
3193 | len -= tmp_retlen; | 3195 | len -= tmp_retlen; |
3194 | *retlen += tmp_retlen; | 3196 | *retlen += tmp_retlen; |
3195 | 3197 | ||
3196 | if (ret) | ||
3197 | break; | ||
3198 | } | 3198 | } |
3199 | otp_pages--; | 3199 | otp_pages--; |
3200 | } | 3200 | } |
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index d42c98e1f581..4a682ee0f632 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig | |||
@@ -29,6 +29,26 @@ config MTD_SPI_NOR_USE_4K_SECTORS | |||
29 | Please note that some tools/drivers/filesystems may not work with | 29 | Please note that some tools/drivers/filesystems may not work with |
30 | 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum). | 30 | 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum). |
31 | 31 | ||
32 | config SPI_ATMEL_QUADSPI | ||
33 | tristate "Atmel Quad SPI Controller" | ||
34 | depends on ARCH_AT91 || (ARM && COMPILE_TEST) | ||
35 | depends on OF && HAS_IOMEM | ||
36 | help | ||
37 | This enables support for the Quad SPI controller in master mode. | ||
38 | This driver does not support generic SPI. The implementation only | ||
39 | supports SPI NOR. | ||
40 | |||
41 | config SPI_CADENCE_QUADSPI | ||
42 | tristate "Cadence Quad SPI controller" | ||
43 | depends on OF && ARM | ||
44 | help | ||
45 | Enable support for the Cadence Quad SPI Flash controller. | ||
46 | |||
47 | Cadence QSPI is a specialized controller for connecting an SPI | ||
48 | Flash over a 1/2/4-bit wide bus. Enable this option if you have a | ||
49 | device with a Cadence QSPI controller and want to access the | ||
50 | Flash as an MTD device. | ||
51 | |||
32 | config SPI_FSL_QUADSPI | 52 | config SPI_FSL_QUADSPI |
33 | tristate "Freescale Quad SPI controller" | 53 | tristate "Freescale Quad SPI controller" |
34 | depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST | 54 | depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST |
@@ -38,6 +58,13 @@ config SPI_FSL_QUADSPI | |||
38 | This controller does not support generic SPI. It only supports | 58 | This controller does not support generic SPI. It only supports |
39 | SPI NOR. | 59 | SPI NOR. |
40 | 60 | ||
61 | config SPI_HISI_SFC | ||
62 | tristate "Hisilicon SPI-NOR Flash Controller (SFC)" | ||
63 | depends on ARCH_HISI || COMPILE_TEST | ||
64 | depends on HAS_IOMEM && HAS_DMA | ||
65 | help | ||
66 | This enables support for the Hisilicon SPI-NOR flash controller. | ||
67 | |||
41 | config SPI_NXP_SPIFI | 68 | config SPI_NXP_SPIFI |
42 | tristate "NXP SPI Flash Interface (SPIFI)" | 69 | tristate "NXP SPI Flash Interface (SPIFI)" |
43 | depends on OF && (ARCH_LPC18XX || COMPILE_TEST) | 70 | depends on OF && (ARCH_LPC18XX || COMPILE_TEST) |
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index 0bf3a7f81675..121695e83542 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile | |||
@@ -1,4 +1,7 @@ | |||
1 | obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o | 1 | obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o |
2 | obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o | ||
3 | obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o | ||
2 | obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o | 4 | obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o |
5 | obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o | ||
3 | obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o | 6 | obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o |
4 | obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o | 7 | obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o |
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c new file mode 100644 index 000000000000..47937d9beec6 --- /dev/null +++ b/drivers/mtd/spi-nor/atmel-quadspi.c | |||
@@ -0,0 +1,732 @@ | |||
1 | /* | ||
2 | * Driver for Atmel QSPI Controller | ||
3 | * | ||
4 | * Copyright (C) 2015 Atmel Corporation | ||
5 | * | ||
6 | * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
19 | * | ||
20 | * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/clk.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/err.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/mtd/mtd.h> | ||
31 | #include <linux/mtd/partitions.h> | ||
32 | #include <linux/mtd/spi-nor.h> | ||
33 | #include <linux/platform_data/atmel.h> | ||
34 | #include <linux/of.h> | ||
35 | |||
36 | #include <linux/io.h> | ||
37 | #include <linux/gpio.h> | ||
38 | #include <linux/pinctrl/consumer.h> | ||
39 | |||
40 | /* QSPI register offsets */ | ||
41 | #define QSPI_CR 0x0000 /* Control Register */ | ||
42 | #define QSPI_MR 0x0004 /* Mode Register */ | ||
43 | #define QSPI_RD 0x0008 /* Receive Data Register */ | ||
44 | #define QSPI_TD 0x000c /* Transmit Data Register */ | ||
45 | #define QSPI_SR 0x0010 /* Status Register */ | ||
46 | #define QSPI_IER 0x0014 /* Interrupt Enable Register */ | ||
47 | #define QSPI_IDR 0x0018 /* Interrupt Disable Register */ | ||
48 | #define QSPI_IMR 0x001c /* Interrupt Mask Register */ | ||
49 | #define QSPI_SCR 0x0020 /* Serial Clock Register */ | ||
50 | |||
51 | #define QSPI_IAR 0x0030 /* Instruction Address Register */ | ||
52 | #define QSPI_ICR 0x0034 /* Instruction Code Register */ | ||
53 | #define QSPI_IFR 0x0038 /* Instruction Frame Register */ | ||
54 | |||
55 | #define QSPI_SMR 0x0040 /* Scrambling Mode Register */ | ||
56 | #define QSPI_SKR 0x0044 /* Scrambling Key Register */ | ||
57 | |||
58 | #define QSPI_WPMR 0x00E4 /* Write Protection Mode Register */ | ||
59 | #define QSPI_WPSR 0x00E8 /* Write Protection Status Register */ | ||
60 | |||
61 | #define QSPI_VERSION 0x00FC /* Version Register */ | ||
62 | |||
63 | |||
64 | /* Bitfields in QSPI_CR (Control Register) */ | ||
65 | #define QSPI_CR_QSPIEN BIT(0) | ||
66 | #define QSPI_CR_QSPIDIS BIT(1) | ||
67 | #define QSPI_CR_SWRST BIT(7) | ||
68 | #define QSPI_CR_LASTXFER BIT(24) | ||
69 | |||
70 | /* Bitfields in QSPI_MR (Mode Register) */ | ||
71 | #define QSPI_MR_SSM BIT(0) | ||
72 | #define QSPI_MR_LLB BIT(1) | ||
73 | #define QSPI_MR_WDRBT BIT(2) | ||
74 | #define QSPI_MR_SMRM BIT(3) | ||
75 | #define QSPI_MR_CSMODE_MASK GENMASK(5, 4) | ||
76 | #define QSPI_MR_CSMODE_NOT_RELOADED (0 << 4) | ||
77 | #define QSPI_MR_CSMODE_LASTXFER (1 << 4) | ||
78 | #define QSPI_MR_CSMODE_SYSTEMATICALLY (2 << 4) | ||
79 | #define QSPI_MR_NBBITS_MASK GENMASK(11, 8) | ||
80 | #define QSPI_MR_NBBITS(n) ((((n) - 8) << 8) & QSPI_MR_NBBITS_MASK) | ||
81 | #define QSPI_MR_DLYBCT_MASK GENMASK(23, 16) | ||
82 | #define QSPI_MR_DLYBCT(n) (((n) << 16) & QSPI_MR_DLYBCT_MASK) | ||
83 | #define QSPI_MR_DLYCS_MASK GENMASK(31, 24) | ||
84 | #define QSPI_MR_DLYCS(n) (((n) << 24) & QSPI_MR_DLYCS_MASK) | ||
85 | |||
86 | /* Bitfields in QSPI_SR/QSPI_IER/QSPI_IDR/QSPI_IMR */ | ||
87 | #define QSPI_SR_RDRF BIT(0) | ||
88 | #define QSPI_SR_TDRE BIT(1) | ||
89 | #define QSPI_SR_TXEMPTY BIT(2) | ||
90 | #define QSPI_SR_OVRES BIT(3) | ||
91 | #define QSPI_SR_CSR BIT(8) | ||
92 | #define QSPI_SR_CSS BIT(9) | ||
93 | #define QSPI_SR_INSTRE BIT(10) | ||
94 | #define QSPI_SR_QSPIENS BIT(24) | ||
95 | |||
96 | #define QSPI_SR_CMD_COMPLETED (QSPI_SR_INSTRE | QSPI_SR_CSR) | ||
97 | |||
98 | /* Bitfields in QSPI_SCR (Serial Clock Register) */ | ||
99 | #define QSPI_SCR_CPOL BIT(0) | ||
100 | #define QSPI_SCR_CPHA BIT(1) | ||
101 | #define QSPI_SCR_SCBR_MASK GENMASK(15, 8) | ||
102 | #define QSPI_SCR_SCBR(n) (((n) << 8) & QSPI_SCR_SCBR_MASK) | ||
103 | #define QSPI_SCR_DLYBS_MASK GENMASK(23, 16) | ||
104 | #define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK) | ||
105 | |||
106 | /* Bitfields in QSPI_ICR (Instruction Code Register) */ | ||
107 | #define QSPI_ICR_INST_MASK GENMASK(7, 0) | ||
108 | #define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK) | ||
109 | #define QSPI_ICR_OPT_MASK GENMASK(23, 16) | ||
110 | #define QSPI_ICR_OPT(opt) (((opt) << 16) & QSPI_ICR_OPT_MASK) | ||
111 | |||
112 | /* Bitfields in QSPI_IFR (Instruction Frame Register) */ | ||
113 | #define QSPI_IFR_WIDTH_MASK GENMASK(2, 0) | ||
114 | #define QSPI_IFR_WIDTH_SINGLE_BIT_SPI (0 << 0) | ||
115 | #define QSPI_IFR_WIDTH_DUAL_OUTPUT (1 << 0) | ||
116 | #define QSPI_IFR_WIDTH_QUAD_OUTPUT (2 << 0) | ||
117 | #define QSPI_IFR_WIDTH_DUAL_IO (3 << 0) | ||
118 | #define QSPI_IFR_WIDTH_QUAD_IO (4 << 0) | ||
119 | #define QSPI_IFR_WIDTH_DUAL_CMD (5 << 0) | ||
120 | #define QSPI_IFR_WIDTH_QUAD_CMD (6 << 0) | ||
121 | #define QSPI_IFR_INSTEN BIT(4) | ||
122 | #define QSPI_IFR_ADDREN BIT(5) | ||
123 | #define QSPI_IFR_OPTEN BIT(6) | ||
124 | #define QSPI_IFR_DATAEN BIT(7) | ||
125 | #define QSPI_IFR_OPTL_MASK GENMASK(9, 8) | ||
126 | #define QSPI_IFR_OPTL_1BIT (0 << 8) | ||
127 | #define QSPI_IFR_OPTL_2BIT (1 << 8) | ||
128 | #define QSPI_IFR_OPTL_4BIT (2 << 8) | ||
129 | #define QSPI_IFR_OPTL_8BIT (3 << 8) | ||
130 | #define QSPI_IFR_ADDRL BIT(10) | ||
131 | #define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12) | ||
132 | #define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12) | ||
133 | #define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12) | ||
134 | #define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12) | ||
135 | #define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 12) | ||
136 | #define QSPI_IFR_CRM BIT(14) | ||
137 | #define QSPI_IFR_NBDUM_MASK GENMASK(20, 16) | ||
138 | #define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK) | ||
139 | |||
140 | /* Bitfields in QSPI_SMR (Scrambling Mode Register) */ | ||
141 | #define QSPI_SMR_SCREN BIT(0) | ||
142 | #define QSPI_SMR_RVDIS BIT(1) | ||
143 | |||
144 | /* Bitfields in QSPI_WPMR (Write Protection Mode Register) */ | ||
145 | #define QSPI_WPMR_WPEN BIT(0) | ||
146 | #define QSPI_WPMR_WPKEY_MASK GENMASK(31, 8) | ||
147 | #define QSPI_WPMR_WPKEY(wpkey) (((wpkey) << 8) & QSPI_WPMR_WPKEY_MASK) | ||
148 | |||
149 | /* Bitfields in QSPI_WPSR (Write Protection Status Register) */ | ||
150 | #define QSPI_WPSR_WPVS BIT(0) | ||
151 | #define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8) | ||
152 | #define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC_MASK) | ||
153 | |||
154 | |||
155 | struct atmel_qspi { | ||
156 | void __iomem *regs; | ||
157 | void __iomem *mem; | ||
158 | struct clk *clk; | ||
159 | struct platform_device *pdev; | ||
160 | u32 pending; | ||
161 | |||
162 | struct spi_nor nor; | ||
163 | u32 clk_rate; | ||
164 | struct completion cmd_completion; | ||
165 | }; | ||
166 | |||
167 | struct atmel_qspi_command { | ||
168 | union { | ||
169 | struct { | ||
170 | u32 instruction:1; | ||
171 | u32 address:3; | ||
172 | u32 mode:1; | ||
173 | u32 dummy:1; | ||
174 | u32 data:1; | ||
175 | u32 reserved:25; | ||
176 | } bits; | ||
177 | u32 word; | ||
178 | } enable; | ||
179 | u8 instruction; | ||
180 | u8 mode; | ||
181 | u8 num_mode_cycles; | ||
182 | u8 num_dummy_cycles; | ||
183 | u32 address; | ||
184 | |||
185 | size_t buf_len; | ||
186 | const void *tx_buf; | ||
187 | void *rx_buf; | ||
188 | }; | ||
189 | |||
190 | /* Register access functions */ | ||
191 | static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg) | ||
192 | { | ||
193 | return readl_relaxed(aq->regs + reg); | ||
194 | } | ||
195 | |||
196 | static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value) | ||
197 | { | ||
198 | writel_relaxed(value, aq->regs + reg); | ||
199 | } | ||
200 | |||
201 | static int atmel_qspi_run_transfer(struct atmel_qspi *aq, | ||
202 | const struct atmel_qspi_command *cmd) | ||
203 | { | ||
204 | void __iomem *ahb_mem; | ||
205 | |||
206 | /* Do a PIO transfer through the AHB window (a plain memcpy() DOES NOT work!) */ | ||
207 | ahb_mem = aq->mem; | ||
208 | if (cmd->enable.bits.address) | ||
209 | ahb_mem += cmd->address; | ||
210 | if (cmd->tx_buf) | ||
211 | _memcpy_toio(ahb_mem, cmd->tx_buf, cmd->buf_len); | ||
212 | else | ||
213 | _memcpy_fromio(cmd->rx_buf, ahb_mem, cmd->buf_len); | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | #ifdef DEBUG | ||
219 | static void atmel_qspi_debug_command(struct atmel_qspi *aq, | ||
220 | const struct atmel_qspi_command *cmd, | ||
221 | u32 ifr) | ||
222 | { | ||
223 | u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; | ||
224 | size_t len = 0; | ||
225 | int i; | ||
226 | |||
227 | if (cmd->enable.bits.instruction) | ||
228 | cmd_buf[len++] = cmd->instruction; | ||
229 | |||
230 | for (i = cmd->enable.bits.address-1; i >= 0; --i) | ||
231 | cmd_buf[len++] = (cmd->address >> (i << 3)) & 0xff; | ||
232 | |||
233 | if (cmd->enable.bits.mode) | ||
234 | cmd_buf[len++] = cmd->mode; | ||
235 | |||
236 | if (cmd->enable.bits.dummy) { | ||
237 | int num = cmd->num_dummy_cycles; | ||
238 | |||
239 | switch (ifr & QSPI_IFR_WIDTH_MASK) { | ||
240 | case QSPI_IFR_WIDTH_SINGLE_BIT_SPI: | ||
241 | case QSPI_IFR_WIDTH_DUAL_OUTPUT: | ||
242 | case QSPI_IFR_WIDTH_QUAD_OUTPUT: | ||
243 | num >>= 3; | ||
244 | break; | ||
245 | case QSPI_IFR_WIDTH_DUAL_IO: | ||
246 | case QSPI_IFR_WIDTH_DUAL_CMD: | ||
247 | num >>= 2; | ||
248 | break; | ||
249 | case QSPI_IFR_WIDTH_QUAD_IO: | ||
250 | case QSPI_IFR_WIDTH_QUAD_CMD: | ||
251 | num >>= 1; | ||
252 | break; | ||
253 | default: | ||
254 | return; | ||
255 | } | ||
256 | |||
257 | for (i = 0; i < num; ++i) | ||
258 | cmd_buf[len++] = 0; | ||
259 | } | ||
260 | |||
261 | /* Dump the SPI command */ | ||
262 | print_hex_dump(KERN_DEBUG, "qspi cmd: ", DUMP_PREFIX_NONE, | ||
263 | 32, 1, cmd_buf, len, false); | ||
264 | |||
265 | #ifdef VERBOSE_DEBUG | ||
266 | /* If verbose debug is enabled, also dump the TX data */ | ||
267 | if (cmd->enable.bits.data && cmd->tx_buf) | ||
268 | print_hex_dump(KERN_DEBUG, "qspi tx : ", DUMP_PREFIX_NONE, | ||
269 | 32, 1, cmd->tx_buf, cmd->buf_len, false); | ||
270 | #endif | ||
271 | } | ||
272 | #else | ||
273 | #define atmel_qspi_debug_command(aq, cmd, ifr) | ||
274 | #endif | ||
275 | |||
276 | static int atmel_qspi_run_command(struct atmel_qspi *aq, | ||
277 | const struct atmel_qspi_command *cmd, | ||
278 | u32 ifr_tfrtyp, u32 ifr_width) | ||
279 | { | ||
280 | u32 iar, icr, ifr, sr; | ||
281 | int err = 0; | ||
282 | |||
283 | iar = 0; | ||
284 | icr = 0; | ||
285 | ifr = ifr_tfrtyp | ifr_width; | ||
286 | |||
287 | /* Compute instruction parameters */ | ||
288 | if (cmd->enable.bits.instruction) { | ||
289 | icr |= QSPI_ICR_INST(cmd->instruction); | ||
290 | ifr |= QSPI_IFR_INSTEN; | ||
291 | } | ||
292 | |||
293 | /* Compute address parameters */ | ||
294 | switch (cmd->enable.bits.address) { | ||
295 | case 4: | ||
296 | ifr |= QSPI_IFR_ADDRL; | ||
297 | /* fall through to the 24bit (3 byte) address case. */ | ||
298 | case 3: | ||
299 | iar = (cmd->enable.bits.data) ? 0 : cmd->address; | ||
300 | ifr |= QSPI_IFR_ADDREN; | ||
301 | break; | ||
302 | case 0: | ||
303 | break; | ||
304 | default: | ||
305 | return -EINVAL; | ||
306 | } | ||
307 | |||
308 | /* Compute option parameters */ | ||
309 | if (cmd->enable.bits.mode && cmd->num_mode_cycles) { | ||
310 | u32 mode_cycle_bits, mode_bits; | ||
311 | |||
312 | icr |= QSPI_ICR_OPT(cmd->mode); | ||
313 | ifr |= QSPI_IFR_OPTEN; | ||
314 | |||
315 | switch (ifr & QSPI_IFR_WIDTH_MASK) { | ||
316 | case QSPI_IFR_WIDTH_SINGLE_BIT_SPI: | ||
317 | case QSPI_IFR_WIDTH_DUAL_OUTPUT: | ||
318 | case QSPI_IFR_WIDTH_QUAD_OUTPUT: | ||
319 | mode_cycle_bits = 1; | ||
320 | break; | ||
321 | case QSPI_IFR_WIDTH_DUAL_IO: | ||
322 | case QSPI_IFR_WIDTH_DUAL_CMD: | ||
323 | mode_cycle_bits = 2; | ||
324 | break; | ||
325 | case QSPI_IFR_WIDTH_QUAD_IO: | ||
326 | case QSPI_IFR_WIDTH_QUAD_CMD: | ||
327 | mode_cycle_bits = 4; | ||
328 | break; | ||
329 | default: | ||
330 | return -EINVAL; | ||
331 | } | ||
332 | |||
333 | mode_bits = cmd->num_mode_cycles * mode_cycle_bits; | ||
334 | switch (mode_bits) { | ||
335 | case 1: | ||
336 | ifr |= QSPI_IFR_OPTL_1BIT; | ||
337 | break; | ||
338 | |||
339 | case 2: | ||
340 | ifr |= QSPI_IFR_OPTL_2BIT; | ||
341 | break; | ||
342 | |||
343 | case 4: | ||
344 | ifr |= QSPI_IFR_OPTL_4BIT; | ||
345 | break; | ||
346 | |||
347 | case 8: | ||
348 | ifr |= QSPI_IFR_OPTL_8BIT; | ||
349 | break; | ||
350 | |||
351 | default: | ||
352 | return -EINVAL; | ||
353 | } | ||
354 | } | ||
355 | |||
356 | /* Set number of dummy cycles */ | ||
357 | if (cmd->enable.bits.dummy) | ||
358 | ifr |= QSPI_IFR_NBDUM(cmd->num_dummy_cycles); | ||
359 | |||
360 | /* Set data enable */ | ||
361 | if (cmd->enable.bits.data) { | ||
362 | ifr |= QSPI_IFR_DATAEN; | ||
363 | |||
364 | /* Special case for Continuous Read Mode */ | ||
365 | if (!cmd->tx_buf && !cmd->rx_buf) | ||
366 | ifr |= QSPI_IFR_CRM; | ||
367 | } | ||
368 | |||
369 | /* Clear pending interrupts */ | ||
370 | (void)qspi_readl(aq, QSPI_SR); | ||
371 | |||
372 | /* Set QSPI Instruction Frame registers */ | ||
373 | atmel_qspi_debug_command(aq, cmd, ifr); | ||
374 | qspi_writel(aq, QSPI_IAR, iar); | ||
375 | qspi_writel(aq, QSPI_ICR, icr); | ||
376 | qspi_writel(aq, QSPI_IFR, ifr); | ||
377 | |||
378 | /* Skip to the final steps if there is no data */ | ||
379 | if (!cmd->enable.bits.data) | ||
380 | goto no_data; | ||
381 | |||
382 | /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ | ||
383 | (void)qspi_readl(aq, QSPI_IFR); | ||
384 | |||
385 | /* Stop here for continuous read */ | ||
386 | if (!cmd->tx_buf && !cmd->rx_buf) | ||
387 | return 0; | ||
388 | /* Send/Receive data */ | ||
389 | err = atmel_qspi_run_transfer(aq, cmd); | ||
390 | |||
391 | /* Release the chip-select */ | ||
392 | qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER); | ||
393 | |||
394 | if (err) | ||
395 | return err; | ||
396 | |||
397 | #if defined(DEBUG) && defined(VERBOSE_DEBUG) | ||
398 | /* | ||
399 | * If verbose debug is enabled, also dump the RX data in addition to | ||
400 | * the SPI command previously dumped by atmel_qspi_debug_command() | ||
401 | */ | ||
402 | if (cmd->rx_buf) | ||
403 | print_hex_dump(KERN_DEBUG, "qspi rx : ", DUMP_PREFIX_NONE, | ||
404 | 32, 1, cmd->rx_buf, cmd->buf_len, false); | ||
405 | #endif | ||
406 | no_data: | ||
407 | /* Poll INSTRuction End status */ | ||
408 | sr = qspi_readl(aq, QSPI_SR); | ||
409 | if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) | ||
410 | return err; | ||
411 | |||
412 | /* Wait for INSTRuction End interrupt */ | ||
413 | reinit_completion(&aq->cmd_completion); | ||
414 | aq->pending = sr & QSPI_SR_CMD_COMPLETED; | ||
415 | qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED); | ||
416 | if (!wait_for_completion_timeout(&aq->cmd_completion, | ||
417 | msecs_to_jiffies(1000))) | ||
418 | err = -ETIMEDOUT; | ||
419 | qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED); | ||
420 | |||
421 | return err; | ||
422 | } | ||
423 | |||
424 | static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode, | ||
425 | u8 *buf, int len) | ||
426 | { | ||
427 | struct atmel_qspi *aq = nor->priv; | ||
428 | struct atmel_qspi_command cmd; | ||
429 | |||
430 | memset(&cmd, 0, sizeof(cmd)); | ||
431 | cmd.enable.bits.instruction = 1; | ||
432 | cmd.enable.bits.data = 1; | ||
433 | cmd.instruction = opcode; | ||
434 | cmd.rx_buf = buf; | ||
435 | cmd.buf_len = len; | ||
436 | return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ, | ||
437 | QSPI_IFR_WIDTH_SINGLE_BIT_SPI); | ||
438 | } | ||
439 | |||
440 | static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode, | ||
441 | u8 *buf, int len) | ||
442 | { | ||
443 | struct atmel_qspi *aq = nor->priv; | ||
444 | struct atmel_qspi_command cmd; | ||
445 | |||
446 | memset(&cmd, 0, sizeof(cmd)); | ||
447 | cmd.enable.bits.instruction = 1; | ||
448 | cmd.enable.bits.data = (buf != NULL && len > 0); | ||
449 | cmd.instruction = opcode; | ||
450 | cmd.tx_buf = buf; | ||
451 | cmd.buf_len = len; | ||
452 | return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE, | ||
453 | QSPI_IFR_WIDTH_SINGLE_BIT_SPI); | ||
454 | } | ||
455 | |||
456 | static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len, | ||
457 | const u_char *write_buf) | ||
458 | { | ||
459 | struct atmel_qspi *aq = nor->priv; | ||
460 | struct atmel_qspi_command cmd; | ||
461 | ssize_t ret; | ||
462 | |||
463 | memset(&cmd, 0, sizeof(cmd)); | ||
464 | cmd.enable.bits.instruction = 1; | ||
465 | cmd.enable.bits.address = nor->addr_width; | ||
466 | cmd.enable.bits.data = 1; | ||
467 | cmd.instruction = nor->program_opcode; | ||
468 | cmd.address = (u32)to; | ||
469 | cmd.tx_buf = write_buf; | ||
470 | cmd.buf_len = len; | ||
471 | ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM, | ||
472 | QSPI_IFR_WIDTH_SINGLE_BIT_SPI); | ||
473 | return (ret < 0) ? ret : len; | ||
474 | } | ||
475 | |||
476 | static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs) | ||
477 | { | ||
478 | struct atmel_qspi *aq = nor->priv; | ||
479 | struct atmel_qspi_command cmd; | ||
480 | |||
481 | memset(&cmd, 0, sizeof(cmd)); | ||
482 | cmd.enable.bits.instruction = 1; | ||
483 | cmd.enable.bits.address = nor->addr_width; | ||
484 | cmd.instruction = nor->erase_opcode; | ||
485 | cmd.address = (u32)offs; | ||
486 | return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE, | ||
487 | QSPI_IFR_WIDTH_SINGLE_BIT_SPI); | ||
488 | } | ||
489 | |||
490 | static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len, | ||
491 | u_char *read_buf) | ||
492 | { | ||
493 | struct atmel_qspi *aq = nor->priv; | ||
494 | struct atmel_qspi_command cmd; | ||
495 | u8 num_mode_cycles, num_dummy_cycles; | ||
496 | u32 ifr_width; | ||
497 | ssize_t ret; | ||
498 | |||
499 | switch (nor->flash_read) { | ||
500 | case SPI_NOR_NORMAL: | ||
501 | case SPI_NOR_FAST: | ||
502 | ifr_width = QSPI_IFR_WIDTH_SINGLE_BIT_SPI; | ||
503 | break; | ||
504 | |||
505 | case SPI_NOR_DUAL: | ||
506 | ifr_width = QSPI_IFR_WIDTH_DUAL_OUTPUT; | ||
507 | break; | ||
508 | |||
509 | case SPI_NOR_QUAD: | ||
510 | ifr_width = QSPI_IFR_WIDTH_QUAD_OUTPUT; | ||
511 | break; | ||
512 | |||
513 | default: | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | |||
517 | if (nor->read_dummy >= 2) { | ||
518 | num_mode_cycles = 2; | ||
519 | num_dummy_cycles = nor->read_dummy - 2; | ||
520 | } else { | ||
521 | num_mode_cycles = nor->read_dummy; | ||
522 | num_dummy_cycles = 0; | ||
523 | } | ||
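/*
 * Example (hypothetical flash timing): a Quad Output Fast Read that expects
 * nor->read_dummy == 8 cycles ends up with 2 mode cycles (carrying the 0xff
 * mode byte set below) plus 6 dummy cycles, so the total of 8 cycles seen by
 * the flash is unchanged.
 */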
524 | |||
525 | memset(&cmd, 0, sizeof(cmd)); | ||
526 | cmd.enable.bits.instruction = 1; | ||
527 | cmd.enable.bits.address = nor->addr_width; | ||
528 | cmd.enable.bits.mode = (num_mode_cycles > 0); | ||
529 | cmd.enable.bits.dummy = (num_dummy_cycles > 0); | ||
530 | cmd.enable.bits.data = 1; | ||
531 | cmd.instruction = nor->read_opcode; | ||
532 | cmd.address = (u32)from; | ||
533 | cmd.mode = 0xff; /* This value prevents the flash from entering the 0-4-4 (continuous read) mode */ | ||
534 | cmd.num_mode_cycles = num_mode_cycles; | ||
535 | cmd.num_dummy_cycles = num_dummy_cycles; | ||
536 | cmd.rx_buf = read_buf; | ||
537 | cmd.buf_len = len; | ||
538 | ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM, | ||
539 | ifr_width); | ||
540 | return (ret < 0) ? ret : len; | ||
541 | } | ||
542 | |||
543 | static int atmel_qspi_init(struct atmel_qspi *aq) | ||
544 | { | ||
545 | unsigned long src_rate; | ||
546 | u32 mr, scr, scbr; | ||
547 | |||
548 | /* Reset the QSPI controller */ | ||
549 | qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST); | ||
550 | |||
551 | /* Set the QSPI controller in Serial Memory Mode */ | ||
552 | mr = QSPI_MR_NBBITS(8) | QSPI_MR_SSM; | ||
553 | qspi_writel(aq, QSPI_MR, mr); | ||
554 | |||
555 | src_rate = clk_get_rate(aq->clk); | ||
556 | if (!src_rate) | ||
557 | return -EINVAL; | ||
558 | |||
559 | /* Compute the QSPI baudrate */ | ||
560 | scbr = DIV_ROUND_UP(src_rate, aq->clk_rate); | ||
561 | if (scbr > 0) | ||
562 | scbr--; | ||
563 | scr = QSPI_SCR_SCBR(scbr); | ||
564 | qspi_writel(aq, QSPI_SCR, scr); | ||
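/*
 * Worked example (hypothetical rates): with a 100 MHz peripheral clock and
 * spi-max-frequency = 25 MHz, DIV_ROUND_UP() gives 4, SCBR is programmed as
 * 3 and the controller outputs 100 MHz / (3 + 1) = 25 MHz, so the generated
 * clock never exceeds the requested rate.
 */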
565 | |||
566 | /* Enable the QSPI controller */ | ||
567 | qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN); | ||
568 | |||
569 | return 0; | ||
570 | } | ||
571 | |||
572 | static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) | ||
573 | { | ||
574 | struct atmel_qspi *aq = (struct atmel_qspi *)dev_id; | ||
575 | u32 status, mask, pending; | ||
576 | |||
577 | status = qspi_readl(aq, QSPI_SR); | ||
578 | mask = qspi_readl(aq, QSPI_IMR); | ||
579 | pending = status & mask; | ||
580 | |||
581 | if (!pending) | ||
582 | return IRQ_NONE; | ||
583 | |||
584 | aq->pending |= pending; | ||
585 | if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) | ||
586 | complete(&aq->cmd_completion); | ||
587 | |||
588 | return IRQ_HANDLED; | ||
589 | } | ||
590 | |||
591 | static int atmel_qspi_probe(struct platform_device *pdev) | ||
592 | { | ||
593 | struct device_node *child, *np = pdev->dev.of_node; | ||
594 | struct atmel_qspi *aq; | ||
595 | struct resource *res; | ||
596 | struct spi_nor *nor; | ||
597 | struct mtd_info *mtd; | ||
598 | int irq, err = 0; | ||
599 | |||
600 | if (of_get_child_count(np) != 1) | ||
601 | return -ENODEV; | ||
602 | child = of_get_next_child(np, NULL); | ||
603 | |||
604 | aq = devm_kzalloc(&pdev->dev, sizeof(*aq), GFP_KERNEL); | ||
605 | if (!aq) { | ||
606 | err = -ENOMEM; | ||
607 | goto exit; | ||
608 | } | ||
609 | |||
610 | platform_set_drvdata(pdev, aq); | ||
611 | init_completion(&aq->cmd_completion); | ||
612 | aq->pdev = pdev; | ||
613 | |||
614 | /* Map the registers */ | ||
615 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base"); | ||
616 | aq->regs = devm_ioremap_resource(&pdev->dev, res); | ||
617 | if (IS_ERR(aq->regs)) { | ||
618 | dev_err(&pdev->dev, "missing registers\n"); | ||
619 | err = PTR_ERR(aq->regs); | ||
620 | goto exit; | ||
621 | } | ||
622 | |||
623 | /* Map the AHB memory */ | ||
624 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mmap"); | ||
625 | aq->mem = devm_ioremap_resource(&pdev->dev, res); | ||
626 | if (IS_ERR(aq->mem)) { | ||
627 | dev_err(&pdev->dev, "missing AHB memory\n"); | ||
628 | err = PTR_ERR(aq->mem); | ||
629 | goto exit; | ||
630 | } | ||
631 | |||
632 | /* Get the peripheral clock */ | ||
633 | aq->clk = devm_clk_get(&pdev->dev, NULL); | ||
634 | if (IS_ERR(aq->clk)) { | ||
635 | dev_err(&pdev->dev, "missing peripheral clock\n"); | ||
636 | err = PTR_ERR(aq->clk); | ||
637 | goto exit; | ||
638 | } | ||
639 | |||
640 | /* Enable the peripheral clock */ | ||
641 | err = clk_prepare_enable(aq->clk); | ||
642 | if (err) { | ||
643 | dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); | ||
644 | goto exit; | ||
645 | } | ||
646 | |||
647 | /* Request the IRQ */ | ||
648 | irq = platform_get_irq(pdev, 0); | ||
649 | if (irq < 0) { | ||
650 | dev_err(&pdev->dev, "missing IRQ\n"); | ||
651 | err = irq; | ||
652 | goto disable_clk; | ||
653 | } | ||
654 | err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, | ||
655 | 0, dev_name(&pdev->dev), aq); | ||
656 | if (err) | ||
657 | goto disable_clk; | ||
658 | |||
659 | /* Setup the spi-nor */ | ||
660 | nor = &aq->nor; | ||
661 | mtd = &nor->mtd; | ||
662 | |||
663 | nor->dev = &pdev->dev; | ||
664 | spi_nor_set_flash_node(nor, child); | ||
665 | nor->priv = aq; | ||
666 | mtd->priv = nor; | ||
667 | |||
668 | nor->read_reg = atmel_qspi_read_reg; | ||
669 | nor->write_reg = atmel_qspi_write_reg; | ||
670 | nor->read = atmel_qspi_read; | ||
671 | nor->write = atmel_qspi_write; | ||
672 | nor->erase = atmel_qspi_erase; | ||
673 | |||
674 | err = of_property_read_u32(child, "spi-max-frequency", &aq->clk_rate); | ||
675 | if (err < 0) | ||
676 | goto disable_clk; | ||
677 | |||
678 | err = atmel_qspi_init(aq); | ||
679 | if (err) | ||
680 | goto disable_clk; | ||
681 | |||
682 | err = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); | ||
683 | if (err) | ||
684 | goto disable_clk; | ||
685 | |||
686 | err = mtd_device_register(mtd, NULL, 0); | ||
687 | if (err) | ||
688 | goto disable_clk; | ||
689 | |||
690 | of_node_put(child); | ||
691 | |||
692 | return 0; | ||
693 | |||
694 | disable_clk: | ||
695 | clk_disable_unprepare(aq->clk); | ||
696 | exit: | ||
697 | of_node_put(child); | ||
698 | |||
699 | return err; | ||
700 | } | ||
701 | |||
702 | static int atmel_qspi_remove(struct platform_device *pdev) | ||
703 | { | ||
704 | struct atmel_qspi *aq = platform_get_drvdata(pdev); | ||
705 | |||
706 | mtd_device_unregister(&aq->nor.mtd); | ||
707 | qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS); | ||
708 | clk_disable_unprepare(aq->clk); | ||
709 | return 0; | ||
710 | } | ||
711 | |||
712 | |||
713 | static const struct of_device_id atmel_qspi_dt_ids[] = { | ||
714 | { .compatible = "atmel,sama5d2-qspi" }, | ||
715 | { /* sentinel */ } | ||
716 | }; | ||
717 | |||
718 | MODULE_DEVICE_TABLE(of, atmel_qspi_dt_ids); | ||
719 | |||
720 | static struct platform_driver atmel_qspi_driver = { | ||
721 | .driver = { | ||
722 | .name = "atmel_qspi", | ||
723 | .of_match_table = atmel_qspi_dt_ids, | ||
724 | }, | ||
725 | .probe = atmel_qspi_probe, | ||
726 | .remove = atmel_qspi_remove, | ||
727 | }; | ||
728 | module_platform_driver(atmel_qspi_driver); | ||
729 | |||
730 | MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>"); | ||
731 | MODULE_DESCRIPTION("Atmel QSPI Controller driver"); | ||
732 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c new file mode 100644 index 000000000000..d403ba7b8f43 --- /dev/null +++ b/drivers/mtd/spi-nor/cadence-quadspi.c | |||
@@ -0,0 +1,1299 @@ | |||
1 | /* | ||
2 | * Driver for Cadence QSPI Controller | ||
3 | * | ||
4 | * Copyright Altera Corporation (C) 2012-2014. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along with | ||
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/completion.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/jiffies.h> | ||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/mtd/mtd.h> | ||
29 | #include <linux/mtd/partitions.h> | ||
30 | #include <linux/mtd/spi-nor.h> | ||
31 | #include <linux/of_device.h> | ||
32 | #include <linux/of.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/sched.h> | ||
35 | #include <linux/spi/spi.h> | ||
36 | #include <linux/timer.h> | ||
37 | |||
38 | #define CQSPI_NAME "cadence-qspi" | ||
39 | #define CQSPI_MAX_CHIPSELECT 16 | ||
40 | |||
41 | struct cqspi_st; | ||
42 | |||
43 | struct cqspi_flash_pdata { | ||
44 | struct spi_nor nor; | ||
45 | struct cqspi_st *cqspi; | ||
46 | u32 clk_rate; | ||
47 | u32 read_delay; | ||
48 | u32 tshsl_ns; | ||
49 | u32 tsd2d_ns; | ||
50 | u32 tchsh_ns; | ||
51 | u32 tslch_ns; | ||
52 | u8 inst_width; | ||
53 | u8 addr_width; | ||
54 | u8 data_width; | ||
55 | u8 cs; | ||
56 | bool registered; | ||
57 | }; | ||
58 | |||
59 | struct cqspi_st { | ||
60 | struct platform_device *pdev; | ||
61 | |||
62 | struct clk *clk; | ||
63 | unsigned int sclk; | ||
64 | |||
65 | void __iomem *iobase; | ||
66 | void __iomem *ahb_base; | ||
67 | struct completion transfer_complete; | ||
68 | struct mutex bus_mutex; | ||
69 | |||
70 | int current_cs; | ||
71 | int current_page_size; | ||
72 | int current_erase_size; | ||
73 | int current_addr_width; | ||
74 | unsigned long master_ref_clk_hz; | ||
75 | bool is_decoded_cs; | ||
76 | u32 fifo_depth; | ||
77 | u32 fifo_width; | ||
78 | u32 trigger_address; | ||
79 | struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; | ||
80 | }; | ||
81 | |||
82 | /* Operation timeout value */ | ||
83 | #define CQSPI_TIMEOUT_MS 500 | ||
84 | #define CQSPI_READ_TIMEOUT_MS 10 | ||
85 | |||
86 | /* Instruction type */ | ||
87 | #define CQSPI_INST_TYPE_SINGLE 0 | ||
88 | #define CQSPI_INST_TYPE_DUAL 1 | ||
89 | #define CQSPI_INST_TYPE_QUAD 2 | ||
90 | |||
91 | #define CQSPI_DUMMY_CLKS_PER_BYTE 8 | ||
92 | #define CQSPI_DUMMY_BYTES_MAX 4 | ||
93 | #define CQSPI_DUMMY_CLKS_MAX 31 | ||
94 | |||
95 | #define CQSPI_STIG_DATA_LEN_MAX 8 | ||
96 | |||
97 | /* Register map */ | ||
98 | #define CQSPI_REG_CONFIG 0x00 | ||
99 | #define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0) | ||
100 | #define CQSPI_REG_CONFIG_DECODE_MASK BIT(9) | ||
101 | #define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10 | ||
102 | #define CQSPI_REG_CONFIG_DMA_MASK BIT(15) | ||
103 | #define CQSPI_REG_CONFIG_BAUD_LSB 19 | ||
104 | #define CQSPI_REG_CONFIG_IDLE_LSB 31 | ||
105 | #define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF | ||
106 | #define CQSPI_REG_CONFIG_BAUD_MASK 0xF | ||
107 | |||
108 | #define CQSPI_REG_RD_INSTR 0x04 | ||
109 | #define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 | ||
110 | #define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8 | ||
111 | #define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12 | ||
112 | #define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16 | ||
113 | #define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20 | ||
114 | #define CQSPI_REG_RD_INSTR_DUMMY_LSB 24 | ||
115 | #define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3 | ||
116 | #define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3 | ||
117 | #define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3 | ||
118 | #define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F | ||
119 | |||
120 | #define CQSPI_REG_WR_INSTR 0x08 | ||
121 | #define CQSPI_REG_WR_INSTR_OPCODE_LSB 0 | ||
122 | #define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12 | ||
123 | #define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16 | ||
124 | |||
125 | #define CQSPI_REG_DELAY 0x0C | ||
126 | #define CQSPI_REG_DELAY_TSLCH_LSB 0 | ||
127 | #define CQSPI_REG_DELAY_TCHSH_LSB 8 | ||
128 | #define CQSPI_REG_DELAY_TSD2D_LSB 16 | ||
129 | #define CQSPI_REG_DELAY_TSHSL_LSB 24 | ||
130 | #define CQSPI_REG_DELAY_TSLCH_MASK 0xFF | ||
131 | #define CQSPI_REG_DELAY_TCHSH_MASK 0xFF | ||
132 | #define CQSPI_REG_DELAY_TSD2D_MASK 0xFF | ||
133 | #define CQSPI_REG_DELAY_TSHSL_MASK 0xFF | ||
134 | |||
135 | #define CQSPI_REG_READCAPTURE 0x10 | ||
136 | #define CQSPI_REG_READCAPTURE_BYPASS_LSB 0 | ||
137 | #define CQSPI_REG_READCAPTURE_DELAY_LSB 1 | ||
138 | #define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF | ||
139 | |||
140 | #define CQSPI_REG_SIZE 0x14 | ||
141 | #define CQSPI_REG_SIZE_ADDRESS_LSB 0 | ||
142 | #define CQSPI_REG_SIZE_PAGE_LSB 4 | ||
143 | #define CQSPI_REG_SIZE_BLOCK_LSB 16 | ||
144 | #define CQSPI_REG_SIZE_ADDRESS_MASK 0xF | ||
145 | #define CQSPI_REG_SIZE_PAGE_MASK 0xFFF | ||
146 | #define CQSPI_REG_SIZE_BLOCK_MASK 0x3F | ||
147 | |||
148 | #define CQSPI_REG_SRAMPARTITION 0x18 | ||
149 | #define CQSPI_REG_INDIRECTTRIGGER 0x1C | ||
150 | |||
151 | #define CQSPI_REG_DMA 0x20 | ||
152 | #define CQSPI_REG_DMA_SINGLE_LSB 0 | ||
153 | #define CQSPI_REG_DMA_BURST_LSB 8 | ||
154 | #define CQSPI_REG_DMA_SINGLE_MASK 0xFF | ||
155 | #define CQSPI_REG_DMA_BURST_MASK 0xFF | ||
156 | |||
157 | #define CQSPI_REG_REMAP 0x24 | ||
158 | #define CQSPI_REG_MODE_BIT 0x28 | ||
159 | |||
160 | #define CQSPI_REG_SDRAMLEVEL 0x2C | ||
161 | #define CQSPI_REG_SDRAMLEVEL_RD_LSB 0 | ||
162 | #define CQSPI_REG_SDRAMLEVEL_WR_LSB 16 | ||
163 | #define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF | ||
164 | #define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF | ||
165 | |||
166 | #define CQSPI_REG_IRQSTATUS 0x40 | ||
167 | #define CQSPI_REG_IRQMASK 0x44 | ||
168 | |||
169 | #define CQSPI_REG_INDIRECTRD 0x60 | ||
170 | #define CQSPI_REG_INDIRECTRD_START_MASK BIT(0) | ||
171 | #define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1) | ||
172 | #define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5) | ||
173 | |||
174 | #define CQSPI_REG_INDIRECTRDWATERMARK 0x64 | ||
175 | #define CQSPI_REG_INDIRECTRDSTARTADDR 0x68 | ||
176 | #define CQSPI_REG_INDIRECTRDBYTES 0x6C | ||
177 | |||
178 | #define CQSPI_REG_CMDCTRL 0x90 | ||
179 | #define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0) | ||
180 | #define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1) | ||
181 | #define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12 | ||
182 | #define CQSPI_REG_CMDCTRL_WR_EN_LSB 15 | ||
183 | #define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16 | ||
184 | #define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19 | ||
185 | #define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20 | ||
186 | #define CQSPI_REG_CMDCTRL_RD_EN_LSB 23 | ||
187 | #define CQSPI_REG_CMDCTRL_OPCODE_LSB 24 | ||
188 | #define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7 | ||
189 | #define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3 | ||
190 | #define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7 | ||
191 | |||
192 | #define CQSPI_REG_INDIRECTWR 0x70 | ||
193 | #define CQSPI_REG_INDIRECTWR_START_MASK BIT(0) | ||
194 | #define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1) | ||
195 | #define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5) | ||
196 | |||
197 | #define CQSPI_REG_INDIRECTWRWATERMARK 0x74 | ||
198 | #define CQSPI_REG_INDIRECTWRSTARTADDR 0x78 | ||
199 | #define CQSPI_REG_INDIRECTWRBYTES 0x7C | ||
200 | |||
201 | #define CQSPI_REG_CMDADDRESS 0x94 | ||
202 | #define CQSPI_REG_CMDREADDATALOWER 0xA0 | ||
203 | #define CQSPI_REG_CMDREADDATAUPPER 0xA4 | ||
204 | #define CQSPI_REG_CMDWRITEDATALOWER 0xA8 | ||
205 | #define CQSPI_REG_CMDWRITEDATAUPPER 0xAC | ||
206 | |||
207 | /* Interrupt status bits */ | ||
208 | #define CQSPI_REG_IRQ_MODE_ERR BIT(0) | ||
209 | #define CQSPI_REG_IRQ_UNDERFLOW BIT(1) | ||
210 | #define CQSPI_REG_IRQ_IND_COMP BIT(2) | ||
211 | #define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3) | ||
212 | #define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4) | ||
213 | #define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5) | ||
214 | #define CQSPI_REG_IRQ_WATERMARK BIT(6) | ||
215 | #define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12) | ||
216 | |||
217 | #define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \ | ||
218 | CQSPI_REG_IRQ_IND_SRAM_FULL | \ | ||
219 | CQSPI_REG_IRQ_IND_COMP) | ||
220 | |||
221 | #define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ | ||
222 | CQSPI_REG_IRQ_WATERMARK | \ | ||
223 | CQSPI_REG_IRQ_UNDERFLOW) | ||
224 | |||
225 | #define CQSPI_IRQ_STATUS_MASK 0x1FFFF | ||
226 | |||
227 | static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear) | ||
228 | { | ||
229 | unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); | ||
230 | u32 val; | ||
231 | |||
232 | while (1) { | ||
233 | val = readl(reg); | ||
234 | if (clear) | ||
235 | val = ~val; | ||
236 | val &= mask; | ||
237 | |||
238 | if (val == mask) | ||
239 | return 0; | ||
240 | |||
241 | if (time_after(jiffies, end)) | ||
242 | return -ETIMEDOUT; | ||
243 | } | ||
244 | } | ||
245 | |||
246 | static bool cqspi_is_idle(struct cqspi_st *cqspi) | ||
247 | { | ||
248 | u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); | ||
249 | |||
250 | return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB); | ||
251 | } | ||
252 | |||
253 | static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi) | ||
254 | { | ||
255 | u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL); | ||
256 | |||
257 | reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB; | ||
258 | return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK; | ||
259 | } | ||
260 | |||
261 | static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) | ||
262 | { | ||
263 | struct cqspi_st *cqspi = dev; | ||
264 | unsigned int irq_status; | ||
265 | |||
266 | /* Read interrupt status */ | ||
267 | irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); | ||
268 | |||
269 | /* Clear interrupt */ | ||
270 | writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS); | ||
271 | |||
272 | irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; | ||
273 | |||
274 | if (irq_status) | ||
275 | complete(&cqspi->transfer_complete); | ||
276 | |||
277 | return IRQ_HANDLED; | ||
278 | } | ||
279 | |||
280 | static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode) | ||
281 | { | ||
282 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
283 | u32 rdreg = 0; | ||
284 | |||
285 | rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB; | ||
286 | rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB; | ||
287 | rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB; | ||
288 | |||
289 | return rdreg; | ||
290 | } | ||
291 | |||
292 | static int cqspi_wait_idle(struct cqspi_st *cqspi) | ||
293 | { | ||
294 | const unsigned int poll_idle_retry = 3; | ||
295 | unsigned int count = 0; | ||
296 | unsigned long timeout; | ||
297 | |||
298 | timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); | ||
299 | while (1) { | ||
300 | /* | ||
301 | * Read a few times in succession to ensure the controller | ||
302 | * is indeed idle, that is, the bit does not transition | ||
303 | * low again. | ||
304 | */ | ||
305 | if (cqspi_is_idle(cqspi)) | ||
306 | count++; | ||
307 | else | ||
308 | count = 0; | ||
309 | |||
310 | if (count >= poll_idle_retry) | ||
311 | return 0; | ||
312 | |||
313 | if (time_after(jiffies, timeout)) { | ||
314 | /* Timeout, in busy mode. */ | ||
315 | dev_err(&cqspi->pdev->dev, | ||
316 | "QSPI is still busy after %dms timeout.\n", | ||
317 | CQSPI_TIMEOUT_MS); | ||
318 | return -ETIMEDOUT; | ||
319 | } | ||
320 | |||
321 | cpu_relax(); | ||
322 | } | ||
323 | } | ||
324 | |||
325 | static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg) | ||
326 | { | ||
327 | void __iomem *reg_base = cqspi->iobase; | ||
328 | int ret; | ||
329 | |||
330 | /* Write the CMDCTRL without start execution. */ | ||
331 | writel(reg, reg_base + CQSPI_REG_CMDCTRL); | ||
332 | /* Start execute */ | ||
333 | reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK; | ||
334 | writel(reg, reg_base + CQSPI_REG_CMDCTRL); | ||
335 | |||
336 | /* Polling for completion. */ | ||
337 | ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL, | ||
338 | CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1); | ||
339 | if (ret) { | ||
340 | dev_err(&cqspi->pdev->dev, | ||
341 | "Flash command execution timed out.\n"); | ||
342 | return ret; | ||
343 | } | ||
344 | |||
345 | /* Polling QSPI idle status. */ | ||
346 | return cqspi_wait_idle(cqspi); | ||
347 | } | ||
348 | |||
349 | static int cqspi_command_read(struct spi_nor *nor, | ||
350 | const u8 *txbuf, const unsigned n_tx, | ||
351 | u8 *rxbuf, const unsigned n_rx) | ||
352 | { | ||
353 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
354 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
355 | void __iomem *reg_base = cqspi->iobase; | ||
356 | unsigned int rdreg; | ||
357 | unsigned int reg; | ||
358 | unsigned int read_len; | ||
359 | int status; | ||
360 | |||
361 | if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) { | ||
362 | dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n", | ||
363 | n_rx, rxbuf); | ||
364 | return -EINVAL; | ||
365 | } | ||
366 | |||
367 | reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB; | ||
368 | |||
369 | rdreg = cqspi_calc_rdreg(nor, txbuf[0]); | ||
370 | writel(rdreg, reg_base + CQSPI_REG_RD_INSTR); | ||
371 | |||
372 | reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB); | ||
373 | |||
374 | /* 0 means 1 byte. */ | ||
375 | reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK) | ||
376 | << CQSPI_REG_CMDCTRL_RD_BYTES_LSB); | ||
377 | status = cqspi_exec_flash_cmd(cqspi, reg); | ||
378 | if (status) | ||
379 | return status; | ||
380 | |||
381 | reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER); | ||
382 | |||
383 | /* Put the read value into rx_buf */ | ||
384 | read_len = (n_rx > 4) ? 4 : n_rx; | ||
385 | memcpy(rxbuf, ®, read_len); | ||
386 | rxbuf += read_len; | ||
387 | |||
388 | if (n_rx > 4) { | ||
389 | reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER); | ||
390 | |||
391 | read_len = n_rx - read_len; | ||
392 | memcpy(rxbuf, ®, read_len); | ||
393 | } | ||
394 | |||
395 | return 0; | ||
396 | } | ||
397 | |||
398 | static int cqspi_command_write(struct spi_nor *nor, const u8 opcode, | ||
399 | const u8 *txbuf, const unsigned n_tx) | ||
400 | { | ||
401 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
402 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
403 | void __iomem *reg_base = cqspi->iobase; | ||
404 | unsigned int reg; | ||
405 | unsigned int data; | ||
406 | int ret; | ||
407 | |||
408 | if (n_tx > 4 || (n_tx && !txbuf)) { | ||
409 | dev_err(nor->dev, | ||
410 | "Invalid input argument, cmdlen %d txbuf 0x%p\n", | ||
411 | n_tx, txbuf); | ||
412 | return -EINVAL; | ||
413 | } | ||
414 | |||
415 | reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; | ||
416 | if (n_tx) { | ||
417 | reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB); | ||
418 | reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) | ||
419 | << CQSPI_REG_CMDCTRL_WR_BYTES_LSB; | ||
420 | data = 0; | ||
421 | memcpy(&data, txbuf, n_tx); | ||
422 | writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER); | ||
423 | } | ||
424 | |||
425 | ret = cqspi_exec_flash_cmd(cqspi, reg); | ||
426 | return ret; | ||
427 | } | ||
428 | |||
429 | static int cqspi_command_write_addr(struct spi_nor *nor, | ||
430 | const u8 opcode, const unsigned int addr) | ||
431 | { | ||
432 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
433 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
434 | void __iomem *reg_base = cqspi->iobase; | ||
435 | unsigned int reg; | ||
436 | |||
437 | reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; | ||
438 | reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB); | ||
439 | reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) | ||
440 | << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; | ||
441 | |||
442 | writel(addr, reg_base + CQSPI_REG_CMDADDRESS); | ||
443 | |||
444 | return cqspi_exec_flash_cmd(cqspi, reg); | ||
445 | } | ||
446 | |||
447 | static int cqspi_indirect_read_setup(struct spi_nor *nor, | ||
448 | const unsigned int from_addr) | ||
449 | { | ||
450 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
451 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
452 | void __iomem *reg_base = cqspi->iobase; | ||
453 | unsigned int dummy_clk = 0; | ||
454 | unsigned int reg; | ||
455 | |||
456 | writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); | ||
457 | |||
458 | reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; | ||
459 | reg |= cqspi_calc_rdreg(nor, nor->read_opcode); | ||
460 | |||
461 | /* Setup dummy clock cycles */ | ||
462 | dummy_clk = nor->read_dummy; | ||
463 | if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) | ||
464 | dummy_clk = CQSPI_DUMMY_CLKS_MAX; | ||
465 | |||
466 | if (dummy_clk / 8) { | ||
467 | reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB); | ||
468 | /* Set mode bits high to ensure chip doesn't enter XIP */ | ||
469 | writel(0xFF, reg_base + CQSPI_REG_MODE_BIT); | ||
470 | |||
471 | /* Need to subtract the mode byte (8 clocks). */ | ||
472 | if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD) | ||
473 | dummy_clk -= 8; | ||
474 | |||
475 | if (dummy_clk) | ||
476 | reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK) | ||
477 | << CQSPI_REG_RD_INSTR_DUMMY_LSB; | ||
478 | } | ||
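/*
 * Example (hypothetical flash): for a read command with nor->read_dummy == 8
 * and a single-bit command phase, the mode byte enabled above already
 * accounts for all 8 clocks, so dummy_clk drops to 0 and the DUMMY field is
 * left unprogrammed.
 */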
479 | |||
480 | writel(reg, reg_base + CQSPI_REG_RD_INSTR); | ||
481 | |||
482 | /* Set address width */ | ||
483 | reg = readl(reg_base + CQSPI_REG_SIZE); | ||
484 | reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; | ||
485 | reg |= (nor->addr_width - 1); | ||
486 | writel(reg, reg_base + CQSPI_REG_SIZE); | ||
487 | return 0; | ||
488 | } | ||
489 | |||
490 | static int cqspi_indirect_read_execute(struct spi_nor *nor, | ||
491 | u8 *rxbuf, const unsigned n_rx) | ||
492 | { | ||
493 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
494 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
495 | void __iomem *reg_base = cqspi->iobase; | ||
496 | void __iomem *ahb_base = cqspi->ahb_base; | ||
497 | unsigned int remaining = n_rx; | ||
498 | unsigned int bytes_to_read = 0; | ||
499 | int ret = 0; | ||
500 | |||
501 | writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES); | ||
502 | |||
503 | /* Clear all interrupts. */ | ||
504 | writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); | ||
505 | |||
506 | writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); | ||
507 | |||
508 | reinit_completion(&cqspi->transfer_complete); | ||
509 | writel(CQSPI_REG_INDIRECTRD_START_MASK, | ||
510 | reg_base + CQSPI_REG_INDIRECTRD); | ||
511 | |||
512 | while (remaining > 0) { | ||
513 | ret = wait_for_completion_timeout(&cqspi->transfer_complete, | ||
514 | msecs_to_jiffies | ||
515 | (CQSPI_READ_TIMEOUT_MS)); | ||
516 | |||
517 | bytes_to_read = cqspi_get_rd_sram_level(cqspi); | ||
518 | |||
519 | if (!ret && bytes_to_read == 0) { | ||
520 | dev_err(nor->dev, "Indirect read timeout, no bytes\n"); | ||
521 | ret = -ETIMEDOUT; | ||
522 | goto failrd; | ||
523 | } | ||
524 | |||
525 | while (bytes_to_read != 0) { | ||
526 | bytes_to_read *= cqspi->fifo_width; | ||
527 | bytes_to_read = bytes_to_read > remaining ? | ||
528 | remaining : bytes_to_read; | ||
529 | readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4)); | ||
530 | rxbuf += bytes_to_read; | ||
531 | remaining -= bytes_to_read; | ||
532 | bytes_to_read = cqspi_get_rd_sram_level(cqspi); | ||
533 | } | ||
534 | |||
535 | if (remaining > 0) | ||
536 | reinit_completion(&cqspi->transfer_complete); | ||
537 | } | ||
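/*
 * Illustration (hypothetical sizes): with fifo_width == 4 bytes and an SRAM
 * fill level of 16 words, up to 64 bytes are available, and the readsl()
 * above drains min(64, remaining) bytes from the AHB port in 32-bit accesses
 * before the level is sampled again.
 */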
538 | |||
539 | /* Check indirect done status */ | ||
540 | ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD, | ||
541 | CQSPI_REG_INDIRECTRD_DONE_MASK, 0); | ||
542 | if (ret) { | ||
543 | dev_err(nor->dev, | ||
544 | "Indirect read completion error (%i)\n", ret); | ||
545 | goto failrd; | ||
546 | } | ||
547 | |||
548 | /* Disable interrupt */ | ||
549 | writel(0, reg_base + CQSPI_REG_IRQMASK); | ||
550 | |||
551 | /* Clear indirect completion status */ | ||
552 | writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD); | ||
553 | |||
554 | return 0; | ||
555 | |||
556 | failrd: | ||
557 | /* Disable interrupt */ | ||
558 | writel(0, reg_base + CQSPI_REG_IRQMASK); | ||
559 | |||
560 | /* Cancel the indirect read */ | ||
561 | writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, | ||
562 | reg_base + CQSPI_REG_INDIRECTRD); | ||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int cqspi_indirect_write_setup(struct spi_nor *nor, | ||
567 | const unsigned int to_addr) | ||
568 | { | ||
569 | unsigned int reg; | ||
570 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
571 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
572 | void __iomem *reg_base = cqspi->iobase; | ||
573 | |||
574 | /* Set opcode. */ | ||
575 | reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; | ||
576 | writel(reg, reg_base + CQSPI_REG_WR_INSTR); | ||
577 | reg = cqspi_calc_rdreg(nor, nor->program_opcode); | ||
578 | writel(reg, reg_base + CQSPI_REG_RD_INSTR); | ||
579 | |||
580 | writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR); | ||
581 | |||
582 | reg = readl(reg_base + CQSPI_REG_SIZE); | ||
583 | reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; | ||
584 | reg |= (nor->addr_width - 1); | ||
585 | writel(reg, reg_base + CQSPI_REG_SIZE); | ||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | static int cqspi_indirect_write_execute(struct spi_nor *nor, | ||
590 | const u8 *txbuf, const unsigned n_tx) | ||
591 | { | ||
592 | const unsigned int page_size = nor->page_size; | ||
593 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
594 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
595 | void __iomem *reg_base = cqspi->iobase; | ||
596 | unsigned int remaining = n_tx; | ||
597 | unsigned int write_bytes; | ||
598 | int ret; | ||
599 | |||
600 | writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES); | ||
601 | |||
602 | /* Clear all interrupts. */ | ||
603 | writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); | ||
604 | |||
605 | writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK); | ||
606 | |||
607 | reinit_completion(&cqspi->transfer_complete); | ||
608 | writel(CQSPI_REG_INDIRECTWR_START_MASK, | ||
609 | reg_base + CQSPI_REG_INDIRECTWR); | ||
610 | |||
611 | while (remaining > 0) { | ||
612 | write_bytes = remaining > page_size ? page_size : remaining; | ||
613 | writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4)); | ||
614 | |||
615 | ret = wait_for_completion_timeout(&cqspi->transfer_complete, | ||
616 | msecs_to_jiffies | ||
617 | (CQSPI_TIMEOUT_MS)); | ||
618 | if (!ret) { | ||
619 | dev_err(nor->dev, "Indirect write timeout\n"); | ||
620 | ret = -ETIMEDOUT; | ||
621 | goto failwr; | ||
622 | } | ||
623 | |||
624 | txbuf += write_bytes; | ||
625 | remaining -= write_bytes; | ||
626 | |||
627 | if (remaining > 0) | ||
628 | reinit_completion(&cqspi->transfer_complete); | ||
629 | } | ||
630 | |||
631 | /* Check indirect done status */ | ||
632 | ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR, | ||
633 | CQSPI_REG_INDIRECTWR_DONE_MASK, 0); | ||
634 | if (ret) { | ||
635 | dev_err(nor->dev, | ||
636 | "Indirect write completion error (%i)\n", ret); | ||
637 | goto failwr; | ||
638 | } | ||
639 | |||
640 | /* Disable interrupt. */ | ||
641 | writel(0, reg_base + CQSPI_REG_IRQMASK); | ||
642 | |||
643 | /* Clear indirect completion status */ | ||
644 | writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR); | ||
645 | |||
646 | cqspi_wait_idle(cqspi); | ||
647 | |||
648 | return 0; | ||
649 | |||
650 | failwr: | ||
651 | /* Disable interrupt. */ | ||
652 | writel(0, reg_base + CQSPI_REG_IRQMASK); | ||
653 | |||
654 | /* Cancel the indirect write */ | ||
655 | writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, | ||
656 | reg_base + CQSPI_REG_INDIRECTWR); | ||
657 | return ret; | ||
658 | } | ||
659 | |||
660 | static void cqspi_chipselect(struct spi_nor *nor) | ||
661 | { | ||
662 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
663 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
664 | void __iomem *reg_base = cqspi->iobase; | ||
665 | unsigned int chip_select = f_pdata->cs; | ||
666 | unsigned int reg; | ||
667 | |||
668 | reg = readl(reg_base + CQSPI_REG_CONFIG); | ||
669 | if (cqspi->is_decoded_cs) { | ||
670 | reg |= CQSPI_REG_CONFIG_DECODE_MASK; | ||
671 | } else { | ||
672 | reg &= ~CQSPI_REG_CONFIG_DECODE_MASK; | ||
673 | |||
674 | /* Convert the CS to its active-low encoding when no external decoder is used: | ||
675 | * CS0 to 4b'1110 | ||
676 | * CS1 to 4b'1101 | ||
677 | * CS2 to 4b'1011 | ||
678 | * CS3 to 4b'0111 | ||
679 | */ | ||
680 | chip_select = 0xF & ~(1 << chip_select); | ||
681 | } | ||
682 | |||
683 | reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK | ||
684 | << CQSPI_REG_CONFIG_CHIPSELECT_LSB); | ||
685 | reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK) | ||
686 | << CQSPI_REG_CONFIG_CHIPSELECT_LSB; | ||
687 | writel(reg, reg_base + CQSPI_REG_CONFIG); | ||
688 | } | ||
689 | |||
690 | static void cqspi_configure_cs_and_sizes(struct spi_nor *nor) | ||
691 | { | ||
692 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
693 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
694 | void __iomem *iobase = cqspi->iobase; | ||
695 | unsigned int reg; | ||
696 | |||
697 | /* configure page size and block size. */ | ||
698 | reg = readl(iobase + CQSPI_REG_SIZE); | ||
699 | reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB); | ||
700 | reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB); | ||
701 | reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; | ||
702 | reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB); | ||
703 | reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB); | ||
704 | reg |= (nor->addr_width - 1); | ||
705 | writel(reg, iobase + CQSPI_REG_SIZE); | ||
706 | |||
707 | /* configure the chip select */ | ||
708 | cqspi_chipselect(nor); | ||
709 | |||
710 | /* Store the new configuration of the controller */ | ||
711 | cqspi->current_page_size = nor->page_size; | ||
712 | cqspi->current_erase_size = nor->mtd.erasesize; | ||
713 | cqspi->current_addr_width = nor->addr_width; | ||
714 | } | ||
715 | |||
716 | static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz, | ||
717 | const unsigned int ns_val) | ||
718 | { | ||
719 | unsigned int ticks; | ||
720 | |||
721 | ticks = ref_clk_hz / 1000; /* kHz */ | ||
722 | ticks = DIV_ROUND_UP(ticks * ns_val, 1000000); | ||
723 | |||
724 | return ticks; | ||
725 | } | ||
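/*
 * For instance (hypothetical clock): with ref_clk_hz = 500 MHz the helper
 * works in kHz, so a 50 ns delay becomes
 * DIV_ROUND_UP(500000 * 50, 1000000) = 25 reference-clock ticks, which also
 * keeps the intermediate multiply comfortably within 32 bits.
 */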
726 | |||
727 | static void cqspi_delay(struct spi_nor *nor) | ||
728 | { | ||
729 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
730 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
731 | void __iomem *iobase = cqspi->iobase; | ||
732 | const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; | ||
733 | unsigned int tshsl, tchsh, tslch, tsd2d; | ||
734 | unsigned int reg; | ||
735 | unsigned int tsclk; | ||
736 | |||
737 | /* calculate the number of ref ticks for one sclk tick */ | ||
738 | tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk); | ||
739 | |||
740 | tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns); | ||
741 | /* this particular value must be at least one sclk */ | ||
742 | if (tshsl < tsclk) | ||
743 | tshsl = tsclk; | ||
744 | |||
745 | tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns); | ||
746 | tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns); | ||
747 | tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns); | ||
748 | |||
749 | reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK) | ||
750 | << CQSPI_REG_DELAY_TSHSL_LSB; | ||
751 | reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK) | ||
752 | << CQSPI_REG_DELAY_TCHSH_LSB; | ||
753 | reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK) | ||
754 | << CQSPI_REG_DELAY_TSLCH_LSB; | ||
755 | reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK) | ||
756 | << CQSPI_REG_DELAY_TSD2D_LSB; | ||
757 | writel(reg, iobase + CQSPI_REG_DELAY); | ||
758 | } | ||
759 | |||
760 | static void cqspi_config_baudrate_div(struct cqspi_st *cqspi) | ||
761 | { | ||
762 | const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; | ||
763 | void __iomem *reg_base = cqspi->iobase; | ||
764 | u32 reg, div; | ||
765 | |||
766 | /* Recalculate the baudrate divisor based on QSPI specification. */ | ||
767 | div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1; | ||
768 | |||
769 | reg = readl(reg_base + CQSPI_REG_CONFIG); | ||
770 | reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB); | ||
771 | reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB; | ||
772 | writel(reg, reg_base + CQSPI_REG_CONFIG); | ||
773 | } | ||
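/*
 * Example (hypothetical rates): ref_clk_hz = 400 MHz and sclk = 50 MHz give
 * div = DIV_ROUND_UP(400, 2 * 50) - 1 = 3; the controller then divides by
 * 2 * (div + 1) = 8, producing exactly the requested 50 MHz.
 */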
774 | |||
775 | static void cqspi_readdata_capture(struct cqspi_st *cqspi, | ||
776 | const unsigned int bypass, | ||
777 | const unsigned int delay) | ||
778 | { | ||
779 | void __iomem *reg_base = cqspi->iobase; | ||
780 | unsigned int reg; | ||
781 | |||
782 | reg = readl(reg_base + CQSPI_REG_READCAPTURE); | ||
783 | |||
784 | if (bypass) | ||
785 | reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); | ||
786 | else | ||
787 | reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB); | ||
788 | |||
789 | reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK | ||
790 | << CQSPI_REG_READCAPTURE_DELAY_LSB); | ||
791 | |||
792 | reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK) | ||
793 | << CQSPI_REG_READCAPTURE_DELAY_LSB; | ||
794 | |||
795 | writel(reg, reg_base + CQSPI_REG_READCAPTURE); | ||
796 | } | ||
797 | |||
798 | static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) | ||
799 | { | ||
800 | void __iomem *reg_base = cqspi->iobase; | ||
801 | unsigned int reg; | ||
802 | |||
803 | reg = readl(reg_base + CQSPI_REG_CONFIG); | ||
804 | |||
805 | if (enable) | ||
806 | reg |= CQSPI_REG_CONFIG_ENABLE_MASK; | ||
807 | else | ||
808 | reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK; | ||
809 | |||
810 | writel(reg, reg_base + CQSPI_REG_CONFIG); | ||
811 | } | ||
812 | |||
813 | static void cqspi_configure(struct spi_nor *nor) | ||
814 | { | ||
815 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
816 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
817 | const unsigned int sclk = f_pdata->clk_rate; | ||
818 | int switch_cs = (cqspi->current_cs != f_pdata->cs); | ||
819 | int switch_ck = (cqspi->sclk != sclk); | ||
820 | |||
821 | if ((cqspi->current_page_size != nor->page_size) || | ||
822 | (cqspi->current_erase_size != nor->mtd.erasesize) || | ||
823 | (cqspi->current_addr_width != nor->addr_width)) | ||
824 | switch_cs = 1; | ||
825 | |||
826 | if (switch_cs || switch_ck) | ||
827 | cqspi_controller_enable(cqspi, 0); | ||
828 | |||
829 | /* Switch chip select. */ | ||
830 | if (switch_cs) { | ||
831 | cqspi->current_cs = f_pdata->cs; | ||
832 | cqspi_configure_cs_and_sizes(nor); | ||
833 | } | ||
834 | |||
835 | /* Setup baudrate divisor and delays */ | ||
836 | if (switch_ck) { | ||
837 | cqspi->sclk = sclk; | ||
838 | cqspi_config_baudrate_div(cqspi); | ||
839 | cqspi_delay(nor); | ||
840 | cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay); | ||
841 | } | ||
842 | |||
843 | if (switch_cs || switch_ck) | ||
844 | cqspi_controller_enable(cqspi, 1); | ||
845 | } | ||
846 | |||
847 | static int cqspi_set_protocol(struct spi_nor *nor, const int read) | ||
848 | { | ||
849 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
850 | |||
851 | f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE; | ||
852 | f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE; | ||
853 | f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; | ||
854 | |||
855 | if (read) { | ||
856 | switch (nor->flash_read) { | ||
857 | case SPI_NOR_NORMAL: | ||
858 | case SPI_NOR_FAST: | ||
859 | f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; | ||
860 | break; | ||
861 | case SPI_NOR_DUAL: | ||
862 | f_pdata->data_width = CQSPI_INST_TYPE_DUAL; | ||
863 | break; | ||
864 | case SPI_NOR_QUAD: | ||
865 | f_pdata->data_width = CQSPI_INST_TYPE_QUAD; | ||
866 | break; | ||
867 | default: | ||
868 | return -EINVAL; | ||
869 | } | ||
870 | } | ||
871 | |||
872 | cqspi_configure(nor); | ||
873 | |||
874 | return 0; | ||
875 | } | ||
876 | |||
877 | static ssize_t cqspi_write(struct spi_nor *nor, loff_t to, | ||
878 | size_t len, const u_char *buf) | ||
879 | { | ||
880 | int ret; | ||
881 | |||
882 | ret = cqspi_set_protocol(nor, 0); | ||
883 | if (ret) | ||
884 | return ret; | ||
885 | |||
886 | ret = cqspi_indirect_write_setup(nor, to); | ||
887 | if (ret) | ||
888 | return ret; | ||
889 | |||
890 | ret = cqspi_indirect_write_execute(nor, buf, len); | ||
891 | if (ret) | ||
892 | return ret; | ||
893 | |||
894 | return (ret < 0) ? ret : len; | ||
895 | } | ||
896 | |||
897 | static ssize_t cqspi_read(struct spi_nor *nor, loff_t from, | ||
898 | size_t len, u_char *buf) | ||
899 | { | ||
900 | int ret; | ||
901 | |||
902 | ret = cqspi_set_protocol(nor, 1); | ||
903 | if (ret) | ||
904 | return ret; | ||
905 | |||
906 | ret = cqspi_indirect_read_setup(nor, from); | ||
907 | if (ret) | ||
908 | return ret; | ||
909 | |||
910 | ret = cqspi_indirect_read_execute(nor, buf, len); | ||
911 | if (ret) | ||
912 | return ret; | ||
913 | |||
914 | return (ret < 0) ? ret : len; | ||
915 | } | ||
916 | |||
917 | static int cqspi_erase(struct spi_nor *nor, loff_t offs) | ||
918 | { | ||
919 | int ret; | ||
920 | |||
921 | ret = cqspi_set_protocol(nor, 0); | ||
922 | if (ret) | ||
923 | return ret; | ||
924 | |||
925 | /* Send write enable, then erase commands. */ | ||
926 | ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); | ||
927 | if (ret) | ||
928 | return ret; | ||
929 | |||
930 | /* Set up command buffer. */ | ||
931 | ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs); | ||
932 | if (ret) | ||
933 | return ret; | ||
934 | |||
935 | return 0; | ||
936 | } | ||
937 | |||
938 | static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops) | ||
939 | { | ||
940 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
941 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
942 | |||
943 | mutex_lock(&cqspi->bus_mutex); | ||
944 | |||
945 | return 0; | ||
946 | } | ||
947 | |||
948 | static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) | ||
949 | { | ||
950 | struct cqspi_flash_pdata *f_pdata = nor->priv; | ||
951 | struct cqspi_st *cqspi = f_pdata->cqspi; | ||
952 | |||
953 | mutex_unlock(&cqspi->bus_mutex); | ||
954 | } | ||
955 | |||
956 | static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | ||
957 | { | ||
958 | int ret; | ||
959 | |||
960 | ret = cqspi_set_protocol(nor, 0); | ||
961 | if (!ret) | ||
962 | ret = cqspi_command_read(nor, &opcode, 1, buf, len); | ||
963 | |||
964 | return ret; | ||
965 | } | ||
966 | |||
967 | static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | ||
968 | { | ||
969 | int ret; | ||
970 | |||
971 | ret = cqspi_set_protocol(nor, 0); | ||
972 | if (!ret) | ||
973 | ret = cqspi_command_write(nor, opcode, buf, len); | ||
974 | |||
975 | return ret; | ||
976 | } | ||
977 | |||
978 | static int cqspi_of_get_flash_pdata(struct platform_device *pdev, | ||
979 | struct cqspi_flash_pdata *f_pdata, | ||
980 | struct device_node *np) | ||
981 | { | ||
982 | if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) { | ||
983 | dev_err(&pdev->dev, "couldn't determine read-delay\n"); | ||
984 | return -ENXIO; | ||
985 | } | ||
986 | |||
987 | if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) { | ||
988 | dev_err(&pdev->dev, "couldn't determine tshsl-ns\n"); | ||
989 | return -ENXIO; | ||
990 | } | ||
991 | |||
992 | if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) { | ||
993 | dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n"); | ||
994 | return -ENXIO; | ||
995 | } | ||
996 | |||
997 | if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) { | ||
998 | dev_err(&pdev->dev, "couldn't determine tchsh-ns\n"); | ||
999 | return -ENXIO; | ||
1000 | } | ||
1001 | |||
1002 | if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) { | ||
1003 | dev_err(&pdev->dev, "couldn't determine tslch-ns\n"); | ||
1004 | return -ENXIO; | ||
1005 | } | ||
1006 | |||
1007 | if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) { | ||
1008 | dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n"); | ||
1009 | return -ENXIO; | ||
1010 | } | ||
1011 | |||
1012 | return 0; | ||
1013 | } | ||
1014 | |||
1015 | static int cqspi_of_get_pdata(struct platform_device *pdev) | ||
1016 | { | ||
1017 | struct device_node *np = pdev->dev.of_node; | ||
1018 | struct cqspi_st *cqspi = platform_get_drvdata(pdev); | ||
1019 | |||
1020 | cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); | ||
1021 | |||
1022 | if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { | ||
1023 | dev_err(&pdev->dev, "couldn't determine fifo-depth\n"); | ||
1024 | return -ENXIO; | ||
1025 | } | ||
1026 | |||
1027 | if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) { | ||
1028 | dev_err(&pdev->dev, "couldn't determine fifo-width\n"); | ||
1029 | return -ENXIO; | ||
1030 | } | ||
1031 | |||
1032 | if (of_property_read_u32(np, "cdns,trigger-address", | ||
1033 | &cqspi->trigger_address)) { | ||
1034 | dev_err(&pdev->dev, "couldn't determine trigger-address\n"); | ||
1035 | return -ENXIO; | ||
1036 | } | ||
1037 | |||
1038 | return 0; | ||
1039 | } | ||
1040 | |||
1041 | static void cqspi_controller_init(struct cqspi_st *cqspi) | ||
1042 | { | ||
1043 | cqspi_controller_enable(cqspi, 0); | ||
1044 | |||
1045 | /* Configure the remap address register, no remap */ | ||
1046 | writel(0, cqspi->iobase + CQSPI_REG_REMAP); | ||
1047 | |||
1048 | /* Disable all interrupts. */ | ||
1049 | writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); | ||
1050 | |||
1051 | /* Configure the SRAM split to 1:1. */ | ||
1052 | writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); | ||
1053 | |||
1054 | /* Load indirect trigger address. */ | ||
1055 | writel(cqspi->trigger_address, | ||
1056 | cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); | ||
1057 | |||
1058 | /* Program read watermark -- 1/2 of the FIFO. */ | ||
1059 | writel(cqspi->fifo_depth * cqspi->fifo_width / 2, | ||
1060 | cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); | ||
1061 | /* Program write watermark -- 1/8 of the FIFO. */ | ||
1062 | writel(cqspi->fifo_depth * cqspi->fifo_width / 8, | ||
1063 | cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); | ||
1064 | |||
1065 | cqspi_controller_enable(cqspi, 1); | ||
1066 | } | ||
1067 | |||
1068 | static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) | ||
1069 | { | ||
1070 | struct platform_device *pdev = cqspi->pdev; | ||
1071 | struct device *dev = &pdev->dev; | ||
1072 | struct cqspi_flash_pdata *f_pdata; | ||
1073 | struct spi_nor *nor; | ||
1074 | struct mtd_info *mtd; | ||
1075 | unsigned int cs; | ||
1076 | int i, ret; | ||
1077 | |||
1078 | /* Get flash device data */ | ||
1079 | for_each_available_child_of_node(dev->of_node, np) { | ||
1080 | if (of_property_read_u32(np, "reg", &cs)) { | ||
1081 | dev_err(dev, "Couldn't determine chip select.\n"); | ||
1082 | goto err; | ||
1083 | } | ||
1084 | |||
1085 | if (cs > CQSPI_MAX_CHIPSELECT) { | ||
1086 | dev_err(dev, "Chip select %d out of range.\n", cs); | ||
1087 | goto err; | ||
1088 | } | ||
1089 | |||
1090 | f_pdata = &cqspi->f_pdata[cs]; | ||
1091 | f_pdata->cqspi = cqspi; | ||
1092 | f_pdata->cs = cs; | ||
1093 | |||
1094 | ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); | ||
1095 | if (ret) | ||
1096 | goto err; | ||
1097 | |||
1098 | nor = &f_pdata->nor; | ||
1099 | mtd = &nor->mtd; | ||
1100 | |||
1101 | mtd->priv = nor; | ||
1102 | |||
1103 | nor->dev = dev; | ||
1104 | spi_nor_set_flash_node(nor, np); | ||
1105 | nor->priv = f_pdata; | ||
1106 | |||
1107 | nor->read_reg = cqspi_read_reg; | ||
1108 | nor->write_reg = cqspi_write_reg; | ||
1109 | nor->read = cqspi_read; | ||
1110 | nor->write = cqspi_write; | ||
1111 | nor->erase = cqspi_erase; | ||
1112 | nor->prepare = cqspi_prep; | ||
1113 | nor->unprepare = cqspi_unprep; | ||
1114 | |||
1115 | mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", | ||
1116 | dev_name(dev), cs); | ||
1117 | if (!mtd->name) { | ||
1118 | ret = -ENOMEM; | ||
1119 | goto err; | ||
1120 | } | ||
1121 | |||
1122 | ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); | ||
1123 | if (ret) | ||
1124 | goto err; | ||
1125 | |||
1126 | ret = mtd_device_register(mtd, NULL, 0); | ||
1127 | if (ret) | ||
1128 | goto err; | ||
1129 | |||
1130 | f_pdata->registered = true; | ||
1131 | } | ||
1132 | |||
1133 | return 0; | ||
1134 | |||
1135 | err: | ||
1136 | for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) | ||
1137 | if (cqspi->f_pdata[i].registered) | ||
1138 | mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); | ||
1139 | return ret; | ||
1140 | } | ||
1141 | |||
1142 | static int cqspi_probe(struct platform_device *pdev) | ||
1143 | { | ||
1144 | struct device_node *np = pdev->dev.of_node; | ||
1145 | struct device *dev = &pdev->dev; | ||
1146 | struct cqspi_st *cqspi; | ||
1147 | struct resource *res; | ||
1148 | struct resource *res_ahb; | ||
1149 | int ret; | ||
1150 | int irq; | ||
1151 | |||
1152 | cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL); | ||
1153 | if (!cqspi) | ||
1154 | return -ENOMEM; | ||
1155 | |||
1156 | mutex_init(&cqspi->bus_mutex); | ||
1157 | cqspi->pdev = pdev; | ||
1158 | platform_set_drvdata(pdev, cqspi); | ||
1159 | |||
1160 | /* Obtain configuration from OF. */ | ||
1161 | ret = cqspi_of_get_pdata(pdev); | ||
1162 | if (ret) { | ||
1163 | dev_err(dev, "Cannot get mandatory OF data.\n"); | ||
1164 | return -ENODEV; | ||
1165 | } | ||
1166 | |||
1167 | /* Obtain QSPI clock. */ | ||
1168 | cqspi->clk = devm_clk_get(dev, NULL); | ||
1169 | if (IS_ERR(cqspi->clk)) { | ||
1170 | dev_err(dev, "Cannot claim QSPI clock.\n"); | ||
1171 | return PTR_ERR(cqspi->clk); | ||
1172 | } | ||
1173 | |||
1174 | /* Obtain and remap controller address. */ | ||
1175 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1176 | cqspi->iobase = devm_ioremap_resource(dev, res); | ||
1177 | if (IS_ERR(cqspi->iobase)) { | ||
1178 | dev_err(dev, "Cannot remap controller address.\n"); | ||
1179 | return PTR_ERR(cqspi->iobase); | ||
1180 | } | ||
1181 | |||
1182 | /* Obtain and remap AHB address. */ | ||
1183 | res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
1184 | cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb); | ||
1185 | if (IS_ERR(cqspi->ahb_base)) { | ||
1186 | dev_err(dev, "Cannot remap AHB address.\n"); | ||
1187 | return PTR_ERR(cqspi->ahb_base); | ||
1188 | } | ||
1189 | |||
1190 | init_completion(&cqspi->transfer_complete); | ||
1191 | |||
1192 | /* Obtain IRQ line. */ | ||
1193 | irq = platform_get_irq(pdev, 0); | ||
1194 | if (irq < 0) { | ||
1195 | dev_err(dev, "Cannot obtain IRQ.\n"); | ||
1196 | return -ENXIO; | ||
1197 | } | ||
1198 | |||
1199 | ret = clk_prepare_enable(cqspi->clk); | ||
1200 | if (ret) { | ||
1201 | dev_err(dev, "Cannot enable QSPI clock.\n"); | ||
1202 | return ret; | ||
1203 | } | ||
1204 | |||
1205 | cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); | ||
1206 | |||
1207 | ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, | ||
1208 | pdev->name, cqspi); | ||
1209 | if (ret) { | ||
1210 | dev_err(dev, "Cannot request IRQ.\n"); | ||
1211 | goto probe_irq_failed; | ||
1212 | } | ||
1213 | |||
1214 | cqspi_wait_idle(cqspi); | ||
1215 | cqspi_controller_init(cqspi); | ||
1216 | cqspi->current_cs = -1; | ||
1217 | cqspi->sclk = 0; | ||
1218 | |||
1219 | ret = cqspi_setup_flash(cqspi, np); | ||
1220 | if (ret) { | ||
1221 | dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret); | ||
1222 | goto probe_setup_failed; | ||
1223 | } | ||
1224 | |||
1225 | return ret; | ||
1226 | probe_irq_failed: | ||
1227 | cqspi_controller_enable(cqspi, 0); | ||
1228 | probe_setup_failed: | ||
1229 | clk_disable_unprepare(cqspi->clk); | ||
1230 | return ret; | ||
1231 | } | ||
1232 | |||
1233 | static int cqspi_remove(struct platform_device *pdev) | ||
1234 | { | ||
1235 | struct cqspi_st *cqspi = platform_get_drvdata(pdev); | ||
1236 | int i; | ||
1237 | |||
1238 | for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++) | ||
1239 | if (cqspi->f_pdata[i].registered) | ||
1240 | mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd); | ||
1241 | |||
1242 | cqspi_controller_enable(cqspi, 0); | ||
1243 | |||
1244 | clk_disable_unprepare(cqspi->clk); | ||
1245 | |||
1246 | return 0; | ||
1247 | } | ||
1248 | |||
1249 | #ifdef CONFIG_PM_SLEEP | ||
1250 | static int cqspi_suspend(struct device *dev) | ||
1251 | { | ||
1252 | struct cqspi_st *cqspi = dev_get_drvdata(dev); | ||
1253 | |||
1254 | cqspi_controller_enable(cqspi, 0); | ||
1255 | return 0; | ||
1256 | } | ||
1257 | |||
1258 | static int cqspi_resume(struct device *dev) | ||
1259 | { | ||
1260 | struct cqspi_st *cqspi = dev_get_drvdata(dev); | ||
1261 | |||
1262 | cqspi_controller_enable(cqspi, 1); | ||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | static const struct dev_pm_ops cqspi__dev_pm_ops = { | ||
1267 | .suspend = cqspi_suspend, | ||
1268 | .resume = cqspi_resume, | ||
1269 | }; | ||
1270 | |||
1271 | #define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops) | ||
1272 | #else | ||
1273 | #define CQSPI_DEV_PM_OPS NULL | ||
1274 | #endif | ||
1275 | |||
1276 | static struct of_device_id const cqspi_dt_ids[] = { | ||
1277 | {.compatible = "cdns,qspi-nor",}, | ||
1278 | { /* end of table */ } | ||
1279 | }; | ||
1280 | |||
1281 | MODULE_DEVICE_TABLE(of, cqspi_dt_ids); | ||
1282 | |||
1283 | static struct platform_driver cqspi_platform_driver = { | ||
1284 | .probe = cqspi_probe, | ||
1285 | .remove = cqspi_remove, | ||
1286 | .driver = { | ||
1287 | .name = CQSPI_NAME, | ||
1288 | .pm = CQSPI_DEV_PM_OPS, | ||
1289 | .of_match_table = cqspi_dt_ids, | ||
1290 | }, | ||
1291 | }; | ||
1292 | |||
1293 | module_platform_driver(cqspi_platform_driver); | ||
1294 | |||
1295 | MODULE_DESCRIPTION("Cadence QSPI Controller Driver"); | ||
1296 | MODULE_LICENSE("GPL v2"); | ||
1297 | MODULE_ALIAS("platform:" CQSPI_NAME); | ||
1298 | MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); | ||
1299 | MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>"); | ||
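As a quick check of the arithmetic in cqspi_config_baudrate_div() above: the divisor field is DIV_ROUND_UP(ref_clk_hz, 2 * sclk) - 1, so the resulting SCLK never exceeds the requested rate. A minimal standalone sketch, not driver code; the divide-by-2*(div+1) hardware encoding is an assumption inferred from that formula:

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_UP(). */
static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int ref_clk_hz = 400000000;	/* example reference clock */
	unsigned int sclk = 50000000;		/* requested flash clock */

	/* Mirrors the computation in cqspi_config_baudrate_div(). */
	unsigned int div = div_round_up(ref_clk_hz, 2 * sclk) - 1;

	/* Assumed encoding: field value div divides the reference by 2 * (div + 1). */
	printf("div = %u, actual sclk = %u Hz\n",
	       div, ref_clk_hz / (2 * (div + 1)));
	return 0;	/* prints: div = 3, actual sclk = 50000000 Hz */
}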
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 9ab2b51d54b8..5c82e4ef1904 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c | |||
@@ -618,9 +618,9 @@ static inline void fsl_qspi_invalid(struct fsl_qspi *q) | |||
618 | qspi_writel(q, reg, q->iobase + QUADSPI_MCR); | 618 | qspi_writel(q, reg, q->iobase + QUADSPI_MCR); |
619 | } | 619 | } |
620 | 620 | ||
621 | static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, | 621 | static ssize_t fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, |
622 | u8 opcode, unsigned int to, u32 *txbuf, | 622 | u8 opcode, unsigned int to, u32 *txbuf, |
623 | unsigned count, size_t *retlen) | 623 | unsigned count) |
624 | { | 624 | { |
625 | int ret, i, j; | 625 | int ret, i, j; |
626 | u32 tmp; | 626 | u32 tmp; |
@@ -647,8 +647,8 @@ static int fsl_qspi_nor_write(struct fsl_qspi *q, struct spi_nor *nor, | |||
647 | /* Trigger it */ | 647 | /* Trigger it */ |
648 | ret = fsl_qspi_runcmd(q, opcode, to, count); | 648 | ret = fsl_qspi_runcmd(q, opcode, to, count); |
649 | 649 | ||
650 | if (ret == 0 && retlen) | 650 | if (ret == 0) |
651 | *retlen += count; | 651 | return count; |
652 | 652 | ||
653 | return ret; | 653 | return ret; |
654 | } | 654 | } |
@@ -859,7 +859,9 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
859 | 859 | ||
860 | } else if (len > 0) { | 860 | } else if (len > 0) { |
861 | ret = fsl_qspi_nor_write(q, nor, opcode, 0, | 861 | ret = fsl_qspi_nor_write(q, nor, opcode, 0, |
862 | (u32 *)buf, len, NULL); | 862 | (u32 *)buf, len); |
863 | if (ret > 0) | ||
864 | return 0; | ||
863 | } else { | 865 | } else { |
864 | dev_err(q->dev, "invalid cmd %d\n", opcode); | 866 | dev_err(q->dev, "invalid cmd %d\n", opcode); |
865 | ret = -EINVAL; | 867 | ret = -EINVAL; |
@@ -868,20 +870,20 @@ static int fsl_qspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
868 | return ret; | 870 | return ret; |
869 | } | 871 | } |
870 | 872 | ||
871 | static void fsl_qspi_write(struct spi_nor *nor, loff_t to, | 873 | static ssize_t fsl_qspi_write(struct spi_nor *nor, loff_t to, |
872 | size_t len, size_t *retlen, const u_char *buf) | 874 | size_t len, const u_char *buf) |
873 | { | 875 | { |
874 | struct fsl_qspi *q = nor->priv; | 876 | struct fsl_qspi *q = nor->priv; |
875 | 877 | ssize_t ret = fsl_qspi_nor_write(q, nor, nor->program_opcode, to, | |
876 | fsl_qspi_nor_write(q, nor, nor->program_opcode, to, | 878 | (u32 *)buf, len); |
877 | (u32 *)buf, len, retlen); | ||
878 | 879 | ||
879 | /* invalid the data in the AHB buffer. */ | 880 | /* invalid the data in the AHB buffer. */ |
880 | fsl_qspi_invalid(q); | 881 | fsl_qspi_invalid(q); |
882 | return ret; | ||
881 | } | 883 | } |
882 | 884 | ||
883 | static int fsl_qspi_read(struct spi_nor *nor, loff_t from, | 885 | static ssize_t fsl_qspi_read(struct spi_nor *nor, loff_t from, |
884 | size_t len, size_t *retlen, u_char *buf) | 886 | size_t len, u_char *buf) |
885 | { | 887 | { |
886 | struct fsl_qspi *q = nor->priv; | 888 | struct fsl_qspi *q = nor->priv; |
887 | u8 cmd = nor->read_opcode; | 889 | u8 cmd = nor->read_opcode; |
@@ -923,8 +925,7 @@ static int fsl_qspi_read(struct spi_nor *nor, loff_t from, | |||
923 | memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, | 925 | memcpy(buf, q->ahb_addr + q->chip_base_addr + from - q->memmap_offs, |
924 | len); | 926 | len); |
925 | 927 | ||
926 | *retlen += len; | 928 | return len; |
927 | return 0; | ||
928 | } | 929 | } |
929 | 930 | ||
930 | static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) | 931 | static int fsl_qspi_erase(struct spi_nor *nor, loff_t offs) |
diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c new file mode 100644 index 000000000000..20378b0d55e9 --- /dev/null +++ b/drivers/mtd/spi-nor/hisi-sfc.c | |||
@@ -0,0 +1,489 @@ | |||
1 | /* | ||
2 | * HiSilicon SPI Nor Flash Controller Driver | ||
3 | * | ||
4 | * Copyright (c) 2015-2016 HiSilicon Technologies Co., Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | #include <linux/bitops.h> | ||
20 | #include <linux/clk.h> | ||
21 | #include <linux/dma-mapping.h> | ||
22 | #include <linux/iopoll.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/mtd/mtd.h> | ||
25 | #include <linux/mtd/spi-nor.h> | ||
26 | #include <linux/of.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/slab.h> | ||
29 | |||
30 | /* Hardware register offsets and field definitions */ | ||
31 | #define FMC_CFG 0x00 | ||
32 | #define FMC_CFG_OP_MODE_MASK BIT_MASK(0) | ||
33 | #define FMC_CFG_OP_MODE_BOOT 0 | ||
34 | #define FMC_CFG_OP_MODE_NORMAL 1 | ||
35 | #define FMC_CFG_FLASH_SEL(type) (((type) & 0x3) << 1) | ||
36 | #define FMC_CFG_FLASH_SEL_MASK 0x6 | ||
37 | #define FMC_ECC_TYPE(type) (((type) & 0x7) << 5) | ||
38 | #define FMC_ECC_TYPE_MASK GENMASK(7, 5) | ||
39 | #define SPI_NOR_ADDR_MODE_MASK BIT_MASK(10) | ||
40 | #define SPI_NOR_ADDR_MODE_3BYTES (0x0 << 10) | ||
41 | #define SPI_NOR_ADDR_MODE_4BYTES (0x1 << 10) | ||
42 | #define FMC_GLOBAL_CFG 0x04 | ||
43 | #define FMC_GLOBAL_CFG_WP_ENABLE BIT(6) | ||
44 | #define FMC_SPI_TIMING_CFG 0x08 | ||
45 | #define TIMING_CFG_TCSH(nr) (((nr) & 0xf) << 8) | ||
46 | #define TIMING_CFG_TCSS(nr) (((nr) & 0xf) << 4) | ||
47 | #define TIMING_CFG_TSHSL(nr) ((nr) & 0xf) | ||
48 | #define CS_HOLD_TIME 0x6 | ||
49 | #define CS_SETUP_TIME 0x6 | ||
50 | #define CS_DESELECT_TIME 0xf | ||
51 | #define FMC_INT 0x18 | ||
52 | #define FMC_INT_OP_DONE BIT(0) | ||
53 | #define FMC_INT_CLR 0x20 | ||
54 | #define FMC_CMD 0x24 | ||
55 | #define FMC_CMD_CMD1(cmd) ((cmd) & 0xff) | ||
56 | #define FMC_ADDRL 0x2c | ||
57 | #define FMC_OP_CFG 0x30 | ||
58 | #define OP_CFG_FM_CS(cs) ((cs) << 11) | ||
59 | #define OP_CFG_MEM_IF_TYPE(type) (((type) & 0x7) << 7) | ||
60 | #define OP_CFG_ADDR_NUM(addr) (((addr) & 0x7) << 4) | ||
61 | #define OP_CFG_DUMMY_NUM(dummy) ((dummy) & 0xf) | ||
62 | #define FMC_DATA_NUM 0x38 | ||
63 | #define FMC_DATA_NUM_CNT(cnt) ((cnt) & GENMASK(13, 0)) | ||
64 | #define FMC_OP 0x3c | ||
65 | #define FMC_OP_DUMMY_EN BIT(8) | ||
66 | #define FMC_OP_CMD1_EN BIT(7) | ||
67 | #define FMC_OP_ADDR_EN BIT(6) | ||
68 | #define FMC_OP_WRITE_DATA_EN BIT(5) | ||
69 | #define FMC_OP_READ_DATA_EN BIT(2) | ||
70 | #define FMC_OP_READ_STATUS_EN BIT(1) | ||
71 | #define FMC_OP_REG_OP_START BIT(0) | ||
72 | #define FMC_DMA_LEN 0x40 | ||
73 | #define FMC_DMA_LEN_SET(len) ((len) & GENMASK(27, 0)) | ||
74 | #define FMC_DMA_SADDR_D0 0x4c | ||
75 | #define HIFMC_DMA_MAX_LEN (4096) | ||
76 | #define HIFMC_DMA_MASK (HIFMC_DMA_MAX_LEN - 1) | ||
77 | #define FMC_OP_DMA 0x68 | ||
78 | #define OP_CTRL_RD_OPCODE(code) (((code) & 0xff) << 16) | ||
79 | #define OP_CTRL_WR_OPCODE(code) (((code) & 0xff) << 8) | ||
80 | #define OP_CTRL_RW_OP(op) ((op) << 1) | ||
81 | #define OP_CTRL_DMA_OP_READY BIT(0) | ||
82 | #define FMC_OP_READ 0x0 | ||
83 | #define FMC_OP_WRITE 0x1 | ||
84 | #define FMC_WAIT_TIMEOUT 1000000 | ||
85 | |||
86 | enum hifmc_iftype { | ||
87 | IF_TYPE_STD, | ||
88 | IF_TYPE_DUAL, | ||
89 | IF_TYPE_DIO, | ||
90 | IF_TYPE_QUAD, | ||
91 | IF_TYPE_QIO, | ||
92 | }; | ||
93 | |||
94 | struct hifmc_priv { | ||
95 | u32 chipselect; | ||
96 | u32 clkrate; | ||
97 | struct hifmc_host *host; | ||
98 | }; | ||
99 | |||
100 | #define HIFMC_MAX_CHIP_NUM 2 | ||
101 | struct hifmc_host { | ||
102 | struct device *dev; | ||
103 | struct mutex lock; | ||
104 | |||
105 | void __iomem *regbase; | ||
106 | void __iomem *iobase; | ||
107 | struct clk *clk; | ||
108 | void *buffer; | ||
109 | dma_addr_t dma_buffer; | ||
110 | |||
111 | struct spi_nor *nor[HIFMC_MAX_CHIP_NUM]; | ||
112 | u32 num_chip; | ||
113 | }; | ||
114 | |||
115 | static inline int wait_op_finish(struct hifmc_host *host) | ||
116 | { | ||
117 | u32 reg; | ||
118 | |||
119 | return readl_poll_timeout(host->regbase + FMC_INT, reg, | ||
120 | (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT); | ||
121 | } | ||
122 | |||
123 | static int get_if_type(enum read_mode flash_read) | ||
124 | { | ||
125 | enum hifmc_iftype if_type; | ||
126 | |||
127 | switch (flash_read) { | ||
128 | case SPI_NOR_DUAL: | ||
129 | if_type = IF_TYPE_DUAL; | ||
130 | break; | ||
131 | case SPI_NOR_QUAD: | ||
132 | if_type = IF_TYPE_QUAD; | ||
133 | break; | ||
134 | case SPI_NOR_NORMAL: | ||
135 | case SPI_NOR_FAST: | ||
136 | default: | ||
137 | if_type = IF_TYPE_STD; | ||
138 | break; | ||
139 | } | ||
140 | |||
141 | return if_type; | ||
142 | } | ||
143 | |||
144 | static void hisi_spi_nor_init(struct hifmc_host *host) | ||
145 | { | ||
146 | u32 reg; | ||
147 | |||
148 | reg = TIMING_CFG_TCSH(CS_HOLD_TIME) | ||
149 | | TIMING_CFG_TCSS(CS_SETUP_TIME) | ||
150 | | TIMING_CFG_TSHSL(CS_DESELECT_TIME); | ||
151 | writel(reg, host->regbase + FMC_SPI_TIMING_CFG); | ||
152 | } | ||
153 | |||
154 | static int hisi_spi_nor_prep(struct spi_nor *nor, enum spi_nor_ops ops) | ||
155 | { | ||
156 | struct hifmc_priv *priv = nor->priv; | ||
157 | struct hifmc_host *host = priv->host; | ||
158 | int ret; | ||
159 | |||
160 | mutex_lock(&host->lock); | ||
161 | |||
162 | ret = clk_set_rate(host->clk, priv->clkrate); | ||
163 | if (ret) | ||
164 | goto out; | ||
165 | |||
166 | ret = clk_prepare_enable(host->clk); | ||
167 | if (ret) | ||
168 | goto out; | ||
169 | |||
170 | return 0; | ||
171 | |||
172 | out: | ||
173 | mutex_unlock(&host->lock); | ||
174 | return ret; | ||
175 | } | ||
176 | |||
177 | static void hisi_spi_nor_unprep(struct spi_nor *nor, enum spi_nor_ops ops) | ||
178 | { | ||
179 | struct hifmc_priv *priv = nor->priv; | ||
180 | struct hifmc_host *host = priv->host; | ||
181 | |||
182 | clk_disable_unprepare(host->clk); | ||
183 | mutex_unlock(&host->lock); | ||
184 | } | ||
185 | |||
186 | static int hisi_spi_nor_op_reg(struct spi_nor *nor, | ||
187 | u8 opcode, int len, u8 optype) | ||
188 | { | ||
189 | struct hifmc_priv *priv = nor->priv; | ||
190 | struct hifmc_host *host = priv->host; | ||
191 | u32 reg; | ||
192 | |||
193 | reg = FMC_CMD_CMD1(opcode); | ||
194 | writel(reg, host->regbase + FMC_CMD); | ||
195 | |||
196 | reg = FMC_DATA_NUM_CNT(len); | ||
197 | writel(reg, host->regbase + FMC_DATA_NUM); | ||
198 | |||
199 | reg = OP_CFG_FM_CS(priv->chipselect); | ||
200 | writel(reg, host->regbase + FMC_OP_CFG); | ||
201 | |||
202 | writel(0xff, host->regbase + FMC_INT_CLR); | ||
203 | reg = FMC_OP_CMD1_EN | FMC_OP_REG_OP_START | optype; | ||
204 | writel(reg, host->regbase + FMC_OP); | ||
205 | |||
206 | return wait_op_finish(host); | ||
207 | } | ||
208 | |||
209 | static int hisi_spi_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, | ||
210 | int len) | ||
211 | { | ||
212 | struct hifmc_priv *priv = nor->priv; | ||
213 | struct hifmc_host *host = priv->host; | ||
214 | int ret; | ||
215 | |||
216 | ret = hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_READ_DATA_EN); | ||
217 | if (ret) | ||
218 | return ret; | ||
219 | |||
220 | memcpy_fromio(buf, host->iobase, len); | ||
221 | return 0; | ||
222 | } | ||
223 | |||
224 | static int hisi_spi_nor_write_reg(struct spi_nor *nor, u8 opcode, | ||
225 | u8 *buf, int len) | ||
226 | { | ||
227 | struct hifmc_priv *priv = nor->priv; | ||
228 | struct hifmc_host *host = priv->host; | ||
229 | |||
230 | if (len) | ||
231 | memcpy_toio(host->iobase, buf, len); | ||
232 | |||
233 | return hisi_spi_nor_op_reg(nor, opcode, len, FMC_OP_WRITE_DATA_EN); | ||
234 | } | ||
235 | |||
236 | static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, | ||
237 | dma_addr_t dma_buf, size_t len, u8 op_type) | ||
238 | { | ||
239 | struct hifmc_priv *priv = nor->priv; | ||
240 | struct hifmc_host *host = priv->host; | ||
241 | u8 if_type = 0; | ||
242 | u32 reg; | ||
243 | |||
244 | reg = readl(host->regbase + FMC_CFG); | ||
245 | reg &= ~(FMC_CFG_OP_MODE_MASK | SPI_NOR_ADDR_MODE_MASK); | ||
246 | reg |= FMC_CFG_OP_MODE_NORMAL; | ||
247 | reg |= (nor->addr_width == 4) ? SPI_NOR_ADDR_MODE_4BYTES | ||
248 | : SPI_NOR_ADDR_MODE_3BYTES; | ||
249 | writel(reg, host->regbase + FMC_CFG); | ||
250 | |||
251 | writel(start_off, host->regbase + FMC_ADDRL); | ||
252 | writel(dma_buf, host->regbase + FMC_DMA_SADDR_D0); | ||
253 | writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN); | ||
254 | |||
255 | reg = OP_CFG_FM_CS(priv->chipselect); | ||
256 | if_type = get_if_type(nor->flash_read); | ||
257 | reg |= OP_CFG_MEM_IF_TYPE(if_type); | ||
258 | if (op_type == FMC_OP_READ) | ||
259 | reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3); | ||
260 | writel(reg, host->regbase + FMC_OP_CFG); | ||
261 | |||
262 | writel(0xff, host->regbase + FMC_INT_CLR); | ||
263 | reg = OP_CTRL_RW_OP(op_type) | OP_CTRL_DMA_OP_READY; | ||
264 | reg |= (op_type == FMC_OP_READ) | ||
265 | ? OP_CTRL_RD_OPCODE(nor->read_opcode) | ||
266 | : OP_CTRL_WR_OPCODE(nor->program_opcode); | ||
267 | writel(reg, host->regbase + FMC_OP_DMA); | ||
268 | |||
269 | return wait_op_finish(host); | ||
270 | } | ||
271 | |||
272 | static ssize_t hisi_spi_nor_read(struct spi_nor *nor, loff_t from, size_t len, | ||
273 | u_char *read_buf) | ||
274 | { | ||
275 | struct hifmc_priv *priv = nor->priv; | ||
276 | struct hifmc_host *host = priv->host; | ||
277 | size_t offset; | ||
278 | int ret; | ||
279 | |||
280 | for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { | ||
281 | size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); | ||
282 | |||
283 | ret = hisi_spi_nor_dma_transfer(nor, | ||
284 | from + offset, host->dma_buffer, trans, FMC_OP_READ); | ||
285 | if (ret) { | ||
286 | dev_warn(nor->dev, "DMA read timeout\n"); | ||
287 | return ret; | ||
288 | } | ||
289 | memcpy(read_buf + offset, host->buffer, trans); | ||
290 | } | ||
291 | |||
292 | return len; | ||
293 | } | ||
294 | |||
295 | static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to, | ||
296 | size_t len, const u_char *write_buf) | ||
297 | { | ||
298 | struct hifmc_priv *priv = nor->priv; | ||
299 | struct hifmc_host *host = priv->host; | ||
300 | size_t offset; | ||
301 | int ret; | ||
302 | |||
303 | for (offset = 0; offset < len; offset += HIFMC_DMA_MAX_LEN) { | ||
304 | size_t trans = min_t(size_t, HIFMC_DMA_MAX_LEN, len - offset); | ||
305 | |||
306 | memcpy(host->buffer, write_buf + offset, trans); | ||
307 | ret = hisi_spi_nor_dma_transfer(nor, | ||
308 | to + offset, host->dma_buffer, trans, FMC_OP_WRITE); | ||
309 | if (ret) { | ||
310 | dev_warn(nor->dev, "DMA write timeout\n"); | ||
311 | return ret; | ||
312 | } | ||
313 | } | ||
314 | |||
315 | return len; | ||
316 | } | ||
317 | |||
318 | /** | ||
319 | * Get SPI flash device information and register it as an MTD device. | ||
320 | */ | ||
321 | static int hisi_spi_nor_register(struct device_node *np, | ||
322 | struct hifmc_host *host) | ||
323 | { | ||
324 | struct device *dev = host->dev; | ||
325 | struct spi_nor *nor; | ||
326 | struct hifmc_priv *priv; | ||
327 | struct mtd_info *mtd; | ||
328 | int ret; | ||
329 | |||
330 | nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL); | ||
331 | if (!nor) | ||
332 | return -ENOMEM; | ||
333 | |||
334 | nor->dev = dev; | ||
335 | spi_nor_set_flash_node(nor, np); | ||
336 | |||
337 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | ||
338 | if (!priv) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | ret = of_property_read_u32(np, "reg", &priv->chipselect); | ||
342 | if (ret) { | ||
343 | dev_err(dev, "There's no reg property for %s\n", | ||
344 | np->full_name); | ||
345 | return ret; | ||
346 | } | ||
347 | |||
348 | ret = of_property_read_u32(np, "spi-max-frequency", | ||
349 | &priv->clkrate); | ||
350 | if (ret) { | ||
351 | dev_err(dev, "There's no spi-max-frequency property for %s\n", | ||
352 | np->full_name); | ||
353 | return ret; | ||
354 | } | ||
355 | priv->host = host; | ||
356 | nor->priv = priv; | ||
357 | |||
358 | nor->prepare = hisi_spi_nor_prep; | ||
359 | nor->unprepare = hisi_spi_nor_unprep; | ||
360 | nor->read_reg = hisi_spi_nor_read_reg; | ||
361 | nor->write_reg = hisi_spi_nor_write_reg; | ||
362 | nor->read = hisi_spi_nor_read; | ||
363 | nor->write = hisi_spi_nor_write; | ||
364 | nor->erase = NULL; | ||
365 | ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); | ||
366 | if (ret) | ||
367 | return ret; | ||
368 | |||
369 | mtd = &nor->mtd; | ||
370 | mtd->name = np->name; | ||
371 | ret = mtd_device_register(mtd, NULL, 0); | ||
372 | if (ret) | ||
373 | return ret; | ||
374 | |||
375 | host->nor[host->num_chip] = nor; | ||
376 | host->num_chip++; | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static void hisi_spi_nor_unregister_all(struct hifmc_host *host) | ||
381 | { | ||
382 | int i; | ||
383 | |||
384 | for (i = 0; i < host->num_chip; i++) | ||
385 | mtd_device_unregister(&host->nor[i]->mtd); | ||
386 | } | ||
387 | |||
388 | static int hisi_spi_nor_register_all(struct hifmc_host *host) | ||
389 | { | ||
390 | struct device *dev = host->dev; | ||
391 | struct device_node *np; | ||
392 | int ret; | ||
393 | |||
394 | for_each_available_child_of_node(dev->of_node, np) { | ||
395 | ret = hisi_spi_nor_register(np, host); | ||
396 | if (ret) | ||
397 | goto fail; | ||
398 | |||
399 | if (host->num_chip == HIFMC_MAX_CHIP_NUM) { | ||
400 | dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n"); | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | return 0; | ||
406 | |||
407 | fail: | ||
408 | hisi_spi_nor_unregister_all(host); | ||
409 | return ret; | ||
410 | } | ||
411 | |||
412 | static int hisi_spi_nor_probe(struct platform_device *pdev) | ||
413 | { | ||
414 | struct device *dev = &pdev->dev; | ||
415 | struct resource *res; | ||
416 | struct hifmc_host *host; | ||
417 | int ret; | ||
418 | |||
419 | host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); | ||
420 | if (!host) | ||
421 | return -ENOMEM; | ||
422 | |||
423 | platform_set_drvdata(pdev, host); | ||
424 | host->dev = dev; | ||
425 | |||
426 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); | ||
427 | host->regbase = devm_ioremap_resource(dev, res); | ||
428 | if (IS_ERR(host->regbase)) | ||
429 | return PTR_ERR(host->regbase); | ||
430 | |||
431 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "memory"); | ||
432 | host->iobase = devm_ioremap_resource(dev, res); | ||
433 | if (IS_ERR(host->iobase)) | ||
434 | return PTR_ERR(host->iobase); | ||
435 | |||
436 | host->clk = devm_clk_get(dev, NULL); | ||
437 | if (IS_ERR(host->clk)) | ||
438 | return PTR_ERR(host->clk); | ||
439 | |||
440 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
441 | if (ret) { | ||
442 | dev_warn(dev, "Unable to set dma mask\n"); | ||
443 | return ret; | ||
444 | } | ||
445 | |||
446 | host->buffer = dmam_alloc_coherent(dev, HIFMC_DMA_MAX_LEN, | ||
447 | &host->dma_buffer, GFP_KERNEL); | ||
448 | if (!host->buffer) | ||
449 | return -ENOMEM; | ||
450 | |||
451 | mutex_init(&host->lock); | ||
452 | clk_prepare_enable(host->clk); | ||
453 | hisi_spi_nor_init(host); | ||
454 | ret = hisi_spi_nor_register_all(host); | ||
455 | if (ret) | ||
456 | mutex_destroy(&host->lock); | ||
457 | |||
458 | clk_disable_unprepare(host->clk); | ||
459 | return ret; | ||
460 | } | ||
461 | |||
462 | static int hisi_spi_nor_remove(struct platform_device *pdev) | ||
463 | { | ||
464 | struct hifmc_host *host = platform_get_drvdata(pdev); | ||
465 | |||
466 | hisi_spi_nor_unregister_all(host); | ||
467 | mutex_destroy(&host->lock); | ||
468 | clk_disable_unprepare(host->clk); | ||
469 | return 0; | ||
470 | } | ||
471 | |||
472 | static const struct of_device_id hisi_spi_nor_dt_ids[] = { | ||
473 | { .compatible = "hisilicon,fmc-spi-nor"}, | ||
474 | { /* sentinel */ } | ||
475 | }; | ||
476 | MODULE_DEVICE_TABLE(of, hisi_spi_nor_dt_ids); | ||
477 | |||
478 | static struct platform_driver hisi_spi_nor_driver = { | ||
479 | .driver = { | ||
480 | .name = "hisi-sfc", | ||
481 | .of_match_table = hisi_spi_nor_dt_ids, | ||
482 | }, | ||
483 | .probe = hisi_spi_nor_probe, | ||
484 | .remove = hisi_spi_nor_remove, | ||
485 | }; | ||
486 | module_platform_driver(hisi_spi_nor_driver); | ||
487 | |||
488 | MODULE_LICENSE("GPL v2"); | ||
489 | MODULE_DESCRIPTION("HiSilicon SPI Nor Flash Controller Driver"); | ||
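hisi_spi_nor_read() and hisi_spi_nor_write() above split every transfer into HIFMC_DMA_MAX_LEN (4 KiB) pieces so that each piece fits the single coherent bounce buffer. A minimal sketch of the same chunking pattern, detached from the hardware; do_dma(), fake_dma() and bounce are placeholders, not driver symbols:

#include <stddef.h>
#include <string.h>
#include <sys/types.h>

#define DMA_MAX_LEN 4096	/* mirrors HIFMC_DMA_MAX_LEN */

/* Read len bytes starting at from into dst, one bounce-buffer sized chunk
 * at a time; do_dma() stands in for hisi_spi_nor_dma_transfer(). */
static ssize_t chunked_read(unsigned char *dst, size_t from, size_t len,
			    unsigned char *bounce,
			    int (*do_dma)(size_t off, size_t n, unsigned char *bounce))
{
	size_t offset;

	for (offset = 0; offset < len; offset += DMA_MAX_LEN) {
		size_t trans = len - offset;

		if (trans > DMA_MAX_LEN)
			trans = DMA_MAX_LEN;
		if (do_dma(from + offset, trans, bounce))
			return -1;	/* the driver propagates the poll error */
		memcpy(dst + offset, bounce, trans);
	}
	return len;
}

static unsigned char fake_flash[16384];

static int fake_dma(size_t off, size_t n, unsigned char *bounce)
{
	memcpy(bounce, fake_flash + off, n);	/* pretend the DMA completed */
	return 0;
}

int main(void)
{
	static unsigned char dst[10000], bounce[DMA_MAX_LEN];

	return chunked_read(dst, 100, sizeof(dst), bounce, fake_dma) == sizeof(dst) ? 0 : 1;
}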
diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index 8bed1a4cb79c..e661877c23de 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/ioport.h> | 21 | #include <linux/ioport.h> |
22 | #include <linux/math64.h> | 22 | #include <linux/math64.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/mtd/mtd.h> | ||
25 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
26 | #include <linux/of.h> | 25 | #include <linux/of.h> |
27 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
@@ -243,8 +242,8 @@ static void mt8173_nor_set_addr(struct mt8173_nor *mt8173_nor, u32 addr) | |||
243 | writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG); | 242 | writeb(addr & 0xff, mt8173_nor->base + MTK_NOR_RADR3_REG); |
244 | } | 243 | } |
245 | 244 | ||
246 | static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length, | 245 | static ssize_t mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length, |
247 | size_t *retlen, u_char *buffer) | 246 | u_char *buffer) |
248 | { | 247 | { |
249 | int i, ret; | 248 | int i, ret; |
250 | int addr = (int)from; | 249 | int addr = (int)from; |
@@ -255,13 +254,13 @@ static int mt8173_nor_read(struct spi_nor *nor, loff_t from, size_t length, | |||
255 | mt8173_nor_set_read_mode(mt8173_nor); | 254 | mt8173_nor_set_read_mode(mt8173_nor); |
256 | mt8173_nor_set_addr(mt8173_nor, addr); | 255 | mt8173_nor_set_addr(mt8173_nor, addr); |
257 | 256 | ||
258 | for (i = 0; i < length; i++, (*retlen)++) { | 257 | for (i = 0; i < length; i++) { |
259 | ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD); | 258 | ret = mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_PIO_READ_CMD); |
260 | if (ret < 0) | 259 | if (ret < 0) |
261 | return ret; | 260 | return ret; |
262 | buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG); | 261 | buf[i] = readb(mt8173_nor->base + MTK_NOR_RDATA_REG); |
263 | } | 262 | } |
264 | return 0; | 263 | return length; |
265 | } | 264 | } |
266 | 265 | ||
267 | static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor, | 266 | static int mt8173_nor_write_single_byte(struct mt8173_nor *mt8173_nor, |
@@ -297,36 +296,44 @@ static int mt8173_nor_write_buffer(struct mt8173_nor *mt8173_nor, int addr, | |||
297 | return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD); | 296 | return mt8173_nor_execute_cmd(mt8173_nor, MTK_NOR_WR_CMD); |
298 | } | 297 | } |
299 | 298 | ||
300 | static void mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len, | 299 | static ssize_t mt8173_nor_write(struct spi_nor *nor, loff_t to, size_t len, |
301 | size_t *retlen, const u_char *buf) | 300 | const u_char *buf) |
302 | { | 301 | { |
303 | int ret; | 302 | int ret; |
304 | struct mt8173_nor *mt8173_nor = nor->priv; | 303 | struct mt8173_nor *mt8173_nor = nor->priv; |
304 | size_t i; | ||
305 | 305 | ||
306 | ret = mt8173_nor_write_buffer_enable(mt8173_nor); | 306 | ret = mt8173_nor_write_buffer_enable(mt8173_nor); |
307 | if (ret < 0) | 307 | if (ret < 0) { |
308 | dev_warn(mt8173_nor->dev, "write buffer enable failed!\n"); | 308 | dev_warn(mt8173_nor->dev, "write buffer enable failed!\n"); |
309 | return ret; | ||
310 | } | ||
309 | 311 | ||
310 | while (len >= SFLASH_WRBUF_SIZE) { | 312 | for (i = 0; i + SFLASH_WRBUF_SIZE <= len; i += SFLASH_WRBUF_SIZE) { |
311 | ret = mt8173_nor_write_buffer(mt8173_nor, to, buf); | 313 | ret = mt8173_nor_write_buffer(mt8173_nor, to, buf); |
312 | if (ret < 0) | 314 | if (ret < 0) { |
313 | dev_err(mt8173_nor->dev, "write buffer failed!\n"); | 315 | dev_err(mt8173_nor->dev, "write buffer failed!\n"); |
314 | len -= SFLASH_WRBUF_SIZE; | 316 | return ret; |
317 | } | ||
315 | to += SFLASH_WRBUF_SIZE; | 318 | to += SFLASH_WRBUF_SIZE; |
316 | buf += SFLASH_WRBUF_SIZE; | 319 | buf += SFLASH_WRBUF_SIZE; |
317 | (*retlen) += SFLASH_WRBUF_SIZE; | ||
318 | } | 320 | } |
319 | ret = mt8173_nor_write_buffer_disable(mt8173_nor); | 321 | ret = mt8173_nor_write_buffer_disable(mt8173_nor); |
320 | if (ret < 0) | 322 | if (ret < 0) { |
321 | dev_warn(mt8173_nor->dev, "write buffer disable failed!\n"); | 323 | dev_warn(mt8173_nor->dev, "write buffer disable failed!\n"); |
324 | return ret; | ||
325 | } | ||
322 | 326 | ||
323 | if (len) { | 327 | if (i < len) { |
324 | ret = mt8173_nor_write_single_byte(mt8173_nor, to, (int)len, | 328 | ret = mt8173_nor_write_single_byte(mt8173_nor, to, |
325 | (u8 *)buf); | 329 | (int)(len - i), (u8 *)buf); |
326 | if (ret < 0) | 330 | if (ret < 0) { |
327 | dev_err(mt8173_nor->dev, "write single byte failed!\n"); | 331 | dev_err(mt8173_nor->dev, "write single byte failed!\n"); |
328 | (*retlen) += len; | 332 | return ret; |
333 | } | ||
329 | } | 334 | } |
335 | |||
336 | return len; | ||
330 | } | 337 | } |
331 | 338 | ||
332 | static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | 339 | static int mt8173_nor_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) |
diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c index ae428cb0e04b..73a14f40928b 100644 --- a/drivers/mtd/spi-nor/nxp-spifi.c +++ b/drivers/mtd/spi-nor/nxp-spifi.c | |||
@@ -172,8 +172,8 @@ static int nxp_spifi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) | |||
172 | return nxp_spifi_wait_for_cmd(spifi); | 172 | return nxp_spifi_wait_for_cmd(spifi); |
173 | } | 173 | } |
174 | 174 | ||
175 | static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, | 175 | static ssize_t nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, |
176 | size_t *retlen, u_char *buf) | 176 | u_char *buf) |
177 | { | 177 | { |
178 | struct nxp_spifi *spifi = nor->priv; | 178 | struct nxp_spifi *spifi = nor->priv; |
179 | int ret; | 179 | int ret; |
@@ -183,24 +183,23 @@ static int nxp_spifi_read(struct spi_nor *nor, loff_t from, size_t len, | |||
183 | return ret; | 183 | return ret; |
184 | 184 | ||
185 | memcpy_fromio(buf, spifi->flash_base + from, len); | 185 | memcpy_fromio(buf, spifi->flash_base + from, len); |
186 | *retlen += len; | ||
187 | 186 | ||
188 | return 0; | 187 | return len; |
189 | } | 188 | } |
190 | 189 | ||
191 | static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, | 190 | static ssize_t nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, |
192 | size_t *retlen, const u_char *buf) | 191 | const u_char *buf) |
193 | { | 192 | { |
194 | struct nxp_spifi *spifi = nor->priv; | 193 | struct nxp_spifi *spifi = nor->priv; |
195 | u32 cmd; | 194 | u32 cmd; |
196 | int ret; | 195 | int ret; |
196 | size_t i; | ||
197 | 197 | ||
198 | ret = nxp_spifi_set_memory_mode_off(spifi); | 198 | ret = nxp_spifi_set_memory_mode_off(spifi); |
199 | if (ret) | 199 | if (ret) |
200 | return; | 200 | return ret; |
201 | 201 | ||
202 | writel(to, spifi->io_base + SPIFI_ADDR); | 202 | writel(to, spifi->io_base + SPIFI_ADDR); |
203 | *retlen += len; | ||
204 | 203 | ||
205 | cmd = SPIFI_CMD_DOUT | | 204 | cmd = SPIFI_CMD_DOUT | |
206 | SPIFI_CMD_DATALEN(len) | | 205 | SPIFI_CMD_DATALEN(len) | |
@@ -209,10 +208,14 @@ static void nxp_spifi_write(struct spi_nor *nor, loff_t to, size_t len, | |||
209 | SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); | 208 | SPIFI_CMD_FRAMEFORM(spifi->nor.addr_width + 1); |
210 | writel(cmd, spifi->io_base + SPIFI_CMD); | 209 | writel(cmd, spifi->io_base + SPIFI_CMD); |
211 | 210 | ||
212 | while (len--) | 211 | for (i = 0; i < len; i++) |
213 | writeb(*buf++, spifi->io_base + SPIFI_DATA); | 212 | writeb(buf[i], spifi->io_base + SPIFI_DATA); |
213 | |||
214 | ret = nxp_spifi_wait_for_cmd(spifi); | ||
215 | if (ret) | ||
216 | return ret; | ||
214 | 217 | ||
215 | nxp_spifi_wait_for_cmd(spifi); | 218 | return len; |
216 | } | 219 | } |
217 | 220 | ||
218 | static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs) | 221 | static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs) |
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index c52e45594bfd..d0fc165d7d66 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c | |||
@@ -661,7 +661,7 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) | |||
661 | status_new = (status_old & ~mask & ~SR_TB) | val; | 661 | status_new = (status_old & ~mask & ~SR_TB) | val; |
662 | 662 | ||
663 | /* Don't protect status register if we're fully unlocked */ | 663 | /* Don't protect status register if we're fully unlocked */ |
664 | if (lock_len == mtd->size) | 664 | if (lock_len == 0) |
665 | status_new &= ~SR_SRWD; | 665 | status_new &= ~SR_SRWD; |
666 | 666 | ||
667 | if (!use_top) | 667 | if (!use_top) |
@@ -830,10 +830,26 @@ static const struct flash_info spi_nor_ids[] = { | |||
830 | { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) }, | 830 | { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) }, |
831 | 831 | ||
832 | /* GigaDevice */ | 832 | /* GigaDevice */ |
833 | { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, | 833 | { |
834 | { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, | 834 | "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, |
835 | { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, | 835 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | |
836 | { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) }, | 836 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) |
837 | }, | ||
838 | { | ||
839 | "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, | ||
840 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | ||
841 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | ||
842 | }, | ||
843 | { | ||
844 | "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128, | ||
845 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | ||
846 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | ||
847 | }, | ||
848 | { | ||
849 | "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, | ||
850 | SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | | ||
851 | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) | ||
852 | }, | ||
837 | 853 | ||
838 | /* Intel/Numonyx -- xxxs33b */ | 854 | /* Intel/Numonyx -- xxxs33b */ |
839 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, | 855 | { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, |
@@ -871,6 +887,7 @@ static const struct flash_info spi_nor_ids[] = { | |||
871 | { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, | 887 | { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, |
872 | { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, | 888 | { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, |
873 | { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, | 889 | { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, |
890 | { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, | ||
874 | 891 | ||
875 | /* PMC */ | 892 | /* PMC */ |
876 | { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, | 893 | { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, |
@@ -1031,8 +1048,25 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
1031 | if (ret) | 1048 | if (ret) |
1032 | return ret; | 1049 | return ret; |
1033 | 1050 | ||
1034 | ret = nor->read(nor, from, len, retlen, buf); | 1051 | while (len) { |
1052 | ret = nor->read(nor, from, len, buf); | ||
1053 | if (ret == 0) { | ||
1054 | /* We shouldn't see 0-length reads */ | ||
1055 | ret = -EIO; | ||
1056 | goto read_err; | ||
1057 | } | ||
1058 | if (ret < 0) | ||
1059 | goto read_err; | ||
1060 | |||
1061 | WARN_ON(ret > len); | ||
1062 | *retlen += ret; | ||
1063 | buf += ret; | ||
1064 | from += ret; | ||
1065 | len -= ret; | ||
1066 | } | ||
1067 | ret = 0; | ||
1035 | 1068 | ||
1069 | read_err: | ||
1036 | spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); | 1070 | spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); |
1037 | return ret; | 1071 | return ret; |
1038 | } | 1072 | } |
@@ -1060,10 +1094,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1060 | nor->program_opcode = SPINOR_OP_BP; | 1094 | nor->program_opcode = SPINOR_OP_BP; |
1061 | 1095 | ||
1062 | /* write one byte. */ | 1096 | /* write one byte. */ |
1063 | nor->write(nor, to, 1, retlen, buf); | 1097 | ret = nor->write(nor, to, 1, buf); |
1098 | if (ret < 0) | ||
1099 | goto sst_write_err; | ||
1100 | WARN(ret != 1, "While writing 1 byte written %i bytes\n", | ||
1101 | (int)ret); | ||
1064 | ret = spi_nor_wait_till_ready(nor); | 1102 | ret = spi_nor_wait_till_ready(nor); |
1065 | if (ret) | 1103 | if (ret) |
1066 | goto time_out; | 1104 | goto sst_write_err; |
1067 | } | 1105 | } |
1068 | to += actual; | 1106 | to += actual; |
1069 | 1107 | ||
@@ -1072,10 +1110,14 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1072 | nor->program_opcode = SPINOR_OP_AAI_WP; | 1110 | nor->program_opcode = SPINOR_OP_AAI_WP; |
1073 | 1111 | ||
1074 | /* write two bytes. */ | 1112 | /* write two bytes. */ |
1075 | nor->write(nor, to, 2, retlen, buf + actual); | 1113 | ret = nor->write(nor, to, 2, buf + actual); |
1114 | if (ret < 0) | ||
1115 | goto sst_write_err; | ||
1116 | WARN(ret != 2, "While writing 2 bytes written %i bytes\n", | ||
1117 | (int)ret); | ||
1076 | ret = spi_nor_wait_till_ready(nor); | 1118 | ret = spi_nor_wait_till_ready(nor); |
1077 | if (ret) | 1119 | if (ret) |
1078 | goto time_out; | 1120 | goto sst_write_err; |
1079 | to += 2; | 1121 | to += 2; |
1080 | nor->sst_write_second = true; | 1122 | nor->sst_write_second = true; |
1081 | } | 1123 | } |
@@ -1084,21 +1126,26 @@ static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1084 | write_disable(nor); | 1126 | write_disable(nor); |
1085 | ret = spi_nor_wait_till_ready(nor); | 1127 | ret = spi_nor_wait_till_ready(nor); |
1086 | if (ret) | 1128 | if (ret) |
1087 | goto time_out; | 1129 | goto sst_write_err; |
1088 | 1130 | ||
1089 | /* Write out trailing byte if it exists. */ | 1131 | /* Write out trailing byte if it exists. */ |
1090 | if (actual != len) { | 1132 | if (actual != len) { |
1091 | write_enable(nor); | 1133 | write_enable(nor); |
1092 | 1134 | ||
1093 | nor->program_opcode = SPINOR_OP_BP; | 1135 | nor->program_opcode = SPINOR_OP_BP; |
1094 | nor->write(nor, to, 1, retlen, buf + actual); | 1136 | ret = nor->write(nor, to, 1, buf + actual); |
1095 | 1137 | if (ret < 0) | |
1138 | goto sst_write_err; | ||
1139 | WARN(ret != 1, "While writing 1 byte written %i bytes\n", | ||
1140 | (int)ret); | ||
1096 | ret = spi_nor_wait_till_ready(nor); | 1141 | ret = spi_nor_wait_till_ready(nor); |
1097 | if (ret) | 1142 | if (ret) |
1098 | goto time_out; | 1143 | goto sst_write_err; |
1099 | write_disable(nor); | 1144 | write_disable(nor); |
1145 | actual += 1; | ||
1100 | } | 1146 | } |
1101 | time_out: | 1147 | sst_write_err: |
1148 | *retlen += actual; | ||
1102 | spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); | 1149 | spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); |
1103 | return ret; | 1150 | return ret; |
1104 | } | 1151 | } |
@@ -1112,8 +1159,8 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1112 | size_t *retlen, const u_char *buf) | 1159 | size_t *retlen, const u_char *buf) |
1113 | { | 1160 | { |
1114 | struct spi_nor *nor = mtd_to_spi_nor(mtd); | 1161 | struct spi_nor *nor = mtd_to_spi_nor(mtd); |
1115 | u32 page_offset, page_size, i; | 1162 | size_t page_offset, page_remain, i; |
1116 | int ret; | 1163 | ssize_t ret; |
1117 | 1164 | ||
1118 | dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); | 1165 | dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); |
1119 | 1166 | ||
@@ -1121,35 +1168,37 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
1121 | if (ret) | 1168 | if (ret) |
1122 | return ret; | 1169 | return ret; |
1123 | 1170 | ||
1124 | write_enable(nor); | 1171 | for (i = 0; i < len; ) { |
1125 | 1172 | ssize_t written; | |
1126 | page_offset = to & (nor->page_size - 1); | ||
1127 | 1173 | ||
1128 | /* do all the bytes fit onto one page? */ | 1174 | page_offset = (to + i) & (nor->page_size - 1); |
1129 | if (page_offset + len <= nor->page_size) { | 1175 | WARN_ONCE(page_offset, |
1130 | nor->write(nor, to, len, retlen, buf); | 1176 | "Writing at offset %zu into a NOR page. Writing partial pages may decrease reliability and increase wear of NOR flash.", |
1131 | } else { | 1177 | page_offset); |
1132 | /* the size of data remaining on the first page */ | 1178 | /* the size of data remaining on the first page */ |
1133 | page_size = nor->page_size - page_offset; | 1179 | page_remain = min_t(size_t, |
1134 | nor->write(nor, to, page_size, retlen, buf); | 1180 | nor->page_size - page_offset, len - i); |
1135 | |||
1136 | /* write everything in nor->page_size chunks */ | ||
1137 | for (i = page_size; i < len; i += page_size) { | ||
1138 | page_size = len - i; | ||
1139 | if (page_size > nor->page_size) | ||
1140 | page_size = nor->page_size; | ||
1141 | 1181 | ||
1142 | ret = spi_nor_wait_till_ready(nor); | 1182 | write_enable(nor); |
1143 | if (ret) | 1183 | ret = nor->write(nor, to + i, page_remain, buf + i); |
1144 | goto write_err; | 1184 | if (ret < 0) |
1145 | 1185 | goto write_err; | |
1146 | write_enable(nor); | 1186 | written = ret; |
1147 | 1187 | ||
1148 | nor->write(nor, to + i, page_size, retlen, buf + i); | 1188 | ret = spi_nor_wait_till_ready(nor); |
1189 | if (ret) | ||
1190 | goto write_err; | ||
1191 | *retlen += written; | ||
1192 | i += written; | ||
1193 | if (written != page_remain) { | ||
1194 | dev_err(nor->dev, | ||
1195 | "While writing %zu bytes written %zd bytes\n", | ||
1196 | page_remain, written); | ||
1197 | ret = -EIO; | ||
1198 | goto write_err; | ||
1149 | } | 1199 | } |
1150 | } | 1200 | } |
1151 | 1201 | ||
1152 | ret = spi_nor_wait_till_ready(nor); | ||
1153 | write_err: | 1202 | write_err: |
1154 | spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); | 1203 | spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); |
1155 | return ret; | 1204 | return ret; |
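The reworked spi_nor_write() loop above trims the first chunk to the end of the current flash page and keeps every later chunk page-aligned, so no single program operation crosses a page boundary. A small standalone example of the chunk arithmetic, with illustrative values only:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t page_size = 256;		/* typical nor->page_size */
	size_t to = 0x1ff0;		/* 16 bytes below a page boundary */
	size_t len = 300;
	size_t i = 0;

	while (i < len) {
		size_t page_offset = (to + i) & (page_size - 1);
		size_t page_remain = page_size - page_offset;

		if (page_remain > len - i)
			page_remain = len - i;
		printf("program %zu bytes at 0x%zx\n", page_remain, to + i);
		i += page_remain;
	}
	/* prints 16 bytes at 0x1ff0, then 256 at 0x2000, then 28 at 0x2100 */
	return 0;
}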
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c index daf82ba7aba0..41b13d1cdcc4 100644 --- a/drivers/mtd/ssfdc.c +++ b/drivers/mtd/ssfdc.c | |||
@@ -380,8 +380,7 @@ static int ssfdcr_readsect(struct mtd_blktrans_dev *dev, | |||
380 | " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, | 380 | " block_addr=%d\n", logic_sect_no, sectors_per_block, offset, |
381 | block_address); | 381 | block_address); |
382 | 382 | ||
383 | if (block_address >= ssfdc->map_len) | 383 | BUG_ON(block_address >= ssfdc->map_len); |
384 | BUG(); | ||
385 | 384 | ||
386 | block_address = ssfdc->logic_block_map[block_address]; | 385 | block_address = ssfdc->logic_block_map[block_address]; |
387 | 386 | ||
diff --git a/drivers/mtd/tests/nandbiterrs.c b/drivers/mtd/tests/nandbiterrs.c index 09a4ccac53a2..f26dec896afa 100644 --- a/drivers/mtd/tests/nandbiterrs.c +++ b/drivers/mtd/tests/nandbiterrs.c | |||
@@ -290,7 +290,7 @@ static int overwrite_test(void) | |||
290 | 290 | ||
291 | while (opno < max_overwrite) { | 291 | while (opno < max_overwrite) { |
292 | 292 | ||
293 | err = rewrite_page(0); | 293 | err = write_page(0); |
294 | if (err) | 294 | if (err) |
295 | break; | 295 | break; |
296 | 296 | ||
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index fbe8e164a4ee..8dd6e01f45c0 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -783,6 +783,7 @@ static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) | |||
783 | * NAND Flash Manufacturer ID Codes | 783 | * NAND Flash Manufacturer ID Codes |
784 | */ | 784 | */ |
785 | #define NAND_MFR_TOSHIBA 0x98 | 785 | #define NAND_MFR_TOSHIBA 0x98 |
786 | #define NAND_MFR_ESMT 0xc8 | ||
786 | #define NAND_MFR_SAMSUNG 0xec | 787 | #define NAND_MFR_SAMSUNG 0xec |
787 | #define NAND_MFR_FUJITSU 0x04 | 788 | #define NAND_MFR_FUJITSU 0x04 |
788 | #define NAND_MFR_NATIONAL 0x8f | 789 | #define NAND_MFR_NATIONAL 0x8f |
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 7f041bd88b82..c425c7b4c2a0 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h | |||
@@ -173,10 +173,10 @@ struct spi_nor { | |||
173 | int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); | 173 | int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); |
174 | int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); | 174 | int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); |
175 | 175 | ||
176 | int (*read)(struct spi_nor *nor, loff_t from, | 176 | ssize_t (*read)(struct spi_nor *nor, loff_t from, |
177 | size_t len, size_t *retlen, u_char *read_buf); | 177 | size_t len, u_char *read_buf); |
178 | void (*write)(struct spi_nor *nor, loff_t to, | 178 | ssize_t (*write)(struct spi_nor *nor, loff_t to, |
179 | size_t len, size_t *retlen, const u_char *write_buf); | 179 | size_t len, const u_char *write_buf); |
180 | int (*erase)(struct spi_nor *nor, loff_t offs); | 180 | int (*erase)(struct spi_nor *nor, loff_t offs); |
181 | 181 | ||
182 | int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); | 182 | int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); |
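With the prototypes above, a controller's read and write hooks now return the number of bytes actually transferred (or a negative errno) instead of updating a *retlen argument, and the spi-nor core loops until the request is complete, as the spi_nor_read() hunk earlier in this commit shows. A userspace model of that contract; toy_read() and read_all() are illustrative names, not kernel symbols:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define XFER_MAX 64	/* pretend the controller moves at most 64 bytes per call */

static unsigned char flash[4096];

/* Models the new hook: return bytes transferred or a negative error. */
static ssize_t toy_read(size_t from, size_t len, unsigned char *buf)
{
	size_t n = len < XFER_MAX ? len : XFER_MAX;

	memcpy(buf, flash + from, n);
	return n;
}

/* The caller loops the same way the reworked spi_nor_read() does. */
static int read_all(size_t from, size_t len, unsigned char *buf)
{
	while (len) {
		ssize_t ret = toy_read(from, len, buf);

		if (ret <= 0)
			return ret ? (int)ret : -5;	/* 0 bytes maps to -EIO */
		buf += ret;
		from += ret;
		len -= ret;
	}
	return 0;
}

int main(void)
{
	unsigned char buf[200];

	printf("read_all() returned %d\n", read_all(16, sizeof(buf), buf));
	return 0;
}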