-rw-r--r--  Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt  5
-rw-r--r--  Documentation/devicetree/bindings/mtd/cadence-quadspi.txt  5
-rw-r--r--  Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt  13
-rw-r--r--  Documentation/devicetree/bindings/mtd/stm32-quadspi.txt  43
-rw-r--r--  Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt  51
-rw-r--r--  MAINTAINERS  8
-rw-r--r--  drivers/dma/mxs-dma.c  25
-rw-r--r--  drivers/mtd/Kconfig  2
-rw-r--r--  drivers/mtd/Makefile  1
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  143
-rw-r--r--  drivers/mtd/hyperbus/Kconfig  23
-rw-r--r--  drivers/mtd/hyperbus/Makefile  4
-rw-r--r--  drivers/mtd/hyperbus/hbmc-am654.c  147
-rw-r--r--  drivers/mtd/hyperbus/hyperbus-core.c  153
-rw-r--r--  drivers/mtd/mtdconcat.c  37
-rw-r--r--  drivers/mtd/mtdcore.c  3
-rw-r--r--  drivers/mtd/nand/onenand/onenand_base.c  5
-rw-r--r--  drivers/mtd/nand/raw/brcmnand/brcmnand.c  263
-rw-r--r--  drivers/mtd/nand/raw/fsmc_nand.c  19
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/Makefile  1
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c  934
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c  1709
-rw-r--r--  drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h  64
-rw-r--r--  drivers/mtd/nand/raw/mtk_ecc.c  4
-rw-r--r--  drivers/mtd/nand/raw/mtk_ecc.h  2
-rw-r--r--  drivers/mtd/nand/raw/mtk_nand.c  88
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c  80
-rw-r--r--  drivers/mtd/nand/raw/nand_bch.c  3
-rw-r--r--  drivers/mtd/nand/raw/nand_macronix.c  45
-rw-r--r--  drivers/mtd/nand/raw/stm32_fmc2_nand.c  21
-rw-r--r--  drivers/mtd/nand/spi/Makefile  2
-rw-r--r--  drivers/mtd/nand/spi/core.c  5
-rw-r--r--  drivers/mtd/nand/spi/gigadevice.c  79
-rw-r--r--  drivers/mtd/nand/spi/paragon.c  147
-rw-r--r--  drivers/mtd/parsers/afs.c  3
-rw-r--r--  drivers/mtd/spi-nor/Kconfig  7
-rw-r--r--  drivers/mtd/spi-nor/Makefile  1
-rw-r--r--  drivers/mtd/spi-nor/cadence-quadspi.c  21
-rw-r--r--  drivers/mtd/spi-nor/intel-spi-pci.c  1
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c  37
-rw-r--r--  drivers/mtd/spi-nor/stm32-quadspi.c  707
-rw-r--r--  include/linux/dma/mxs-dma.h  24
-rw-r--r--  include/linux/mtd/cfi.h  7
-rw-r--r--  include/linux/mtd/hyperbus.h  84
-rw-r--r--  include/linux/mtd/mtd.h  6
-rw-r--r--  include/linux/mtd/onenand_regs.h  1
-rw-r--r--  include/linux/mtd/rawnand.h  36
-rw-r--r--  include/linux/mtd/spinand.h  35
-rw-r--r--  include/uapi/mtd/mtd-abi.h  10
49 files changed, 2624 insertions, 2490 deletions
diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
index 0b7c3738b66c..82156dc8f304 100644
--- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
+++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt
@@ -28,6 +28,7 @@ Required properties:
 	brcm,brcmnand-v7.0
 	brcm,brcmnand-v7.1
 	brcm,brcmnand-v7.2
+	brcm,brcmnand-v7.3
 	brcm,brcmnand
 - reg : the register start and length for NAND register region.
         (optional) Flash DMA register range (if present)
@@ -101,10 +102,10 @@ Required properties:
         number (e.g., 0, 1, 2, etc.)
 - #address-cells : see partition.txt
 - #size-cells : see partition.txt
-- nand-ecc-strength : see nand-controller.yaml
-- nand-ecc-step-size : must be 512 or 1024. See nand-controller.yaml
 
 Optional properties:
+- nand-ecc-strength : see nand-controller.yaml
+- nand-ecc-step-size : must be 512 or 1024. See nand-controller.yaml
 - nand-on-flash-bbt : boolean, to enable the on-flash BBT for this
         chip-select. See nand-controller.yaml
 - brcm,nand-oob-sector-size : integer, to denote the spare area sector size
diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
index 4345c3a6f530..945be7d5b236 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
+++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt
@@ -35,6 +35,9 @@ custom properties:
                   (qspi_n_ss_out).
 - cdns,tslch-ns : Delay in nanoseconds between setting qspi_n_ss_out low
                   and first bit transfer.
+- resets : Must contain an entry for each entry in reset-names.
+           See ../reset/reset.txt for details.
+- reset-names : Must include "qspi" and/or "qspi-ocp".
 
 Example:
 
@@ -50,6 +53,8 @@ Example:
 	cdns,fifo-depth = <128>;
 	cdns,fifo-width = <4>;
 	cdns,trigger-address = <0x00000000>;
+	resets = <&rst QSPI_RESET>, <&rst QSPI_OCP_RESET>;
+	reset-names = "qspi", "qspi-ocp";
 
 	flash0: n25q00@0 {
 		...
diff --git a/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt b/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt
new file mode 100644
index 000000000000..ad42f4db32f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt
@@ -0,0 +1,13 @@
+Bindings for HyperFlash NOR flash chips compliant with the Cypress HyperBus
+specification and supporting the Cypress CFI specification 1.5 command set.
+
+Required properties:
+- compatible : "cypress,hyperflash", "cfi-flash" for HyperFlash NOR chips
+- reg : Address of flash's memory map
+
+Example:
+
+	flash@0 {
+		compatible = "cypress,hyperflash", "cfi-flash";
+		reg = <0x0 0x4000000>;
+	};
diff --git a/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt b/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt
deleted file mode 100644
index ddd18c135148..000000000000
--- a/Documentation/devicetree/bindings/mtd/stm32-quadspi.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-* STMicroelectronics Quad Serial Peripheral Interface(QuadSPI)
-
-Required properties:
-- compatible: should be "st,stm32f469-qspi"
-- reg: the first contains the register location and length.
-       the second contains the memory mapping address and length
-- reg-names: should contain the reg names "qspi" "qspi_mm"
-- interrupts: should contain the interrupt for the device
-- clocks: the phandle of the clock needed by the QSPI controller
-- A pinctrl must be defined to set pins in mode of operation for QSPI transfer
-
-Optional properties:
-- resets: must contain the phandle to the reset controller.
-
-A spi flash must be a child of the nor_flash node and could have some
-properties. Also see jedec,spi-nor.txt.
-
-Required properties:
-- reg: chip-Select number (QSPI controller may connect 2 nor flashes)
-- spi-max-frequency: max frequency of spi bus
-
-Optional property:
-- spi-rx-bus-width: see ../spi/spi-bus.txt for the description
-
-Example:
-
-qspi: spi@a0001000 {
-	compatible = "st,stm32f469-qspi";
-	reg = <0xa0001000 0x1000>, <0x90000000 0x10000000>;
-	reg-names = "qspi", "qspi_mm";
-	interrupts = <91>;
-	resets = <&rcc STM32F4_AHB3_RESET(QSPI)>;
-	clocks = <&rcc 0 STM32F4_AHB3_CLOCK(QSPI)>;
-	pinctrl-names = "default";
-	pinctrl-0 = <&pinctrl_qspi0>;
-
-	flash@0 {
-		reg = <0>;
-		spi-rx-bus-width = <4>;
-		spi-max-frequency = <108000000>;
-		...
-	};
-};
diff --git a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt
new file mode 100644
index 000000000000..faa81c2e5da6
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt
@@ -0,0 +1,51 @@
+Bindings for HyperBus Memory Controller (HBMC) on TI's K3 family of SoCs
+
+Required properties:
+- compatible : "ti,am654-hbmc" for AM654 SoC
+- reg : Two entries:
+	 First entry points to the register space of the HBMC controller
+	 Second entry points to the memory-map region dedicated for
+	 MMIO access to attached flash devices
+- ranges : Address translation from offset within CS to allocated MMIO
+	   space in SoC
+
+Optional properties:
+- mux-controls : phandle to the multiplexer that controls selection of
+		 HBMC vs OSPI inside Flash SubSystem (FSS). Default is OSPI,
+		 if property is absent.
+		 See Documentation/devicetree/bindings/mux/reg-mux.txt
+		 for mmio-mux binding details
+
+Example:
+
+	system-controller@47000000 {
+		compatible = "syscon", "simple-mfd";
+		reg = <0x0 0x47000000 0x0 0x100>;
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		hbmc_mux: multiplexer {
+			compatible = "mmio-mux";
+			#mux-control-cells = <1>;
+			mux-reg-masks = <0x4 0x2>; /* 0: reg 0x4, bit 1 */
+		};
+	};
+
+	hbmc: hyperbus@47034000 {
+		compatible = "ti,am654-hbmc";
+		reg = <0x0 0x47034000 0x0 0x100>,
+			<0x5 0x00000000 0x1 0x0000000>;
+		power-domains = <&k3_pds 55>;
+		#address-cells = <2>;
+		#size-cells = <1>;
+		ranges = <0x0 0x0 0x5 0x00000000 0x4000000>, /* CS0 - 64MB */
+			 <0x1 0x0 0x5 0x04000000 0x4000000>; /* CS1 - 64MB */
+		mux-controls = <&hbmc_mux 0>;
+
+		/* Slave flash node */
+		flash@0,0 {
+			compatible = "cypress,hyperflash", "cfi-flash";
+			reg = <0x0 0x0 0x4000000>;
+		};
+	};
diff --git a/MAINTAINERS b/MAINTAINERS
index 32bb6280c219..211ea3a199bd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7435,6 +7435,14 @@ F: include/asm-generic/mshyperv.h
 F:	tools/hv/
 F:	Documentation/ABI/stable/sysfs-bus-vmbus
 
+HYPERBUS SUPPORT
+M:	Vignesh Raghavendra <vigneshr@ti.com>
+S:	Supported
+F:	drivers/mtd/hyperbus/
+F:	include/linux/mtd/hyperbus.h
+F:	Documentation/devicetree/bindings/mtd/cypress,hyperflash.txt
+F:	Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt
+
 HYPERVISOR VIRTUAL CONSOLE DRIVER
 L:	linuxppc-dev@lists.ozlabs.org
 S:	Odd Fixes
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 22cc7f68ef6e..20a9cb7cb6d3 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/list.h>
+#include <linux/dma/mxs-dma.h>
 
 #include <asm/irq.h>
 
@@ -77,6 +78,7 @@
 #define BM_CCW_COMMAND		(3 << 0)
 #define CCW_CHAIN		(1 << 2)
 #define CCW_IRQ			(1 << 3)
+#define CCW_WAIT4RDY		(1 << 5)
 #define CCW_DEC_SEM		(1 << 6)
 #define CCW_WAIT4END		(1 << 7)
 #define CCW_HALT_ON_TERM	(1 << 8)
@@ -477,16 +479,16 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
  *            ......
  *            ->device_prep_slave_sg(0);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK);
  *            ......
  * [3] If there are more than two DMA commands in the DMA chain, the code
  *     should be:
  *            ......
  *            ->device_prep_slave_sg(0);                                // First
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK);
  *            ......
- *            ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
+ *            ->device_prep_slave_sg(DMA_CTRL_ACK); // Last
  *            ......
  */
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
@@ -500,13 +502,12 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 	struct scatterlist *sg;
 	u32 i, j;
 	u32 *pio;
-	bool append = flags & DMA_PREP_INTERRUPT;
-	int idx = append ? mxs_chan->desc_count : 0;
+	int idx = 0;
 
-	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
-		return NULL;
+	if (mxs_chan->status == DMA_IN_PROGRESS)
+		idx = mxs_chan->desc_count;
 
-	if (sg_len + (append ? idx : 0) > NUM_CCW) {
+	if (sg_len + idx > NUM_CCW) {
 		dev_err(mxs_dma->dma_device.dev,
 				"maximum number of sg exceeded: %d > %d\n",
 				sg_len, NUM_CCW);
@@ -520,7 +521,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 	 * If the sg is prepared with append flag set, the sg
 	 * will be appended to the last prepared sg.
 	 */
-	if (append) {
+	if (idx) {
 		BUG_ON(idx < 1);
 		ccw = &mxs_chan->ccw[idx - 1];
 		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
@@ -541,12 +542,14 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 		ccw->bits = 0;
 		ccw->bits |= CCW_IRQ;
 		ccw->bits |= CCW_DEC_SEM;
-		if (flags & DMA_CTRL_ACK)
+		if (flags & MXS_DMA_CTRL_WAIT4END)
 			ccw->bits |= CCW_WAIT4END;
 		ccw->bits |= CCW_HALT_ON_TERM;
 		ccw->bits |= CCW_TERM_FLUSH;
 		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
 		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
+		if (flags & MXS_DMA_CTRL_WAIT4RDY)
+			ccw->bits |= CCW_WAIT4RDY;
 	} else {
 		for_each_sg(sgl, sg, sg_len, i) {
 			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
@@ -573,7 +576,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
 			ccw->bits &= ~CCW_CHAIN;
 			ccw->bits |= CCW_IRQ;
 			ccw->bits |= CCW_DEC_SEM;
-			if (flags & DMA_CTRL_ACK)
+			if (flags & MXS_DMA_CTRL_WAIT4END)
 				ccw->bits |= CCW_WAIT4END;
 		}
 	}
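With this change, clients no longer overload the generic dmaengine flags: the new custom flags from include/linux/dma/mxs-dma.h travel in the prep call's flags argument instead. A minimal sketch of a client chaining two descriptors under the new scheme (gpmi-nand is one such user); the channel and scatterlists are assumed to be already set up, and the example_* names are hypothetical:

#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>

/* Chain two commands; only the final one waits for the DMA to end. */
static int example_issue_chain(struct dma_chan *chan,
			       struct scatterlist *cmd_sgl, unsigned int cmd_len,
			       struct scatterlist *data_sgl, unsigned int data_len)
{
	struct dma_async_tx_descriptor *desc;

	/* first command in the chain: no flags */
	desc = dmaengine_prep_slave_sg(chan, cmd_sgl, cmd_len,
				       DMA_MEM_TO_DEV, 0);
	if (!desc)
		return -EINVAL;

	/* last command: set WAIT4END where DMA_CTRL_ACK used to go */
	desc = dmaengine_prep_slave_sg(chan, data_sgl, data_len,
				       DMA_DEV_TO_MEM,
				       MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}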
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index fb31a7f649a3..80a6e2dcd085 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -274,4 +274,6 @@ source "drivers/mtd/spi-nor/Kconfig"
 
 source "drivers/mtd/ubi/Kconfig"
 
+source "drivers/mtd/hyperbus/Kconfig"
+
 endif # MTD
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 806287e80e84..62d649a959e2 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -34,3 +34,4 @@ obj-y += chips/ lpddr/ maps/ devices/ nand/ tests/
 
 obj-$(CONFIG_MTD_SPI_NOR)	+= spi-nor/
 obj-$(CONFIG_MTD_UBI)		+= ubi/
+obj-$(CONFIG_MTD_HYPERBUS)	+= hyperbus/
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index c8fa5906bdf9..f4da7bd552e9 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -49,6 +49,16 @@
 #define SST49LF008A		0x005a
 #define AT49BV6416		0x00d6
 
+/*
+ * Status Register bit description. Used by flash devices that don't
+ * support DQ polling (e.g. HyperFlash)
+ */
+#define CFI_SR_DRB		BIT(7)
+#define CFI_SR_ESB		BIT(5)
+#define CFI_SR_PSB		BIT(4)
+#define CFI_SR_WBASB		BIT(3)
+#define CFI_SR_SLSB		BIT(1)
+
 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -97,6 +107,50 @@ static struct mtd_chip_driver cfi_amdstd_chipdrv = {
 	.module		= THIS_MODULE
 };
 
+/*
+ * Use status register to poll for Erase/write completion when DQ is not
+ * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
+ * CFI Primary Vendor-Specific Extended Query table 1.5
+ */
+static int cfi_use_status_reg(struct cfi_private *cfi)
+{
+	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
+	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
+
+	return extp->MinorVersion >= '5' &&
+		(extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
+}
+
+static void cfi_check_err_status(struct map_info *map, struct flchip *chip,
+				 unsigned long adr)
+{
+	struct cfi_private *cfi = map->fldrv_priv;
+	map_word status;
+
+	if (!cfi_use_status_reg(cfi))
+		return;
+
+	cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+			 cfi->device_type, NULL);
+	status = map_read(map, adr);
+
+	if (map_word_bitsset(map, status, CMD(0x3a))) {
+		unsigned long chipstatus = MERGESTATUS(status);
+
+		if (chipstatus & CFI_SR_ESB)
+			pr_err("%s erase operation failed, status %lx\n",
+			       map->name, chipstatus);
+		if (chipstatus & CFI_SR_PSB)
+			pr_err("%s program operation failed, status %lx\n",
+			       map->name, chipstatus);
+		if (chipstatus & CFI_SR_WBASB)
+			pr_err("%s buffer program command aborted, status %lx\n",
+			       map->name, chipstatus);
+		if (chipstatus & CFI_SR_SLSB)
+			pr_err("%s sector write protected, status %lx\n",
+			       map->name, chipstatus);
+	}
+}
 
 /* #define DEBUG_CFI_FEATURES */
 
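Taken together, the helpers above standardize one polling sequence for HyperFlash-style parts: issue the 0x70 Status Register Read command, read the word back, and test the Device Ready bit. A condensed sketch of that sequence, using only primitives that already exist in this file (example_wait_ready itself is hypothetical and not part of the patch):

static int example_wait_ready(struct map_info *map, struct flchip *chip,
			      unsigned long adr, unsigned long timeo)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word ready = CMD(CFI_SR_DRB);
	map_word sr;

	do {
		/* 0x70: Status Register Read (CFI 1.5 command set) */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start,
				 map, cfi, cfi->device_type, NULL);
		sr = map_read(map, adr);
		if (map_word_andequal(map, sr, ready, ready))
			return 0;	/* device ready */
	} while (time_before(jiffies, timeo));

	return -ETIMEDOUT;
}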
@@ -742,10 +796,25 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
  * correctly and is therefore not done (particularly with interleaved chips
  * as each chip must be checked independently of the others).
  */
-static int __xipram chip_ready(struct map_info *map, unsigned long addr)
+static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
+			       unsigned long addr)
 {
+	struct cfi_private *cfi = map->fldrv_priv;
 	map_word d, t;
 
+	if (cfi_use_status_reg(cfi)) {
+		map_word ready = CMD(CFI_SR_DRB);
+		/*
+		 * For chips that support status register, check device
+		 * ready bit
+		 */
+		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+				 cfi->device_type, NULL);
+		d = map_read(map, addr);
+
+		return map_word_andequal(map, d, ready, ready);
+	}
+
 	d = map_read(map, addr);
 	t = map_read(map, addr);
 
@@ -767,10 +836,30 @@ static int __xipram chip_ready(struct map_info *map, unsigned long addr)
  * as each chip must be checked independently of the others).
  *
  */
-static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
+static int __xipram chip_good(struct map_info *map, struct flchip *chip,
+			      unsigned long addr, map_word expected)
 {
+	struct cfi_private *cfi = map->fldrv_priv;
 	map_word oldd, curd;
 
+	if (cfi_use_status_reg(cfi)) {
+		map_word ready = CMD(CFI_SR_DRB);
+		map_word err = CMD(CFI_SR_PSB | CFI_SR_ESB);
+		/*
+		 * For chips that support status register, check device
+		 * ready bit and Erase/Program status bit to know if
+		 * operation succeeded.
+		 */
+		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
+				 cfi->device_type, NULL);
+		curd = map_read(map, addr);
+
+		if (map_word_andequal(map, curd, ready, ready))
+			return !map_word_bitsset(map, curd, err);
+
+		return 0;
+	}
+
 	oldd = map_read(map, addr);
 	curd = map_read(map, addr);
 
@@ -792,7 +881,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 
 	case FL_STATUS:
 		for (;;) {
-			if (chip_ready(map, adr))
+			if (chip_ready(map, chip, adr))
 				break;
 
 			if (time_after(jiffies, timeo)) {
@@ -830,7 +919,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
 			chip->state = FL_ERASE_SUSPENDING;
 			chip->erase_suspended = 1;
 			for (;;) {
-				if (chip_ready(map, adr))
+				if (chip_ready(map, chip, adr))
 					break;
 
 				if (time_after(jiffies, timeo)) {
@@ -1362,7 +1451,7 @@ static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
 	/* wait for chip to become ready */
 	timeo = jiffies + msecs_to_jiffies(2);
 	for (;;) {
-		if (chip_ready(map, adr))
+		if (chip_ready(map, chip, adr))
 			break;
 
 		if (time_after(jiffies, timeo)) {
@@ -1628,22 +1717,24 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 			continue;
 		}
 
-		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
+		if (time_after(jiffies, timeo) &&
+		    !chip_ready(map, chip, adr)) {
 			xip_enable(map, chip, adr);
 			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
 			xip_disable(map, chip, adr);
 			break;
 		}
 
-		if (chip_ready(map, adr))
+		if (chip_ready(map, chip, adr))
 			break;
 
 		/* Latency issues. Drop the lock, wait a while and retry */
 		UDELAY(map, chip, adr, 1);
 	}
 	/* Did we succeed? */
-	if (!chip_good(map, adr, datum)) {
+	if (!chip_good(map, chip, adr, datum)) {
 		/* reset on all failures. */
+		cfi_check_err_status(map, chip, adr);
 		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */
 
@@ -1881,10 +1972,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	 * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
 	 * the failure due to scheduling.
 	 */
-	if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
+	if (time_after(jiffies, timeo) &&
+	    !chip_good(map, chip, adr, datum))
 		break;
 
-	if (chip_good(map, adr, datum)) {
+	if (chip_good(map, chip, adr, datum)) {
 		xip_enable(map, chip, adr);
 		goto op_done;
 	}
@@ -1901,6 +1993,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	 * See e.g.
 	 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
 	 */
+	cfi_check_err_status(map, chip, adr);
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 			 cfi->device_type, NULL);
 	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
@@ -2018,7 +2111,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
 	 * If the driver thinks the chip is idle, and no toggle bits
 	 * are changing, then the chip is actually idle for sure.
 	 */
-	if (chip->state == FL_READY && chip_ready(map, adr))
+	if (chip->state == FL_READY && chip_ready(map, chip, adr))
 		return 0;
 
 	/*
@@ -2035,7 +2128,7 @@ static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
 
 	/* wait for the chip to become ready */
 	for (i = 0; i < jiffies_to_usecs(timeo); i++) {
-		if (chip_ready(map, adr))
+		if (chip_ready(map, chip, adr))
 			return 0;
 
 		udelay(1);
@@ -2099,14 +2192,15 @@ retry:
 	map_write(map, datum, adr);
 
 	for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
-		if (chip_ready(map, adr))
+		if (chip_ready(map, chip, adr))
 			break;
 
 		udelay(1);
 	}
 
-	if (!chip_good(map, adr, datum)) {
+	if (!chip_good(map, chip, adr, datum)) {
 		/* reset on all failures. */
+		cfi_check_err_status(map, chip, adr);
 		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */
 
@@ -2300,7 +2394,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 		chip->erase_suspended = 0;
 	}
 
-	if (chip_good(map, adr, map_word_ff(map)))
+	if (chip_good(map, chip, adr, map_word_ff(map)))
 		break;
 
 	if (time_after(jiffies, timeo)) {
@@ -2316,6 +2410,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
 	/* Did we succeed? */
 	if (ret) {
 		/* reset on all failures. */
+		cfi_check_err_status(map, chip, adr);
 		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */
 
@@ -2396,7 +2491,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 		chip->erase_suspended = 0;
 	}
 
-	if (chip_good(map, adr, map_word_ff(map)))
+	if (chip_good(map, chip, adr, map_word_ff(map)))
 		break;
 
 	if (time_after(jiffies, timeo)) {
@@ -2412,6 +2507,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 	/* Did we succeed? */
 	if (ret) {
 		/* reset on all failures. */
+		cfi_check_err_status(map, chip, adr);
 		map_write(map, CMD(0xF0), chip->start);
 		/* FIXME - should have reset delay before continuing */
 
@@ -2533,8 +2629,6 @@ struct ppb_lock {
 	int locked;
 };
 
-#define MAX_SECTORS			512
-
 #define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
 #define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
 #define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
@@ -2589,7 +2683,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
 	 */
 	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
 	for (;;) {
-		if (chip_ready(map, adr))
+		if (chip_ready(map, chip, adr))
 			break;
 
 		if (time_after(jiffies, timeo)) {
@@ -2633,6 +2727,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
 	int i;
 	int sectors;
 	int ret;
+	int max_sectors;
 
 	/*
 	 * PPB unlocking always unlocks all sectors of the flash chip.
@@ -2640,7 +2735,11 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
 	 * first check the locking status of all sectors and save
 	 * it for future use.
 	 */
-	sect = kcalloc(MAX_SECTORS, sizeof(struct ppb_lock), GFP_KERNEL);
+	max_sectors = 0;
+	for (i = 0; i < mtd->numeraseregions; i++)
+		max_sectors += regions[i].numblocks;
+
+	sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
 	if (!sect)
 		return -ENOMEM;
 
@@ -2689,9 +2788,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
 		}
 
 		sectors++;
-		if (sectors >= MAX_SECTORS) {
+		if (sectors >= max_sectors) {
 			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
-			       MAX_SECTORS);
+			       max_sectors);
 			kfree(sect);
 			return -EINVAL;
 		}
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
new file mode 100644
index 000000000000..cff6bbd226f5
--- /dev/null
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -0,0 +1,23 @@
+menuconfig MTD_HYPERBUS
+	tristate "HyperBus support"
+	select MTD_CFI
+	select MTD_MAP_BANK_WIDTH_2
+	select MTD_CFI_AMDSTD
+	select MTD_COMPLEX_MAPPINGS
+	help
+	  This is the framework for the HyperBus which can be used by
+	  the HyperBus Controller driver to communicate with
+	  HyperFlash. See Cypress HyperBus specification for more
+	  details
+
+if MTD_HYPERBUS
+
+config HBMC_AM654
+	tristate "HyperBus controller driver for AM65x SoC"
+	select MULTIPLEXER
+	select MUX_MMIO
+	help
+	  This is the driver for HyperBus controller on TI's AM65x and
+	  other SoCs
+
+endif # MTD_HYPERBUS
diff --git a/drivers/mtd/hyperbus/Makefile b/drivers/mtd/hyperbus/Makefile
new file mode 100644
index 000000000000..8a936e066f48
--- /dev/null
+++ b/drivers/mtd/hyperbus/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_MTD_HYPERBUS)	+= hyperbus-core.o
+obj-$(CONFIG_HBMC_AM654)	+= hbmc-am654.o
diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c
new file mode 100644
index 000000000000..08d543b124cd
--- /dev/null
+++ b/drivers/mtd/hyperbus/hbmc-am654.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Author: Vignesh Raghavendra <vigneshr@ti.com>
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
+#define AM654_HBMC_CALIB_COUNT 25
+
+struct am654_hbmc_priv {
+	struct hyperbus_ctlr ctlr;
+	struct hyperbus_device hbdev;
+	struct mux_control *mux_ctrl;
+};
+
+static int am654_hbmc_calibrate(struct hyperbus_device *hbdev)
+{
+	struct map_info *map = &hbdev->map;
+	struct cfi_private cfi;
+	int count = AM654_HBMC_CALIB_COUNT;
+	int pass_count = 0;
+	int ret;
+
+	cfi.interleave = 1;
+	cfi.device_type = CFI_DEVICETYPE_X16;
+	cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL);
+	cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL);
+
+	while (count--) {
+		ret = cfi_qry_present(map, 0, &cfi);
+		if (ret)
+			pass_count++;
+		else
+			pass_count = 0;
+		if (pass_count == 5)
+			break;
+	}
+
+	cfi_qry_mode_off(0, map, &cfi);
+
+	return ret;
+}
+
+static const struct hyperbus_ops am654_hbmc_ops = {
+	.calibrate = am654_hbmc_calibrate,
+};
+
+static int am654_hbmc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct am654_hbmc_priv *priv;
+	int ret;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, priv);
+
+	if (of_property_read_bool(dev->of_node, "mux-controls")) {
+		struct mux_control *control = devm_mux_control_get(dev, NULL);
+
+		if (IS_ERR(control))
+			return PTR_ERR(control);
+
+		ret = mux_control_select(control, 1);
+		if (ret) {
+			dev_err(dev, "Failed to select HBMC mux\n");
+			return ret;
+		}
+		priv->mux_ctrl = control;
+	}
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		goto disable_pm;
+	}
+
+	priv->ctlr.dev = dev;
+	priv->ctlr.ops = &am654_hbmc_ops;
+	priv->hbdev.ctlr = &priv->ctlr;
+	priv->hbdev.np = of_get_next_child(dev->of_node, NULL);
+	ret = hyperbus_register_device(&priv->hbdev);
+	if (ret) {
+		dev_err(dev, "failed to register controller\n");
+		pm_runtime_put_sync(&pdev->dev);
+		goto disable_pm;
+	}
+
+	return 0;
+disable_pm:
+	pm_runtime_disable(dev);
+	if (priv->mux_ctrl)
+		mux_control_deselect(priv->mux_ctrl);
+	return ret;
+}
+
+static int am654_hbmc_remove(struct platform_device *pdev)
+{
+	struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
+	int ret;
+
+	ret = hyperbus_unregister_device(&priv->hbdev);
+	if (priv->mux_ctrl)
+		mux_control_deselect(priv->mux_ctrl);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	return ret;
+}
+
+static const struct of_device_id am654_hbmc_dt_ids[] = {
+	{
+		.compatible = "ti,am654-hbmc",
+	},
+	{ /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);
+
+static struct platform_driver am654_hbmc_platform_driver = {
+	.probe = am654_hbmc_probe,
+	.remove = am654_hbmc_remove,
+	.driver = {
+		.name = "hbmc-am654",
+		.of_match_table = am654_hbmc_dt_ids,
+	},
+};
+
+module_platform_driver(am654_hbmc_platform_driver);
+
+MODULE_DESCRIPTION("HBMC driver for AM654 SoC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hbmc-am654");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c
new file mode 100644
index 000000000000..6af9ea34117d
--- /dev/null
+++ b/drivers/mtd/hyperbus/hyperbus-core.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+// Author: Vignesh Raghavendra <vigneshr@ti.com>
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/hyperbus.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/types.h>
+
+static struct hyperbus_device *map_to_hbdev(struct map_info *map)
+{
+	return container_of(map, struct hyperbus_device, map);
+}
+
+static map_word hyperbus_read16(struct map_info *map, unsigned long addr)
+{
+	struct hyperbus_device *hbdev = map_to_hbdev(map);
+	struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+	map_word read_data;
+
+	read_data.x[0] = ctlr->ops->read16(hbdev, addr);
+
+	return read_data;
+}
+
+static void hyperbus_write16(struct map_info *map, map_word d,
+			     unsigned long addr)
+{
+	struct hyperbus_device *hbdev = map_to_hbdev(map);
+	struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+	ctlr->ops->write16(hbdev, addr, d.x[0]);
+}
+
+static void hyperbus_copy_from(struct map_info *map, void *to,
+			       unsigned long from, ssize_t len)
+{
+	struct hyperbus_device *hbdev = map_to_hbdev(map);
+	struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+	ctlr->ops->copy_from(hbdev, to, from, len);
+}
+
+static void hyperbus_copy_to(struct map_info *map, unsigned long to,
+			     const void *from, ssize_t len)
+{
+	struct hyperbus_device *hbdev = map_to_hbdev(map);
+	struct hyperbus_ctlr *ctlr = hbdev->ctlr;
+
+	ctlr->ops->copy_to(hbdev, to, from, len);
+}
+
+int hyperbus_register_device(struct hyperbus_device *hbdev)
+{
+	const struct hyperbus_ops *ops;
+	struct hyperbus_ctlr *ctlr;
+	struct device_node *np;
+	struct map_info *map;
+	struct resource res;
+	struct device *dev;
+	int ret;
+
+	if (!hbdev || !hbdev->np || !hbdev->ctlr || !hbdev->ctlr->dev) {
+		pr_err("hyperbus: please fill all the necessary fields!\n");
+		return -EINVAL;
+	}
+
+	np = hbdev->np;
+	ctlr = hbdev->ctlr;
+	if (!of_device_is_compatible(np, "cypress,hyperflash"))
+		return -ENODEV;
+
+	hbdev->memtype = HYPERFLASH;
+
+	ret = of_address_to_resource(np, 0, &res);
+	if (ret)
+		return ret;
+
+	dev = ctlr->dev;
+	map = &hbdev->map;
+	map->size = resource_size(&res);
+	map->virt = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(map->virt))
+		return PTR_ERR(map->virt);
+
+	map->name = dev_name(dev);
+	map->bankwidth = 2;
+	map->device_node = np;
+
+	simple_map_init(map);
+	ops = ctlr->ops;
+	if (ops) {
+		if (ops->read16)
+			map->read = hyperbus_read16;
+		if (ops->write16)
+			map->write = hyperbus_write16;
+		if (ops->copy_to)
+			map->copy_to = hyperbus_copy_to;
+		if (ops->copy_from)
+			map->copy_from = hyperbus_copy_from;
+
+		if (ops->calibrate && !ctlr->calibrated) {
+			ret = ops->calibrate(hbdev);
+			if (!ret) {
+				dev_err(dev, "Calibration failed\n");
+				return -ENODEV;
+			}
+			ctlr->calibrated = true;
+		}
+	}
+
+	hbdev->mtd = do_map_probe("cfi_probe", map);
+	if (!hbdev->mtd) {
+		dev_err(dev, "probing of hyperbus device failed\n");
+		return -ENODEV;
+	}
+
+	hbdev->mtd->dev.parent = dev;
+	mtd_set_of_node(hbdev->mtd, np);
+
+	ret = mtd_device_register(hbdev->mtd, NULL, 0);
+	if (ret) {
+		dev_err(dev, "failed to register mtd device\n");
+		map_destroy(hbdev->mtd);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(hyperbus_register_device);
+
+int hyperbus_unregister_device(struct hyperbus_device *hbdev)
+{
+	int ret = 0;
+
+	if (hbdev && hbdev->mtd) {
+		ret = mtd_device_unregister(hbdev->mtd);
+		map_destroy(hbdev->mtd);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(hyperbus_unregister_device);
+
+MODULE_DESCRIPTION("HyperBus Framework");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
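Note that the core only overrides the simple_map_init() defaults for callbacks a controller actually provides, so an MMIO-capable controller like the AM654 one above can get away with just .calibrate. A minimal sketch of a controller that instead supplies its own 16-bit accessors, assuming the struct hyperbus_ops layout from include/linux/mtd/hyperbus.h added in this series (the foo_* names are hypothetical):

#include <linux/io.h>
#include <linux/mtd/hyperbus.h>

static u16 foo_hbmc_read16(struct hyperbus_device *hbdev, unsigned long addr)
{
	/* an indirect-access controller would poke its FIFO registers here;
	 * plain MMIO is shown only for brevity */
	return readw(hbdev->map.virt + addr);
}

static void foo_hbmc_write16(struct hyperbus_device *hbdev,
			     unsigned long addr, u16 val)
{
	writew(val, hbdev->map.virt + addr);
}

static const struct hyperbus_ops foo_hbmc_ops = {
	.read16	 = foo_hbmc_read16,
	.write16 = foo_hbmc_write16,
};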
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 7324ff832b41..170a7221b35f 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -437,7 +437,8 @@ static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
 	return err;
 }
 
-static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
+			 bool is_lock)
 {
 	struct mtd_concat *concat = CONCAT(mtd);
 	int i, err = -EINVAL;
@@ -456,7 +457,10 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 		else
 			size = len;
 
-		err = mtd_lock(subdev, ofs, size);
+		if (is_lock)
+			err = mtd_lock(subdev, ofs, size);
+		else
+			err = mtd_unlock(subdev, ofs, size);
 		if (err)
 			break;
 
@@ -471,35 +475,33 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 	return err;
 }
 
+static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	return concat_xxlock(mtd, ofs, len, true);
+}
+
 static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
+	return concat_xxlock(mtd, ofs, len, false);
+}
+
+static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
 	struct mtd_concat *concat = CONCAT(mtd);
-	int i, err = 0;
+	int i, err = -EINVAL;
 
 	for (i = 0; i < concat->num_subdev; i++) {
 		struct mtd_info *subdev = concat->subdev[i];
-		uint64_t size;
 
 		if (ofs >= subdev->size) {
-			size = 0;
 			ofs -= subdev->size;
 			continue;
 		}
-		if (ofs + len > subdev->size)
-			size = subdev->size - ofs;
-		else
-			size = len;
-
-		err = mtd_unlock(subdev, ofs, size);
-		if (err)
-			break;
 
-		len -= size;
-		if (len == 0)
+		if (ofs + len > subdev->size)
 			break;
 
-		err = -EINVAL;
-		ofs = 0;
+		return mtd_is_locked(subdev, ofs, len);
 	}
 
 	return err;
@@ -704,6 +706,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
 	concat->mtd._sync = concat_sync;
 	concat->mtd._lock = concat_lock;
 	concat->mtd._unlock = concat_unlock;
+	concat->mtd._is_locked = concat_is_locked;
 	concat->mtd._suspend = concat_suspend;
 	concat->mtd._resume = concat_resume;
 
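One behavioural detail follows from the loop in concat_is_locked() above: unlike lock/unlock, the query is not split across subdevices, so a range that straddles a subdevice boundary comes back as -EINVAL. A sketch of a caller, assuming an already-built concatenated device (the example_* name is hypothetical):

#include <linux/mtd/mtd.h>

static bool example_region_locked(struct mtd_info *concat_mtd,
				  loff_t ofs, uint64_t len)
{
	/* > 0 means locked, 0 unlocked; negative on error, including
	 * a range that spans two subdevices of the concatenation */
	int ret = mtd_is_locked(concat_mtd, ofs, len);

	return ret > 0;
}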
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 453242d6cf56..408615f29e57 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1124,6 +1124,9 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
 		return -EROFS;
 	if (!len)
 		return 0;
+	if (!mtd->oops_panic_write)
+		mtd->oops_panic_write = true;
+
 	return mtd->_panic_write(mtd, to, len, retlen, buf);
 }
 EXPORT_SYMBOL_GPL(mtd_panic_write);
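Setting mtd->oops_panic_write here is what lets a low-level driver notice, on every later wait, that it is running in panic context and must stop relying on interrupts; the brcmnand change further down in this pull is the first consumer. A sketch of the intended pattern, assuming a driver with both an IRQ completion and a register-polling path (the foo_* names and struct are hypothetical):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/mtd/mtd.h>

struct foo_ctrl {				/* hypothetical driver state */
	struct completion done;
};

static int foo_poll_status(struct foo_ctrl *ctrl);	/* hypothetical PIO path */

static int foo_wait_ready(struct mtd_info *mtd, struct foo_ctrl *ctrl)
{
	/* once mtd_panic_write() has run, IRQs can't be trusted */
	if (mtd->oops_panic_write)
		return foo_poll_status(ctrl);

	if (!wait_for_completion_timeout(&ctrl->done, msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}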
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index d759c02d9cb2..a1f8fe1abb10 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -3257,6 +3257,8 @@ static void onenand_check_features(struct mtd_info *mtd)
 
 	/* Lock scheme */
 	switch (density) {
+	case ONENAND_DEVICE_DENSITY_8Gb:
+		this->options |= ONENAND_HAS_NOP_1;
 	case ONENAND_DEVICE_DENSITY_4Gb:
 		if (ONENAND_IS_DDP(this))
 			this->options |= ONENAND_HAS_2PLANE;
@@ -3277,12 +3279,15 @@ static void onenand_check_features(struct mtd_info *mtd)
 			if ((this->version_id & 0xf) == 0xe)
 				this->options |= ONENAND_HAS_NOP_1;
 		}
+		this->options |= ONENAND_HAS_UNLOCK_ALL;
+		break;
 
 	case ONENAND_DEVICE_DENSITY_2Gb:
 		/* 2Gb DDP does not have 2 plane */
 		if (!ONENAND_IS_DDP(this))
 			this->options |= ONENAND_HAS_2PLANE;
 		this->options |= ONENAND_HAS_UNLOCK_ALL;
+		break;
 
 	case ONENAND_DEVICE_DENSITY_1Gb:
 		/* A-Die has all block unlock */
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 873527753f52..33310b8a6eb8 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -84,6 +84,12 @@ struct brcm_nand_dma_desc {
 #define FLASH_DMA_ECC_ERROR	(1 << 8)
 #define FLASH_DMA_CORR_ERROR	(1 << 9)
 
+/* Bitfields for DMA_MODE */
+#define FLASH_DMA_MODE_STOP_ON_ERROR	BIT(1) /* stop on Uncorr ECC error */
+#define FLASH_DMA_MODE_MODE		BIT(0) /* link list */
+#define FLASH_DMA_MODE_MASK		(FLASH_DMA_MODE_STOP_ON_ERROR |	\
+					 FLASH_DMA_MODE_MODE)
+
 /* 512B flash cache in the NAND controller HW */
 #define FC_SHIFT		9U
 #define FC_BYTES		512U
@@ -96,6 +102,51 @@ struct brcm_nand_dma_desc {
 #define NAND_CTRL_RDY			(INTFC_CTLR_READY | INTFC_FLASH_READY)
 #define NAND_POLL_STATUS_TIMEOUT_MS	100
 
+/* flash_dma registers */
+enum flash_dma_reg {
+	FLASH_DMA_REVISION = 0,
+	FLASH_DMA_FIRST_DESC,
+	FLASH_DMA_FIRST_DESC_EXT,
+	FLASH_DMA_CTRL,
+	FLASH_DMA_MODE,
+	FLASH_DMA_STATUS,
+	FLASH_DMA_INTERRUPT_DESC,
+	FLASH_DMA_INTERRUPT_DESC_EXT,
+	FLASH_DMA_ERROR_STATUS,
+	FLASH_DMA_CURRENT_DESC,
+	FLASH_DMA_CURRENT_DESC_EXT,
+};
+
+/* flash_dma registers v1 */
+static const u16 flash_dma_regs_v1[] = {
+	[FLASH_DMA_REVISION]		= 0x00,
+	[FLASH_DMA_FIRST_DESC]		= 0x04,
+	[FLASH_DMA_FIRST_DESC_EXT]	= 0x08,
+	[FLASH_DMA_CTRL]		= 0x0c,
+	[FLASH_DMA_MODE]		= 0x10,
+	[FLASH_DMA_STATUS]		= 0x14,
+	[FLASH_DMA_INTERRUPT_DESC]	= 0x18,
+	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x1c,
+	[FLASH_DMA_ERROR_STATUS]	= 0x20,
+	[FLASH_DMA_CURRENT_DESC]	= 0x24,
+	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x28,
+};
+
+/* flash_dma registers v4 */
+static const u16 flash_dma_regs_v4[] = {
+	[FLASH_DMA_REVISION]		= 0x00,
+	[FLASH_DMA_FIRST_DESC]		= 0x08,
+	[FLASH_DMA_FIRST_DESC_EXT]	= 0x0c,
+	[FLASH_DMA_CTRL]		= 0x10,
+	[FLASH_DMA_MODE]		= 0x14,
+	[FLASH_DMA_STATUS]		= 0x18,
+	[FLASH_DMA_INTERRUPT_DESC]	= 0x20,
+	[FLASH_DMA_INTERRUPT_DESC_EXT]	= 0x24,
+	[FLASH_DMA_ERROR_STATUS]	= 0x28,
+	[FLASH_DMA_CURRENT_DESC]	= 0x30,
+	[FLASH_DMA_CURRENT_DESC_EXT]	= 0x34,
+};
+
 /* Controller feature flags */
 enum {
 	BRCMNAND_HAS_1K_SECTORS = BIT(0),
@@ -128,6 +179,8 @@ struct brcmnand_controller {
 	/* List of NAND hosts (one for each chip-select) */
 	struct list_head host_list;
 
+	/* flash_dma reg */
+	const u16 *flash_dma_offsets;
 	struct brcm_nand_dma_desc *dma_desc;
 	dma_addr_t dma_pa;
 
@@ -151,6 +204,7 @@ struct brcmnand_controller {
 	u32 nand_cs_nand_xor;
 	u32 corr_stat_threshold;
 	u32 flash_dma_mode;
+	bool pio_poll_mode;
 };
 
 struct brcmnand_cfg {
@@ -462,7 +516,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
 	/* Register offsets */
 	if (ctrl->nand_version >= 0x0702)
 		ctrl->reg_offsets = brcmnand_regs_v72;
-	else if (ctrl->nand_version >= 0x0701)
+	else if (ctrl->nand_version == 0x0701)
 		ctrl->reg_offsets = brcmnand_regs_v71;
 	else if (ctrl->nand_version >= 0x0600)
 		ctrl->reg_offsets = brcmnand_regs_v60;
@@ -507,7 +561,7 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
 	}
 
 	/* Maximum spare area sector size (per 512B) */
-	if (ctrl->nand_version >= 0x0702)
+	if (ctrl->nand_version == 0x0702)
 		ctrl->max_oob = 128;
 	else if (ctrl->nand_version >= 0x0600)
 		ctrl->max_oob = 64;
@@ -538,6 +592,15 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
 	return 0;
 }
 
+static void brcmnand_flash_dma_revision_init(struct brcmnand_controller *ctrl)
+{
+	/* flash_dma register offsets */
+	if (ctrl->nand_version >= 0x0703)
+		ctrl->flash_dma_offsets = flash_dma_regs_v4;
+	else
+		ctrl->flash_dma_offsets = flash_dma_regs_v1;
+}
+
 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
 				    enum brcmnand_reg reg)
 {
@@ -580,6 +643,54 @@ static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
 	__raw_writel(val, ctrl->nand_fc + word * 4);
 }
 
+static void brcmnand_clear_ecc_addr(struct brcmnand_controller *ctrl)
+{
+
+	/* Clear error addresses */
+	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
+}
+
+static u64 brcmnand_get_uncorrecc_addr(struct brcmnand_controller *ctrl)
+{
+	u64 err_addr;
+
+	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_UNCORR_ADDR);
+	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+					     BRCMNAND_UNCORR_EXT_ADDR)
+					     & 0xffff) << 32);
+
+	return err_addr;
+}
+
+static u64 brcmnand_get_correcc_addr(struct brcmnand_controller *ctrl)
+{
+	u64 err_addr;
+
+	err_addr = brcmnand_read_reg(ctrl, BRCMNAND_CORR_ADDR);
+	err_addr |= ((u64)(brcmnand_read_reg(ctrl,
+					     BRCMNAND_CORR_EXT_ADDR)
+					     & 0xffff) << 32);
+
+	return err_addr;
+}
+
+static void brcmnand_set_cmd_addr(struct mtd_info *mtd, u64 addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+
+	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+			   (host->cs << 16) | ((addr >> 32) & 0xffff));
+	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+			   lower_32_bits(addr));
+	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+}
+
 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
 				     enum brcmnand_cs_reg reg)
 {
@@ -612,7 +723,7 @@ static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
 	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
 	int cs = host->cs;
 
-	if (ctrl->nand_version >= 0x0702)
+	if (ctrl->nand_version == 0x0702)
 		bits = 7;
 	else if (ctrl->nand_version >= 0x0600)
 		bits = 6;
@@ -666,7 +777,7 @@ enum {
 
 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
 {
-	if (ctrl->nand_version >= 0x0702)
+	if (ctrl->nand_version == 0x0702)
 		return GENMASK(7, 0);
 	else if (ctrl->nand_version >= 0x0600)
 		return GENMASK(6, 0);
@@ -796,39 +907,44 @@ static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
  * Flash DMA
  ***********************************************************************/
 
-enum flash_dma_reg {
-	FLASH_DMA_REVISION		= 0x00,
-	FLASH_DMA_FIRST_DESC		= 0x04,
-	FLASH_DMA_FIRST_DESC_EXT	= 0x08,
-	FLASH_DMA_CTRL			= 0x0c,
-	FLASH_DMA_MODE			= 0x10,
-	FLASH_DMA_STATUS		= 0x14,
-	FLASH_DMA_INTERRUPT_DESC	= 0x18,
-	FLASH_DMA_INTERRUPT_DESC_EXT	= 0x1c,
-	FLASH_DMA_ERROR_STATUS		= 0x20,
-	FLASH_DMA_CURRENT_DESC		= 0x24,
-	FLASH_DMA_CURRENT_DESC_EXT	= 0x28,
-};
-
 static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
 {
 	return ctrl->flash_dma_base;
 }
 
+static inline void disable_ctrl_irqs(struct brcmnand_controller *ctrl)
+{
+	if (ctrl->pio_poll_mode)
+		return;
+
+	if (has_flash_dma(ctrl)) {
+		ctrl->flash_dma_base = 0;
+		disable_irq(ctrl->dma_irq);
+	}
+
+	disable_irq(ctrl->irq);
+	ctrl->pio_poll_mode = true;
+}
+
 static inline bool flash_dma_buf_ok(const void *buf)
 {
 	return buf && !is_vmalloc_addr(buf) &&
 		likely(IS_ALIGNED((uintptr_t)buf, 4));
 }
 
-static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
-				    u32 val)
+static inline void flash_dma_writel(struct brcmnand_controller *ctrl,
+				    enum flash_dma_reg dma_reg, u32 val)
 {
+	u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
 	brcmnand_writel(val, ctrl->flash_dma_base + offs);
 }
 
-static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
+static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl,
+				  enum flash_dma_reg dma_reg)
 {
+	u16 offs = ctrl->flash_dma_offsets[dma_reg];
+
 	return brcmnand_readl(ctrl->flash_dma_base + offs);
 }
 
@@ -931,7 +1047,7 @@ static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
 	if (section >= sectors)
 		return -ERANGE;
 
-	oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes;
+	oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes;
 	oobregion->length = chip->ecc.bytes;
 
 	return 0;
@@ -1205,9 +1321,12 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
 {
 	struct brcmnand_controller *ctrl = host->ctrl;
 	int ret;
+	u64 cmd_addr;
+
+	cmd_addr = brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+	dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
 
-	dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
-		brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
 	BUG_ON(ctrl->cmd_pending != 0);
 	ctrl->cmd_pending = cmd;
 
@@ -1229,15 +1348,42 @@ static void brcmnand_cmd_ctrl(struct nand_chip *chip, int dat,
 	/* intentionally left blank */
 }
 
+static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	bool err = false;
+	int sts;
+
+	if (mtd->oops_panic_write) {
+		/* switch to interrupt polling and PIO mode */
+		disable_ctrl_irqs(ctrl);
+		sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
+					       NAND_CTRL_RDY, 0);
+		err = (sts < 0) ? true : false;
+	} else {
+		unsigned long timeo = msecs_to_jiffies(
+						NAND_POLL_STATUS_TIMEOUT_MS);
+		/* wait for completion interrupt */
+		sts = wait_for_completion_timeout(&ctrl->done, timeo);
+		err = (sts <= 0) ? true : false;
+	}
+
+	return err;
+}
+
 static int brcmnand_waitfunc(struct nand_chip *chip)
 {
 	struct brcmnand_host *host = nand_get_controller_data(chip);
 	struct brcmnand_controller *ctrl = host->ctrl;
-	unsigned long timeo = msecs_to_jiffies(100);
+	bool err = false;
 
 	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
-	if (ctrl->cmd_pending &&
-			wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
+	if (ctrl->cmd_pending)
+		err = brcmstb_nand_wait_for_completion(chip);
+
+	if (err) {
 		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
 					>> brcmnand_cmd_shift(ctrl);
 
@@ -1366,12 +1512,7 @@ static void brcmnand_cmdfunc(struct nand_chip *chip, unsigned command,
 	if (!native_cmd)
 		return;
 
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-		(host->cs << 16) | ((addr >> 32) & 0xffff));
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
-
+	brcmnand_set_cmd_addr(mtd, addr);
 	brcmnand_send_cmd(host, native_cmd);
 	brcmnand_waitfunc(chip);
 
@@ -1589,20 +1730,10 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
 	struct brcmnand_controller *ctrl = host->ctrl;
 	int i, j, ret = 0;
 
-	/* Clear error addresses */
-	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
-	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
-	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
-	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
-
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-			(host->cs << 16) | ((addr >> 32) & 0xffff));
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+	brcmnand_clear_ecc_addr(ctrl);
 
 	for (i = 0; i < trans; i++, addr += FC_BYTES) {
-		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+		brcmnand_set_cmd_addr(mtd, addr);
1604 lower_32_bits(addr));
1605 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1606 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */ 1737 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
1607 brcmnand_send_cmd(host, CMD_PAGE_READ); 1738 brcmnand_send_cmd(host, CMD_PAGE_READ);
1608 brcmnand_waitfunc(chip); 1739 brcmnand_waitfunc(chip);
@@ -1622,21 +1753,15 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
1622 host->hwcfg.sector_size_1k); 1753 host->hwcfg.sector_size_1k);
1623 1754
1624 if (!ret) { 1755 if (!ret) {
1625 *err_addr = brcmnand_read_reg(ctrl, 1756 *err_addr = brcmnand_get_uncorrecc_addr(ctrl);
1626 BRCMNAND_UNCORR_ADDR) | 1757
1627 ((u64)(brcmnand_read_reg(ctrl,
1628 BRCMNAND_UNCORR_EXT_ADDR)
1629 & 0xffff) << 32);
1630 if (*err_addr) 1758 if (*err_addr)
1631 ret = -EBADMSG; 1759 ret = -EBADMSG;
1632 } 1760 }
1633 1761
1634 if (!ret) { 1762 if (!ret) {
1635 *err_addr = brcmnand_read_reg(ctrl, 1763 *err_addr = brcmnand_get_correcc_addr(ctrl);
1636 BRCMNAND_CORR_ADDR) | 1764
1637 ((u64)(brcmnand_read_reg(ctrl,
1638 BRCMNAND_CORR_EXT_ADDR)
1639 & 0xffff) << 32);
1640 if (*err_addr) 1765 if (*err_addr)
1641 ret = -EUCLEAN; 1766 ret = -EUCLEAN;
1642 } 1767 }
@@ -1703,7 +1828,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
1703 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); 1828 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
1704 1829
1705try_dmaread: 1830try_dmaread:
1706 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); 1831 brcmnand_clear_ecc_addr(ctrl);
1707 1832
1708 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { 1833 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
1709 err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES, 1834 err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
@@ -1850,15 +1975,9 @@ static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
1850 goto out; 1975 goto out;
1851 } 1976 }
1852 1977
1853 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
1854 (host->cs << 16) | ((addr >> 32) & 0xffff));
1855 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
1856
1857 for (i = 0; i < trans; i++, addr += FC_BYTES) { 1978 for (i = 0; i < trans; i++, addr += FC_BYTES) {
1858 /* full address MUST be set before populating FC */ 1979 /* full address MUST be set before populating FC */
1859 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, 1980 brcmnand_set_cmd_addr(mtd, addr);
1860 lower_32_bits(addr));
1861 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1862 1981
1863 if (buf) { 1982 if (buf) {
1864 brcmnand_soc_data_bus_prepare(ctrl->soc, false); 1983 brcmnand_soc_data_bus_prepare(ctrl->soc, false);
@@ -2136,6 +2255,17 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
2136 return -EINVAL; 2255 return -EINVAL;
2137 } 2256 }
2138 2257
2258 if (chip->ecc.mode != NAND_ECC_NONE &&
2259 (!chip->ecc.size || !chip->ecc.strength)) {
2260 if (chip->base.eccreq.step_size && chip->base.eccreq.strength) {
2261 /* use detected ECC parameters */
2262 chip->ecc.size = chip->base.eccreq.step_size;
2263 chip->ecc.strength = chip->base.eccreq.strength;
2264 dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n",
2265 chip->ecc.size, chip->ecc.strength);
2266 }
2267 }
2268
2139 switch (chip->ecc.size) { 2269 switch (chip->ecc.size) {
2140 case 512: 2270 case 512:
2141 if (chip->ecc.algo == NAND_ECC_HAMMING) 2271 if (chip->ecc.algo == NAND_ECC_HAMMING)
@@ -2395,6 +2525,7 @@ static const struct of_device_id brcmnand_of_match[] = {
2395 { .compatible = "brcm,brcmnand-v7.0" }, 2525 { .compatible = "brcm,brcmnand-v7.0" },
2396 { .compatible = "brcm,brcmnand-v7.1" }, 2526 { .compatible = "brcm,brcmnand-v7.1" },
2397 { .compatible = "brcm,brcmnand-v7.2" }, 2527 { .compatible = "brcm,brcmnand-v7.2" },
2528 { .compatible = "brcm,brcmnand-v7.3" },
2398 {}, 2529 {},
2399}; 2530};
2400MODULE_DEVICE_TABLE(of, brcmnand_of_match); 2531MODULE_DEVICE_TABLE(of, brcmnand_of_match);
@@ -2481,7 +2612,11 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
2481 goto err; 2612 goto err;
2482 } 2613 }
2483 2614
2484 flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */ 2615 /* initialize the dma version */
2616 brcmnand_flash_dma_revision_init(ctrl);
2617
2618 /* linked-list and stop on error */
2619 flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
2485 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); 2620 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2486 2621
2487 /* Allocate descriptor(s) */ 2622 /* Allocate descriptor(s) */
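FLASH_DMA_MODE_MASK replaces the bare 1 so that both mode bits named in the comment are set. The mask is presumably defined alongside the new offset tables, as something like this hypothetical reconstruction (the definitions are not shown in this hunk):

	#define FLASH_DMA_MODE_STOP_ON_ERROR	BIT(1)	/* stop on DMA error */
	#define FLASH_DMA_MODE_MODE		BIT(0)	/* linked-list mode */
	#define FLASH_DMA_MODE_MASK		(FLASH_DMA_MODE_STOP_ON_ERROR | \
						 FLASH_DMA_MODE_MODE)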
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 6c7ca41354be..a6964feeec77 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -613,28 +613,20 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
613 for (op_id = 0; op_id < op->ninstrs; op_id++) { 613 for (op_id = 0; op_id < op->ninstrs; op_id++) {
614 instr = &op->instrs[op_id]; 614 instr = &op->instrs[op_id];
615 615
616 nand_op_trace(" ", instr);
617
616 switch (instr->type) { 618 switch (instr->type) {
617 case NAND_OP_CMD_INSTR: 619 case NAND_OP_CMD_INSTR:
618 pr_debug(" ->CMD [0x%02x]\n",
619 instr->ctx.cmd.opcode);
620
621 writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va); 620 writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
622 break; 621 break;
623 622
624 case NAND_OP_ADDR_INSTR: 623 case NAND_OP_ADDR_INSTR:
625 pr_debug(" ->ADDR [%d cyc]",
626 instr->ctx.addr.naddrs);
627
628 for (i = 0; i < instr->ctx.addr.naddrs; i++) 624 for (i = 0; i < instr->ctx.addr.naddrs; i++)
629 writeb_relaxed(instr->ctx.addr.addrs[i], 625 writeb_relaxed(instr->ctx.addr.addrs[i],
630 host->addr_va); 626 host->addr_va);
631 break; 627 break;
632 628
633 case NAND_OP_DATA_IN_INSTR: 629 case NAND_OP_DATA_IN_INSTR:
634 pr_debug(" ->DATA_IN [%d B%s]\n", instr->ctx.data.len,
635 instr->ctx.data.force_8bit ?
636 ", force 8-bit" : "");
637
638 if (host->mode == USE_DMA_ACCESS) 630 if (host->mode == USE_DMA_ACCESS)
639 fsmc_read_buf_dma(host, instr->ctx.data.buf.in, 631 fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
640 instr->ctx.data.len); 632 instr->ctx.data.len);
@@ -644,10 +636,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
644 break; 636 break;
645 637
646 case NAND_OP_DATA_OUT_INSTR: 638 case NAND_OP_DATA_OUT_INSTR:
647 pr_debug(" ->DATA_OUT [%d B%s]\n", instr->ctx.data.len,
648 instr->ctx.data.force_8bit ?
649 ", force 8-bit" : "");
650
651 if (host->mode == USE_DMA_ACCESS) 639 if (host->mode == USE_DMA_ACCESS)
652 fsmc_write_buf_dma(host, 640 fsmc_write_buf_dma(host,
653 instr->ctx.data.buf.out, 641 instr->ctx.data.buf.out,
@@ -658,9 +646,6 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
658 break; 646 break;
659 647
660 case NAND_OP_WAITRDY_INSTR: 648 case NAND_OP_WAITRDY_INSTR:
661 pr_debug(" ->WAITRDY [max %d ms]\n",
662 instr->ctx.waitrdy.timeout_ms);
663
664 ret = nand_soft_waitrdy(chip, 649 ret = nand_soft_waitrdy(chip,
665 instr->ctx.waitrdy.timeout_ms); 650 instr->ctx.waitrdy.timeout_ms);
666 break; 651 break;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/Makefile b/drivers/mtd/nand/raw/gpmi-nand/Makefile
index 30ceee9704d1..9bd81a31e02e 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/Makefile
+++ b/drivers/mtd/nand/raw/gpmi-nand/Makefile
@@ -1,4 +1,3 @@
1# SPDX-License-Identifier: GPL-2.0-only 1# SPDX-License-Identifier: GPL-2.0-only
2obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o 2obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
3gpmi_nand-objs += gpmi-nand.o 3gpmi_nand-objs += gpmi-nand.o
4gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
deleted file mode 100644
index a8b26d2e793c..000000000000
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ /dev/null
@@ -1,934 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Freescale GPMI NAND Flash Driver
4 *
5 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
6 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
7 */
8#include <linux/delay.h>
9#include <linux/clk.h>
10#include <linux/slab.h>
11
12#include "gpmi-nand.h"
13#include "gpmi-regs.h"
14#include "bch-regs.h"
15
16/* Converts time to clock cycles */
17#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
18
19#define MXS_SET_ADDR 0x4
20#define MXS_CLR_ADDR 0x8
21/*
22 * Clear the bit and poll it cleared. This is usually called with
23 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
24 * (bit 30).
25 */
26static int clear_poll_bit(void __iomem *addr, u32 mask)
27{
28 int timeout = 0x400;
29
30 /* clear the bit */
31 writel(mask, addr + MXS_CLR_ADDR);
32
33 /*
34 * SFTRST needs 3 GPMI clocks to settle, the reference manual
35 * recommends to wait 1us.
36 */
37 udelay(1);
38
39 /* poll the bit becoming clear */
40 while ((readl(addr) & mask) && --timeout)
41 /* nothing */;
42
43 return !timeout;
44}
45
46#define MODULE_CLKGATE (1 << 30)
47#define MODULE_SFTRST (1 << 31)
48/*
49 * The current mxs_reset_block() will do two things:
50 * [1] enable the module.
51 * [2] reset the module.
52 *
 53 * In most cases, it's ok.
 54 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
 55 * If you try to soft reset the BCH block, it becomes unusable until
 56 * the next hard reset. This case occurs in the NAND boot mode. When the board
 57 * boots from NAND, the ROM of the chip will initialize the BCH block itself.
 58 * So if the driver tries to reset the BCH again, the BCH will not work anymore.
 59 * You will see a DMA timeout in this case. The bug has been fixed
 60 * in later chips, such as the MX28.
61 *
62 * To avoid this bug, just add a new parameter `just_enable` for
63 * the mxs_reset_block(), and rewrite it here.
64 */
65static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
66{
67 int ret;
68 int timeout = 0x400;
69
70 /* clear and poll SFTRST */
71 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
72 if (unlikely(ret))
73 goto error;
74
75 /* clear CLKGATE */
76 writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
77
78 if (!just_enable) {
79 /* set SFTRST to reset the block */
80 writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
81 udelay(1);
82
83 /* poll CLKGATE becoming set */
84 while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
85 /* nothing */;
86 if (unlikely(!timeout))
87 goto error;
88 }
89
90 /* clear and poll SFTRST */
91 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
92 if (unlikely(ret))
93 goto error;
94
95 /* clear and poll CLKGATE */
96 ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
97 if (unlikely(ret))
98 goto error;
99
100 return 0;
101
102error:
103 pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
104 return -ETIMEDOUT;
105}
106
107static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
108{
109 struct clk *clk;
110 int ret;
111 int i;
112
113 for (i = 0; i < GPMI_CLK_MAX; i++) {
114 clk = this->resources.clock[i];
115 if (!clk)
116 break;
117
118 if (v) {
119 ret = clk_prepare_enable(clk);
120 if (ret)
121 goto err_clk;
122 } else {
123 clk_disable_unprepare(clk);
124 }
125 }
126 return 0;
127
128err_clk:
129 for (; i > 0; i--)
130 clk_disable_unprepare(this->resources.clock[i - 1]);
131 return ret;
132}
133
134int gpmi_enable_clk(struct gpmi_nand_data *this)
135{
136 return __gpmi_enable_clk(this, true);
137}
138
139int gpmi_disable_clk(struct gpmi_nand_data *this)
140{
141 return __gpmi_enable_clk(this, false);
142}
143
144int gpmi_init(struct gpmi_nand_data *this)
145{
146 struct resources *r = &this->resources;
147 int ret;
148
149 ret = gpmi_enable_clk(this);
150 if (ret)
151 return ret;
152 ret = gpmi_reset_block(r->gpmi_regs, false);
153 if (ret)
154 goto err_out;
155
156 /*
157 * Reset BCH here, too. We got failures otherwise :(
158 * See later BCH reset for explanation of MX23 and MX28 handling
159 */
160 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
161 if (ret)
162 goto err_out;
163
164 /* Choose NAND mode. */
165 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
166
167 /* Set the IRQ polarity. */
168 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
169 r->gpmi_regs + HW_GPMI_CTRL1_SET);
170
171 /* Disable Write-Protection. */
172 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
173
174 /* Select BCH ECC. */
175 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
176
177 /*
178 * Decouple the chip select from dma channel. We use dma0 for all
179 * the chips.
180 */
181 writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
182
183 gpmi_disable_clk(this);
184 return 0;
185err_out:
186 gpmi_disable_clk(this);
187 return ret;
188}
189
 190/* This function is very useful. It is called only when a bug occurs. */
191void gpmi_dump_info(struct gpmi_nand_data *this)
192{
193 struct resources *r = &this->resources;
194 struct bch_geometry *geo = &this->bch_geometry;
195 u32 reg;
196 int i;
197
198 dev_err(this->dev, "Show GPMI registers :\n");
199 for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
200 reg = readl(r->gpmi_regs + i * 0x10);
201 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
202 }
203
204 /* start to print out the BCH info */
205 dev_err(this->dev, "Show BCH registers :\n");
206 for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
207 reg = readl(r->bch_regs + i * 0x10);
208 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
209 }
210 dev_err(this->dev, "BCH Geometry :\n"
211 "GF length : %u\n"
212 "ECC Strength : %u\n"
213 "Page Size in Bytes : %u\n"
214 "Metadata Size in Bytes : %u\n"
215 "ECC Chunk Size in Bytes: %u\n"
216 "ECC Chunk Count : %u\n"
217 "Payload Size in Bytes : %u\n"
218 "Auxiliary Size in Bytes: %u\n"
219 "Auxiliary Status Offset: %u\n"
220 "Block Mark Byte Offset : %u\n"
221 "Block Mark Bit Offset : %u\n",
222 geo->gf_len,
223 geo->ecc_strength,
224 geo->page_size,
225 geo->metadata_size,
226 geo->ecc_chunk_size,
227 geo->ecc_chunk_count,
228 geo->payload_size,
229 geo->auxiliary_size,
230 geo->auxiliary_status_offset,
231 geo->block_mark_byte_offset,
232 geo->block_mark_bit_offset);
233}
234
235/* Configures the geometry for BCH. */
236int bch_set_geometry(struct gpmi_nand_data *this)
237{
238 struct resources *r = &this->resources;
239 struct bch_geometry *bch_geo = &this->bch_geometry;
240 unsigned int block_count;
241 unsigned int block_size;
242 unsigned int metadata_size;
243 unsigned int ecc_strength;
244 unsigned int page_size;
245 unsigned int gf_len;
246 int ret;
247
248 ret = common_nfc_set_geometry(this);
249 if (ret)
250 return ret;
251
252 block_count = bch_geo->ecc_chunk_count - 1;
253 block_size = bch_geo->ecc_chunk_size;
254 metadata_size = bch_geo->metadata_size;
255 ecc_strength = bch_geo->ecc_strength >> 1;
256 page_size = bch_geo->page_size;
257 gf_len = bch_geo->gf_len;
258
259 ret = gpmi_enable_clk(this);
260 if (ret)
261 return ret;
262
263 /*
 264	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
 265	 * chip, otherwise it will lock up. So we skip resetting the BCH on the
 266	 * MX23 and MX28.
267 */
268 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
269 if (ret)
270 goto err_out;
271
272 /* Configure layout 0. */
273 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
274 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
275 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
276 | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
277 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
278 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
279
280 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
281 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
282 | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
283 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
284 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
285
286 /* Set *all* chip selects to use layout 0. */
287 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
288
289 /* Enable interrupts. */
290 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
291 r->bch_regs + HW_BCH_CTRL_SET);
292
293 gpmi_disable_clk(this);
294 return 0;
295err_out:
296 gpmi_disable_clk(this);
297 return ret;
298}
299
300/*
 301 * <1> Firstly, we should know what the GPMI-clock means.
 302 * The GPMI-clock is the internal clock in the gpmi nand controller.
 303 * If you set the gpmi nand controller to 100MHz, the GPMI-clock's
 304 * period is 10ns. Denote the GPMI-clock's period as GPMI-clock-period.
305 *
 306 * <2> Secondly, we should know what the frequency on the nand chip pins is.
307 * The frequency on the nand chip pins is derived from the GPMI-clock.
308 * We can get it from the following equation:
309 *
310 * F = G / (DS + DH)
311 *
312 * F : the frequency on the nand chip pins.
313 * G : the GPMI clock, such as 100MHz.
314 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
315 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
316 *
317 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
 318 * the nand EDO (Extended Data Out) timing can be applied.
319 * The GPMI implements a feedback read strobe to sample the read data.
320 * The feedback read strobe can be delayed to support the nand EDO timing
 321 * where the read strobe may deassert before the read data is valid, and
 322 * the read data remains valid for some time after the read strobe.
323 *
324 * The following figure illustrates some aspects of a NAND Flash read:
325 *
326 * |<---tREA---->|
327 * | |
328 * | | |
329 * |<--tRP-->| |
330 * | | |
331 * __ ___|__________________________________
332 * RDN \________/ |
333 * |
334 * /---------\
335 * Read Data --------------< >---------
336 * \---------/
337 * | |
338 * |<-D->|
339 * FeedbackRDN ________ ____________
340 * \___________/
341 *
342 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
343 *
344 *
345 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
346 *
347 * 4.1) From the aspect of the nand chip pins:
348 * Delay = (tREA + C - tRP) {1}
349 *
350 * tREA : the maximum read access time.
351 * C : a constant to adjust the delay. default is 4000ps.
352 * tRP : the read pulse width, which is exactly:
353 * tRP = (GPMI-clock-period) * DATA_SETUP
354 *
355 * 4.2) From the aspect of the GPMI nand controller:
356 * Delay = RDN_DELAY * 0.125 * RP {2}
357 *
358 * RP : the DLL reference period.
 359 * if (GPMI-clock-period > DLL_THRESHOLD)
360 * RP = GPMI-clock-period / 2;
361 * else
362 * RP = GPMI-clock-period;
363 *
364 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
 365 * is greater than DLL_THRESHOLD. In other SoCs, the DLL_THRESHOLD
 366 * is 16000ps, but on the mx6q we use 12000ps.
367 *
368 * 4.3) since {1} equals {2}, we get:
369 *
370 * (tREA + 4000 - tRP) * 8
371 * RDN_DELAY = ----------------------- {3}
372 * RP
373 */
374static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
375 const struct nand_sdr_timings *sdr)
376{
377 struct gpmi_nfc_hardware_timing *hw = &this->hw;
378 unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
379 unsigned int period_ps, reference_period_ps;
380 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
381 unsigned int tRP_ps;
382 bool use_half_period;
383 int sample_delay_ps, sample_delay_factor;
384 u16 busy_timeout_cycles;
385 u8 wrn_dly_sel;
386
387 if (sdr->tRC_min >= 30000) {
388 /* ONFI non-EDO modes [0-3] */
389 hw->clk_rate = 22000000;
390 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
391 } else if (sdr->tRC_min >= 25000) {
392 /* ONFI EDO mode 4 */
393 hw->clk_rate = 80000000;
394 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
395 } else {
396 /* ONFI EDO mode 5 */
397 hw->clk_rate = 100000000;
398 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
399 }
400
401 /* SDR core timings are given in picoseconds */
402 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
403
404 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
405 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
406 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
407 busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
408
409 hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
410 BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
411 BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
412 hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
413
414 /*
415 * Derive NFC ideal delay from {3}:
416 *
417 * (tREA + 4000 - tRP) * 8
418 * RDN_DELAY = -----------------------
419 * RP
420 */
421 if (period_ps > dll_threshold_ps) {
422 use_half_period = true;
423 reference_period_ps = period_ps / 2;
424 } else {
425 use_half_period = false;
426 reference_period_ps = period_ps;
427 }
428
429 tRP_ps = data_setup_cycles * period_ps;
430 sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
431 if (sample_delay_ps > 0)
432 sample_delay_factor = sample_delay_ps / reference_period_ps;
433 else
434 sample_delay_factor = 0;
435
436 hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
437 if (sample_delay_factor)
438 hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
439 BM_GPMI_CTRL1_DLL_ENABLE |
440 (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
441}
442
443void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
444{
445 struct gpmi_nfc_hardware_timing *hw = &this->hw;
446 struct resources *r = &this->resources;
447 void __iomem *gpmi_regs = r->gpmi_regs;
448 unsigned int dll_wait_time_us;
449
450 clk_set_rate(r->clock[0], hw->clk_rate);
451
452 writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
453 writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
454
455 /*
456 * Clear several CTRL1 fields, DLL must be disabled when setting
457 * RDN_DELAY or HALF_PERIOD.
458 */
459 writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
460 writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
461
462 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
463 dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
464 if (!dll_wait_time_us)
465 dll_wait_time_us = 1;
466
467 /* Wait for the DLL to settle. */
468 udelay(dll_wait_time_us);
469}
470
471int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
472 const struct nand_data_interface *conf)
473{
474 struct gpmi_nand_data *this = nand_get_controller_data(chip);
475 const struct nand_sdr_timings *sdr;
476
477 /* Retrieve required NAND timings */
478 sdr = nand_get_sdr_timings(conf);
479 if (IS_ERR(sdr))
480 return PTR_ERR(sdr);
481
482 /* Only MX6 GPMI controller can reach EDO timings */
483 if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
484 return -ENOTSUPP;
485
486 /* Stop here if this call was just a check */
487 if (chipnr < 0)
488 return 0;
489
490 /* Do the actual derivation of the controller timings */
491 gpmi_nfc_compute_timings(this, sdr);
492
493 this->hw.must_apply_timings = true;
494
495 return 0;
496}
497
498/* Clears a BCH interrupt. */
499void gpmi_clear_bch(struct gpmi_nand_data *this)
500{
501 struct resources *r = &this->resources;
502 writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
503}
504
505/* Returns the Ready/Busy status of the given chip. */
506int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
507{
508 struct resources *r = &this->resources;
509 uint32_t mask = 0;
510 uint32_t reg = 0;
511
512 if (GPMI_IS_MX23(this)) {
513 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
514 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
515 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
516 /*
517 * In the imx6, all the ready/busy pins are bound
518 * together. So we only need to check chip 0.
519 */
520 if (GPMI_IS_MX6(this))
521 chip = 0;
522
523 /* MX28 shares the same R/B register as MX6Q. */
524 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
525 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
526 } else
527 dev_err(this->dev, "unknown arch.\n");
528 return reg & mask;
529}
530
531int gpmi_send_command(struct gpmi_nand_data *this)
532{
533 struct dma_chan *channel = get_dma_chan(this);
534 struct dma_async_tx_descriptor *desc;
535 struct scatterlist *sgl;
536 int chip = this->current_chip;
537 int ret;
538 u32 pio[3];
539
540 /* [1] send out the PIO words */
541 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
542 | BM_GPMI_CTRL0_WORD_LENGTH
543 | BF_GPMI_CTRL0_CS(chip, this)
544 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
545 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
546 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
547 | BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
548 pio[1] = pio[2] = 0;
549 desc = dmaengine_prep_slave_sg(channel,
550 (struct scatterlist *)pio,
551 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
552 if (!desc)
553 return -EINVAL;
554
555 /* [2] send out the COMMAND + ADDRESS string stored in @buffer */
556 sgl = &this->cmd_sgl;
557
558 sg_init_one(sgl, this->cmd_buffer, this->command_length);
559 dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
560 desc = dmaengine_prep_slave_sg(channel,
561 sgl, 1, DMA_MEM_TO_DEV,
562 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
563 if (!desc)
564 return -EINVAL;
565
566 /* [3] submit the DMA */
567 ret = start_dma_without_bch_irq(this, desc);
568
569 dma_unmap_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
570
571 return ret;
572}
573
574int gpmi_send_data(struct gpmi_nand_data *this, const void *buf, int len)
575{
576 struct dma_async_tx_descriptor *desc;
577 struct dma_chan *channel = get_dma_chan(this);
578 int chip = this->current_chip;
579 int ret;
580 uint32_t command_mode;
581 uint32_t address;
582 u32 pio[2];
583
584 /* [1] PIO */
585 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
586 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
587
588 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
589 | BM_GPMI_CTRL0_WORD_LENGTH
590 | BF_GPMI_CTRL0_CS(chip, this)
591 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
592 | BF_GPMI_CTRL0_ADDRESS(address)
593 | BF_GPMI_CTRL0_XFER_COUNT(len);
594 pio[1] = 0;
595 desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
596 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
597 if (!desc)
598 return -EINVAL;
599
600 /* [2] send DMA request */
601 prepare_data_dma(this, buf, len, DMA_TO_DEVICE);
602 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
603 1, DMA_MEM_TO_DEV,
604 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
605 if (!desc)
606 return -EINVAL;
607
608 /* [3] submit the DMA */
609 ret = start_dma_without_bch_irq(this, desc);
610
611 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
612
613 return ret;
614}
615
616int gpmi_read_data(struct gpmi_nand_data *this, void *buf, int len)
617{
618 struct dma_async_tx_descriptor *desc;
619 struct dma_chan *channel = get_dma_chan(this);
620 int chip = this->current_chip;
621 int ret;
622 u32 pio[2];
623 bool direct;
624
625 /* [1] : send PIO */
626 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
627 | BM_GPMI_CTRL0_WORD_LENGTH
628 | BF_GPMI_CTRL0_CS(chip, this)
629 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
630 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
631 | BF_GPMI_CTRL0_XFER_COUNT(len);
632 pio[1] = 0;
633 desc = dmaengine_prep_slave_sg(channel,
634 (struct scatterlist *)pio,
635 ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
636 if (!desc)
637 return -EINVAL;
638
639 /* [2] : send DMA request */
640 direct = prepare_data_dma(this, buf, len, DMA_FROM_DEVICE);
641 desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
642 1, DMA_DEV_TO_MEM,
643 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
644 if (!desc)
645 return -EINVAL;
646
647 /* [3] : submit the DMA */
648
649 ret = start_dma_without_bch_irq(this, desc);
650
651 dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
652 if (!direct)
653 memcpy(buf, this->data_buffer_dma, len);
654
655 return ret;
656}
657
658int gpmi_send_page(struct gpmi_nand_data *this,
659 dma_addr_t payload, dma_addr_t auxiliary)
660{
661 struct bch_geometry *geo = &this->bch_geometry;
662 uint32_t command_mode;
663 uint32_t address;
664 uint32_t ecc_command;
665 uint32_t buffer_mask;
666 struct dma_async_tx_descriptor *desc;
667 struct dma_chan *channel = get_dma_chan(this);
668 int chip = this->current_chip;
669 u32 pio[6];
670
671 /* A DMA descriptor that does an ECC page read. */
672 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
673 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
674 ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
675 buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
676 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
677
678 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
679 | BM_GPMI_CTRL0_WORD_LENGTH
680 | BF_GPMI_CTRL0_CS(chip, this)
681 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
682 | BF_GPMI_CTRL0_ADDRESS(address)
683 | BF_GPMI_CTRL0_XFER_COUNT(0);
684 pio[1] = 0;
685 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
686 | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
687 | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
688 pio[3] = geo->page_size;
689 pio[4] = payload;
690 pio[5] = auxiliary;
691
692 desc = dmaengine_prep_slave_sg(channel,
693 (struct scatterlist *)pio,
694 ARRAY_SIZE(pio), DMA_TRANS_NONE,
695 DMA_CTRL_ACK);
696 if (!desc)
697 return -EINVAL;
698
699 return start_dma_with_bch_irq(this, desc);
700}
701
702int gpmi_read_page(struct gpmi_nand_data *this,
703 dma_addr_t payload, dma_addr_t auxiliary)
704{
705 struct bch_geometry *geo = &this->bch_geometry;
706 uint32_t command_mode;
707 uint32_t address;
708 uint32_t ecc_command;
709 uint32_t buffer_mask;
710 struct dma_async_tx_descriptor *desc;
711 struct dma_chan *channel = get_dma_chan(this);
712 int chip = this->current_chip;
713 u32 pio[6];
714
715 /* [1] Wait for the chip to report ready. */
716 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
717 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
718
719 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
720 | BM_GPMI_CTRL0_WORD_LENGTH
721 | BF_GPMI_CTRL0_CS(chip, this)
722 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
723 | BF_GPMI_CTRL0_ADDRESS(address)
724 | BF_GPMI_CTRL0_XFER_COUNT(0);
725 pio[1] = 0;
726 desc = dmaengine_prep_slave_sg(channel,
727 (struct scatterlist *)pio, 2,
728 DMA_TRANS_NONE, 0);
729 if (!desc)
730 return -EINVAL;
731
732 /* [2] Enable the BCH block and read. */
733 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
734 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
735 ecc_command = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
736 buffer_mask = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
737 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
738
739 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
740 | BM_GPMI_CTRL0_WORD_LENGTH
741 | BF_GPMI_CTRL0_CS(chip, this)
742 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
743 | BF_GPMI_CTRL0_ADDRESS(address)
744 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
745
746 pio[1] = 0;
747 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
748 | BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
749 | BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
750 pio[3] = geo->page_size;
751 pio[4] = payload;
752 pio[5] = auxiliary;
753 desc = dmaengine_prep_slave_sg(channel,
754 (struct scatterlist *)pio,
755 ARRAY_SIZE(pio), DMA_TRANS_NONE,
756 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
757 if (!desc)
758 return -EINVAL;
759
760 /* [3] Disable the BCH block */
761 command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
762 address = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
763
764 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
765 | BM_GPMI_CTRL0_WORD_LENGTH
766 | BF_GPMI_CTRL0_CS(chip, this)
767 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
768 | BF_GPMI_CTRL0_ADDRESS(address)
769 | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
770 pio[1] = 0;
771 pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
772 desc = dmaengine_prep_slave_sg(channel,
773 (struct scatterlist *)pio, 3,
774 DMA_TRANS_NONE,
775 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
776 if (!desc)
777 return -EINVAL;
778
779 /* [4] submit the DMA */
780 return start_dma_with_bch_irq(this, desc);
781}
782
783/**
784 * gpmi_copy_bits - copy bits from one memory region to another
785 * @dst: destination buffer
786 * @dst_bit_off: bit offset we're starting to write at
787 * @src: source buffer
788 * @src_bit_off: bit offset we're starting to read from
789 * @nbits: number of bits to copy
790 *
 791 * This function copies bits from one memory region to another, and is used by
792 * the GPMI driver to copy ECC sections which are not guaranteed to be byte
793 * aligned.
794 *
795 * src and dst should not overlap.
796 *
797 */
798void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
799 const u8 *src, size_t src_bit_off,
800 size_t nbits)
801{
802 size_t i;
803 size_t nbytes;
804 u32 src_buffer = 0;
805 size_t bits_in_src_buffer = 0;
806
807 if (!nbits)
808 return;
809
810 /*
811 * Move src and dst pointers to the closest byte pointer and store bit
812 * offsets within a byte.
813 */
814 src += src_bit_off / 8;
815 src_bit_off %= 8;
816
817 dst += dst_bit_off / 8;
818 dst_bit_off %= 8;
819
820 /*
821 * Initialize the src_buffer value with bits available in the first
822 * byte of data so that we end up with a byte aligned src pointer.
823 */
824 if (src_bit_off) {
825 src_buffer = src[0] >> src_bit_off;
826 if (nbits >= (8 - src_bit_off)) {
827 bits_in_src_buffer += 8 - src_bit_off;
828 } else {
829 src_buffer &= GENMASK(nbits - 1, 0);
830 bits_in_src_buffer += nbits;
831 }
832 nbits -= bits_in_src_buffer;
833 src++;
834 }
835
836 /* Calculate the number of bytes that can be copied from src to dst. */
837 nbytes = nbits / 8;
838
839 /* Try to align dst to a byte boundary. */
840 if (dst_bit_off) {
841 if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
842 src_buffer |= src[0] << bits_in_src_buffer;
843 bits_in_src_buffer += 8;
844 src++;
845 nbytes--;
846 }
847
848 if (bits_in_src_buffer >= (8 - dst_bit_off)) {
849 dst[0] &= GENMASK(dst_bit_off - 1, 0);
850 dst[0] |= src_buffer << dst_bit_off;
851 src_buffer >>= (8 - dst_bit_off);
852 bits_in_src_buffer -= (8 - dst_bit_off);
853 dst_bit_off = 0;
854 dst++;
855 if (bits_in_src_buffer > 7) {
856 bits_in_src_buffer -= 8;
857 dst[0] = src_buffer;
858 dst++;
859 src_buffer >>= 8;
860 }
861 }
862 }
863
864 if (!bits_in_src_buffer && !dst_bit_off) {
865 /*
866 * Both src and dst pointers are byte aligned, thus we can
867 * just use the optimized memcpy function.
868 */
869 if (nbytes)
870 memcpy(dst, src, nbytes);
871 } else {
872 /*
873 * src buffer is not byte aligned, hence we have to copy each
874 * src byte to the src_buffer variable before extracting a byte
875 * to store in dst.
876 */
877 for (i = 0; i < nbytes; i++) {
878 src_buffer |= src[i] << bits_in_src_buffer;
879 dst[i] = src_buffer;
880 src_buffer >>= 8;
881 }
882 }
883 /* Update dst and src pointers */
884 dst += nbytes;
885 src += nbytes;
886
887 /*
888 * nbits is the number of remaining bits. It should not exceed 8 as
 889 * we've already copied as many bytes as possible.
890 */
891 nbits %= 8;
892
893 /*
894 * If there's no more bits to copy to the destination and src buffer
895 * was already byte aligned, then we're done.
896 */
897 if (!nbits && !bits_in_src_buffer)
898 return;
899
900 /* Copy the remaining bits to src_buffer */
901 if (nbits)
902 src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
903 bits_in_src_buffer;
904 bits_in_src_buffer += nbits;
905
906 /*
907 * In case there were not enough bits to get a byte aligned dst buffer
908 * prepare the src_buffer variable to match the dst organization (shift
909 * src_buffer by dst_bit_off and retrieve the least significant bits
910 * from dst).
911 */
912 if (dst_bit_off)
913 src_buffer = (src_buffer << dst_bit_off) |
914 (*dst & GENMASK(dst_bit_off - 1, 0));
915 bits_in_src_buffer += dst_bit_off;
916
917 /*
918 * Keep most significant bits from dst if we end up with an unaligned
919 * number of bits.
920 */
921 nbytes = bits_in_src_buffer / 8;
922 if (bits_in_src_buffer % 8) {
923 src_buffer |= (dst[nbytes] &
924 GENMASK(7, bits_in_src_buffer % 8)) <<
925 (nbytes * 8);
926 nbytes++;
927 }
928
929 /* Copy the remaining bytes to dst */
930 for (i = 0; i < nbytes; i++) {
931 dst[i] = src_buffer;
932 src_buffer >>= 8;
933 }
934}
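A self-contained usage sketch for gpmi_copy_bits() (hypothetical buffers, not taken from the driver): copy ten bits that start at bit 3 of src into dst starting at bit 6, with neither side byte aligned.

	u8 src[2] = { 0xab, 0x01 };	/* source bit stream */
	u8 dst[3] = { 0 };		/* destination, zeroed */

	/* Copies bits 3..12 of src into bits 6..15 of dst; neither region
	 * is byte aligned, which is exactly the case this helper exists to
	 * handle for non-byte-aligned BCH ECC chunks. */
	gpmi_copy_bits(dst, 6, src, 3, 10);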
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 40df20d1adf5..334fe3130285 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 2008 Embedded Alley Solutions, Inc. 6 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
7 */ 7 */
8#include <linux/clk.h> 8#include <linux/clk.h>
9#include <linux/delay.h>
9#include <linux/slab.h> 10#include <linux/slab.h>
10#include <linux/sched/task_stack.h> 11#include <linux/sched/task_stack.h>
11#include <linux/interrupt.h> 12#include <linux/interrupt.h>
@@ -13,7 +14,10 @@
13#include <linux/mtd/partitions.h> 14#include <linux/mtd/partitions.h>
14#include <linux/of.h> 15#include <linux/of.h>
15#include <linux/of_device.h> 16#include <linux/of_device.h>
17#include <linux/pm_runtime.h>
18#include <linux/dma/mxs-dma.h>
16#include "gpmi-nand.h" 19#include "gpmi-nand.h"
20#include "gpmi-regs.h"
17#include "bch-regs.h" 21#include "bch-regs.h"
18 22
19/* Resource names for the GPMI NAND driver. */ 23/* Resource names for the GPMI NAND driver. */
@@ -21,149 +25,208 @@
21#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" 25#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
22#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" 26#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
23 27
 24/* add our own bbt descriptor */                                   28/* Converts time to clock cycles */
25static uint8_t scan_ff_pattern[] = { 0xff }; 29#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
26static struct nand_bbt_descr gpmi_bbt_descr = {
27 .options = 0,
28 .offs = 0,
29 .len = 1,
30 .pattern = scan_ff_pattern
31};
32 30
31#define MXS_SET_ADDR 0x4
32#define MXS_CLR_ADDR 0x8
33/* 33/*
34 * We may change the layout if we can get the ECC info from the datasheet, 34 * Clear the bit and poll it cleared. This is usually called with
35 * else we will use all the (page + OOB). 35 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
36 * (bit 30).
36 */ 37 */
37static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section, 38static int clear_poll_bit(void __iomem *addr, u32 mask)
38 struct mtd_oob_region *oobregion)
39{ 39{
40 struct nand_chip *chip = mtd_to_nand(mtd); 40 int timeout = 0x400;
41 struct gpmi_nand_data *this = nand_get_controller_data(chip);
42 struct bch_geometry *geo = &this->bch_geometry;
43 41
44 if (section) 42 /* clear the bit */
45 return -ERANGE; 43 writel(mask, addr + MXS_CLR_ADDR);
46 44
47 oobregion->offset = 0; 45 /*
48 oobregion->length = geo->page_size - mtd->writesize; 46 * SFTRST needs 3 GPMI clocks to settle, the reference manual
47 * recommends to wait 1us.
48 */
49 udelay(1);
49 50
50 return 0; 51 /* poll the bit becoming clear */
52 while ((readl(addr) & mask) && --timeout)
53 /* nothing */;
54
55 return !timeout;
51} 56}
52 57
53static int gpmi_ooblayout_free(struct mtd_info *mtd, int section, 58#define MODULE_CLKGATE (1 << 30)
54 struct mtd_oob_region *oobregion) 59#define MODULE_SFTRST (1 << 31)
60/*
61 * The current mxs_reset_block() will do two things:
62 * [1] enable the module.
63 * [2] reset the module.
64 *
 65 * In most cases, it's ok.
 66 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
 67 * If you try to soft reset the BCH block, it becomes unusable until
 68 * the next hard reset. This case occurs in the NAND boot mode. When the board
 69 * boots from NAND, the ROM of the chip will initialize the BCH block itself.
 70 * So if the driver tries to reset the BCH again, the BCH will not work anymore.
 71 * You will see a DMA timeout in this case. The bug has been fixed
 72 * in later chips, such as the MX28.
73 *
74 * To avoid this bug, just add a new parameter `just_enable` for
75 * the mxs_reset_block(), and rewrite it here.
76 */
77static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
55{ 78{
56 struct nand_chip *chip = mtd_to_nand(mtd); 79 int ret;
57 struct gpmi_nand_data *this = nand_get_controller_data(chip); 80 int timeout = 0x400;
58 struct bch_geometry *geo = &this->bch_geometry; 81
82 /* clear and poll SFTRST */
83 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
84 if (unlikely(ret))
85 goto error;
86
87 /* clear CLKGATE */
88 writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
89
90 if (!just_enable) {
91 /* set SFTRST to reset the block */
92 writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
93 udelay(1);
94
95 /* poll CLKGATE becoming set */
96 while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
97 /* nothing */;
98 if (unlikely(!timeout))
99 goto error;
100 }
59 101
60 if (section) 102 /* clear and poll SFTRST */
61 return -ERANGE; 103 ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
104 if (unlikely(ret))
105 goto error;
62 106
63 /* The available oob size we have. */ 107 /* clear and poll CLKGATE */
64 if (geo->page_size < mtd->writesize + mtd->oobsize) { 108 ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
65 oobregion->offset = geo->page_size - mtd->writesize; 109 if (unlikely(ret))
66 oobregion->length = mtd->oobsize - oobregion->offset; 110 goto error;
67 }
68 111
69 return 0; 112 return 0;
113
114error:
115 pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
116 return -ETIMEDOUT;
70} 117}
71 118
72static const char * const gpmi_clks_for_mx2x[] = { 119static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
73 "gpmi_io", 120{
74}; 121 struct clk *clk;
122 int ret;
123 int i;
75 124
76static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = { 125 for (i = 0; i < GPMI_CLK_MAX; i++) {
77 .ecc = gpmi_ooblayout_ecc, 126 clk = this->resources.clock[i];
78 .free = gpmi_ooblayout_free, 127 if (!clk)
79}; 128 break;
80 129
81static const struct gpmi_devdata gpmi_devdata_imx23 = { 130 if (v) {
82 .type = IS_MX23, 131 ret = clk_prepare_enable(clk);
83 .bch_max_ecc_strength = 20, 132 if (ret)
84 .max_chain_delay = 16000, 133 goto err_clk;
85 .clks = gpmi_clks_for_mx2x, 134 } else {
86 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), 135 clk_disable_unprepare(clk);
87}; 136 }
137 }
138 return 0;
88 139
89static const struct gpmi_devdata gpmi_devdata_imx28 = { 140err_clk:
90 .type = IS_MX28, 141 for (; i > 0; i--)
91 .bch_max_ecc_strength = 20, 142 clk_disable_unprepare(this->resources.clock[i - 1]);
92 .max_chain_delay = 16000, 143 return ret;
93 .clks = gpmi_clks_for_mx2x, 144}
94 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
95};
96 145
97static const char * const gpmi_clks_for_mx6[] = { 146static int gpmi_init(struct gpmi_nand_data *this)
98 "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", 147{
99}; 148 struct resources *r = &this->resources;
149 int ret;
100 150
101static const struct gpmi_devdata gpmi_devdata_imx6q = { 151 ret = gpmi_reset_block(r->gpmi_regs, false);
102 .type = IS_MX6Q, 152 if (ret)
103 .bch_max_ecc_strength = 40, 153 goto err_out;
104 .max_chain_delay = 12000,
105 .clks = gpmi_clks_for_mx6,
106 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
107};
108 154
109static const struct gpmi_devdata gpmi_devdata_imx6sx = { 155 /*
110 .type = IS_MX6SX, 156 * Reset BCH here, too. We got failures otherwise :(
111 .bch_max_ecc_strength = 62, 157 * See later BCH reset for explanation of MX23 and MX28 handling
112 .max_chain_delay = 12000, 158 */
113 .clks = gpmi_clks_for_mx6, 159 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
114 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), 160 if (ret)
115}; 161 goto err_out;
116 162
117static const char * const gpmi_clks_for_mx7d[] = { 163 /* Choose NAND mode. */
118 "gpmi_io", "gpmi_bch_apb", 164 writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
119};
120 165
121static const struct gpmi_devdata gpmi_devdata_imx7d = { 166 /* Set the IRQ polarity. */
122 .type = IS_MX7D, 167 writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
123 .bch_max_ecc_strength = 62, 168 r->gpmi_regs + HW_GPMI_CTRL1_SET);
124 .max_chain_delay = 12000,
125 .clks = gpmi_clks_for_mx7d,
126 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
127};
128 169
129static irqreturn_t bch_irq(int irq, void *cookie) 170 /* Disable Write-Protection. */
130{ 171 writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
131 struct gpmi_nand_data *this = cookie;
132 172
133 gpmi_clear_bch(this); 173 /* Select BCH ECC. */
134 complete(&this->bch_done); 174 writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
135 return IRQ_HANDLED; 175
176 /*
177 * Decouple the chip select from dma channel. We use dma0 for all
178 * the chips.
179 */
180 writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
181
182 return 0;
183err_out:
184 return ret;
136} 185}
137 186
 138/*                                                                 187/* This function is very useful. It is called only when a bug occurs. */
139 * Calculate the ECC strength by hand: 188static void gpmi_dump_info(struct gpmi_nand_data *this)
140 * E : The ECC strength.
141 * G : the length of Galois Field.
142 * N : The chunk count of per page.
143 * O : the oobsize of the NAND chip.
144 * M : the metasize of per page.
145 *
146 * The formula is :
147 * E * G * N
148 * ------------ <= (O - M)
149 * 8
150 *
151 * So, we get E by:
152 * (O - M) * 8
153 * E <= -------------
154 * G * N
155 */
156static inline int get_ecc_strength(struct gpmi_nand_data *this)
157{ 189{
190 struct resources *r = &this->resources;
158 struct bch_geometry *geo = &this->bch_geometry; 191 struct bch_geometry *geo = &this->bch_geometry;
159 struct mtd_info *mtd = nand_to_mtd(&this->nand); 192 u32 reg;
160 int ecc_strength; 193 int i;
161 194
162 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) 195 dev_err(this->dev, "Show GPMI registers :\n");
163 / (geo->gf_len * geo->ecc_chunk_count); 196 for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
197 reg = readl(r->gpmi_regs + i * 0x10);
198 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
199 }
164 200
 165	 /* We need the next lower even number. */                        201	 /* start to print out the BCH info */
166 return round_down(ecc_strength, 2); 202 dev_err(this->dev, "Show BCH registers :\n");
203 for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
204 reg = readl(r->bch_regs + i * 0x10);
205 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
206 }
207 dev_err(this->dev, "BCH Geometry :\n"
208 "GF length : %u\n"
209 "ECC Strength : %u\n"
210 "Page Size in Bytes : %u\n"
211 "Metadata Size in Bytes : %u\n"
212 "ECC Chunk Size in Bytes: %u\n"
213 "ECC Chunk Count : %u\n"
214 "Payload Size in Bytes : %u\n"
215 "Auxiliary Size in Bytes: %u\n"
216 "Auxiliary Status Offset: %u\n"
217 "Block Mark Byte Offset : %u\n"
218 "Block Mark Bit Offset : %u\n",
219 geo->gf_len,
220 geo->ecc_strength,
221 geo->page_size,
222 geo->metadata_size,
223 geo->ecc_chunk_size,
224 geo->ecc_chunk_count,
225 geo->payload_size,
226 geo->auxiliary_size,
227 geo->auxiliary_status_offset,
228 geo->block_mark_byte_offset,
229 geo->block_mark_bit_offset);
167} 230}
168 231
169static inline bool gpmi_check_ecc(struct gpmi_nand_data *this) 232static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
@@ -296,6 +359,37 @@ static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
296 return 0; 359 return 0;
297} 360}
298 361
362/*
363 * Calculate the ECC strength by hand:
364 * E : The ECC strength.
365 * G : the length of Galois Field.
366 * N : The chunk count of per page.
367 * O : the oobsize of the NAND chip.
368 * M : the metasize of per page.
369 *
370 * The formula is :
371 * E * G * N
372 * ------------ <= (O - M)
373 * 8
374 *
375 * So, we get E by:
376 * (O - M) * 8
377 * E <= -------------
378 * G * N
379 */
380static inline int get_ecc_strength(struct gpmi_nand_data *this)
381{
382 struct bch_geometry *geo = &this->bch_geometry;
383 struct mtd_info *mtd = nand_to_mtd(&this->nand);
384 int ecc_strength;
385
386 ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
387 / (geo->gf_len * geo->ecc_chunk_count);
388
389 /* We need the minor even number. */
390 return round_down(ecc_strength, 2);
391}
392
299static int legacy_set_geometry(struct gpmi_nand_data *this) 393static int legacy_set_geometry(struct gpmi_nand_data *this)
300{ 394{
301 struct bch_geometry *geo = &this->bch_geometry; 395 struct bch_geometry *geo = &this->bch_geometry;
@@ -408,7 +502,7 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
408 return 0; 502 return 0;
409} 503}
410 504
411int common_nfc_set_geometry(struct gpmi_nand_data *this) 505static int common_nfc_set_geometry(struct gpmi_nand_data *this)
412{ 506{
413 struct nand_chip *chip = &this->nand; 507 struct nand_chip *chip = &this->nand;
414 508
@@ -430,18 +524,288 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
430 return 0; 524 return 0;
431} 525}
432 526
433struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) 527/* Configures the geometry for BCH. */
528static int bch_set_geometry(struct gpmi_nand_data *this)
529{
530 struct resources *r = &this->resources;
531 int ret;
532
533 ret = common_nfc_set_geometry(this);
534 if (ret)
535 return ret;
536
537 ret = pm_runtime_get_sync(this->dev);
538 if (ret < 0)
539 return ret;
540
541 /*
 542	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
 543	 * chip, otherwise it will lock up. So we skip resetting the BCH on the
 544	 * MX23 and MX28.
545 */
546 ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
547 if (ret)
548 goto err_out;
549
550 /* Set *all* chip selects to use layout 0. */
551 writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
552
553 ret = 0;
554err_out:
555 pm_runtime_mark_last_busy(this->dev);
556 pm_runtime_put_autosuspend(this->dev);
557
558 return ret;
559}
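The reworked bch_set_geometry() above shows the runtime-PM pattern this patch adopts around register access. A minimal standalone sketch of that pattern (the device pointer and register work are placeholders):

	ret = pm_runtime_get_sync(dev);	/* resume the device (or bump its count) */
	if (ret < 0)
		return ret;	/* note: pm_runtime_get_sync() leaves the usage
				 * count raised even on failure, so some callers
				 * add pm_runtime_put_noidle() in this path */

	/* ... touch the hardware registers ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop the reference; the device
						 * may suspend once the timer fires */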
560
561/*
 562 * <1> Firstly, we should know what the GPMI-clock means.
 563 * The GPMI-clock is the internal clock in the gpmi nand controller.
 564 * If you set the gpmi nand controller to 100MHz, the GPMI-clock's
 565 * period is 10ns. Denote the GPMI-clock's period as GPMI-clock-period.
566 *
 567 * <2> Secondly, we should know what the frequency on the nand chip pins is.
568 * The frequency on the nand chip pins is derived from the GPMI-clock.
569 * We can get it from the following equation:
570 *
571 * F = G / (DS + DH)
572 *
573 * F : the frequency on the nand chip pins.
574 * G : the GPMI clock, such as 100MHz.
575 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
576 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
577 *
578 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
 579 * the nand EDO (Extended Data Out) timing can be applied.
580 * The GPMI implements a feedback read strobe to sample the read data.
581 * The feedback read strobe can be delayed to support the nand EDO timing
 582 * where the read strobe may deassert before the read data is valid, and
 583 * the read data remains valid for some time after the read strobe.
584 *
585 * The following figure illustrates some aspects of a NAND Flash read:
586 *
587 * |<---tREA---->|
588 * | |
589 * | | |
590 * |<--tRP-->| |
591 * | | |
592 * __ ___|__________________________________
593 * RDN \________/ |
594 * |
595 * /---------\
596 * Read Data --------------< >---------
597 * \---------/
598 * | |
599 * |<-D->|
600 * FeedbackRDN ________ ____________
601 * \___________/
602 *
603 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
604 *
605 *
606 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
607 *
608 * 4.1) From the aspect of the nand chip pins:
609 * Delay = (tREA + C - tRP) {1}
610 *
611 * tREA : the maximum read access time.
612 * C : a constant to adjust the delay. default is 4000ps.
613 * tRP : the read pulse width, which is exactly:
614 * tRP = (GPMI-clock-period) * DATA_SETUP
615 *
616 * 4.2) From the aspect of the GPMI nand controller:
617 * Delay = RDN_DELAY * 0.125 * RP {2}
618 *
619 * RP : the DLL reference period.
 620 * if (GPMI-clock-period > DLL_THRESHOLD)
621 * RP = GPMI-clock-period / 2;
622 * else
623 * RP = GPMI-clock-period;
624 *
625 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
 626 * is greater than DLL_THRESHOLD. In other SoCs, the DLL_THRESHOLD
 627 * is 16000ps, but on the mx6q we use 12000ps.
628 *
629 * 4.3) since {1} equals {2}, we get:
630 *
631 * (tREA + 4000 - tRP) * 8
632 * RDN_DELAY = ----------------------- {3}
633 * RP
634 */
635static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
636 const struct nand_sdr_timings *sdr)
637{
638 struct gpmi_nfc_hardware_timing *hw = &this->hw;
639 unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
640 unsigned int period_ps, reference_period_ps;
641 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
642 unsigned int tRP_ps;
643 bool use_half_period;
644 int sample_delay_ps, sample_delay_factor;
645 u16 busy_timeout_cycles;
646 u8 wrn_dly_sel;
647
648 if (sdr->tRC_min >= 30000) {
649 /* ONFI non-EDO modes [0-3] */
650 hw->clk_rate = 22000000;
651 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
652 } else if (sdr->tRC_min >= 25000) {
653 /* ONFI EDO mode 4 */
654 hw->clk_rate = 80000000;
655 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
656 } else {
657 /* ONFI EDO mode 5 */
658 hw->clk_rate = 100000000;
659 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
660 }
661
662 /* SDR core timings are given in picoseconds */
663 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
664
665 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
666 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
667 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
668 busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
669
670 hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
671 BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
672 BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
673 hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096));
674
675 /*
676 * Derive NFC ideal delay from {3}:
677 *
678 * (tREA + 4000 - tRP) * 8
679 * RDN_DELAY = -----------------------
680 * RP
681 */
682 if (period_ps > dll_threshold_ps) {
683 use_half_period = true;
684 reference_period_ps = period_ps / 2;
685 } else {
686 use_half_period = false;
687 reference_period_ps = period_ps;
688 }
689
690 tRP_ps = data_setup_cycles * period_ps;
691 sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
692 if (sample_delay_ps > 0)
693 sample_delay_factor = sample_delay_ps / reference_period_ps;
694 else
695 sample_delay_factor = 0;
696
697 hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
698 if (sample_delay_factor)
699 hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
700 BM_GPMI_CTRL1_DLL_ENABLE |
701 (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
702}
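A quick, self-contained sketch of the HALF_PERIOD/RDN_DELAY arithmetic derived in equation {3} above. All inputs are assumed for illustration (100MHz GPMI clock, the mx6q 12000ps threshold, one DATA_SETUP cycle, tREA = 16000ps); the driver takes the real values from the SDR timing mode and the per-SoC devdata:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        unsigned int clk_hz = 100000000;              /* assumed GPMI clock */
        unsigned int period_ps = 1000000000000ULL / clk_hz;   /* 10000ps */
        unsigned int dll_threshold_ps = 12000;        /* mx6q max_chain_delay */
        unsigned int data_setup_cycles = 1;           /* assumed DATA_SETUP */
        unsigned int tREA_ps = 16000;                 /* assumed tREA max */
        bool half_period = period_ps > dll_threshold_ps;
        unsigned int rp_ps = half_period ? period_ps / 2 : period_ps;
        unsigned int trp_ps = data_setup_cycles * period_ps;
        int delay_ps = ((int)tREA_ps + 4000 - (int)trp_ps) * 8;
        int rdn_delay = delay_ps > 0 ? delay_ps / (int)rp_ps : 0;

        /* prints "HALF_PERIOD=0 RDN_DELAY=8" */
        printf("HALF_PERIOD=%d RDN_DELAY=%d\n", half_period, rdn_delay);
        return 0;
}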
703
704static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
705{
706 struct gpmi_nfc_hardware_timing *hw = &this->hw;
707 struct resources *r = &this->resources;
708 void __iomem *gpmi_regs = r->gpmi_regs;
709 unsigned int dll_wait_time_us;
710
711 clk_set_rate(r->clock[0], hw->clk_rate);
712
713 writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
714 writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
715
716 /*
717 * Clear several CTRL1 fields; the DLL must be disabled when setting
718 * RDN_DELAY or HALF_PERIOD.
719 */
720 writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
721 writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
722
723 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
724 dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
725 if (!dll_wait_time_us)
726 dll_wait_time_us = 1;
727
728 /* Wait for the DLL to settle. */
729 udelay(dll_wait_time_us);
730}
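One detail worth noting in gpmi_nfc_apply_timings(): USEC_PER_SEC / hw->clk_rate is integer division, so for any clock above 1MHz it truncates to 0 and the whole expression yields 0us. The clamp to 1us is therefore what actually guarantees the 64-cycle DLL settle time (640ns at 100MHz). A minimal illustration with an assumed clock rate:

#include <stdio.h>

int main(void)
{
        unsigned int clk_hz = 100000000;                /* assumed 100MHz */
        unsigned int wait_us = 1000000 / clk_hz * 64;   /* truncates to 0 */

        if (!wait_us)
                wait_us = 1;    /* 1us covers 64 cycles (640ns) at 100MHz */

        printf("DLL settle wait: %uus\n", wait_us);
        return 0;
}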
731
732static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
733 const struct nand_data_interface *conf)
734{
735 struct gpmi_nand_data *this = nand_get_controller_data(chip);
736 const struct nand_sdr_timings *sdr;
737
738 /* Retrieve required NAND timings */
739 sdr = nand_get_sdr_timings(conf);
740 if (IS_ERR(sdr))
741 return PTR_ERR(sdr);
742
743 /* Only MX6 GPMI controller can reach EDO timings */
744 if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
745 return -ENOTSUPP;
746
747 /* Stop here if this call was just a check */
748 if (chipnr < 0)
749 return 0;
750
751 /* Do the actual derivation of the controller timings */
752 gpmi_nfc_compute_timings(this, sdr);
753
754 this->hw.must_apply_timings = true;
755
756 return 0;
757}
758
759/* Clears a BCH interrupt. */
760static void gpmi_clear_bch(struct gpmi_nand_data *this)
761{
762 struct resources *r = &this->resources;
763 writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
764}
765
766static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
434{ 767{
435 /* We use the DMA channel 0 to access all the nand chips. */ 768 /* We use the DMA channel 0 to access all the nand chips. */
436 return this->dma_chans[0]; 769 return this->dma_chans[0];
437} 770}
438 771
772/* This will be called after the DMA operation is finished. */
773static void dma_irq_callback(void *param)
774{
775 struct gpmi_nand_data *this = param;
776 struct completion *dma_c = &this->dma_done;
777
778 complete(dma_c);
779}
780
781static irqreturn_t bch_irq(int irq, void *cookie)
782{
783 struct gpmi_nand_data *this = cookie;
784
785 gpmi_clear_bch(this);
786 complete(&this->bch_done);
787 return IRQ_HANDLED;
788}
789
790static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
791{
792 /*
793 * raw_len is the length to read/write including the BCH data, as
794 * passed to us in exec_op. Calculate the payload data length from it.
795 */
796 if (this->bch)
797 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
798 else
799 return raw_len;
800}
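For instance, assuming a 512-byte ECC chunk and a 2112-byte raw transfer (payload plus metadata and parity), the helper maps the raw length back down to a 2048-byte payload. A sketch with a simplified ALIGN_DOWN (equivalent to the kernel macro for these values):

#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) / (a) * (a))      /* simplified form */

int main(void)
{
        int raw_len = 2112;     /* assumed: data + metadata + parity */
        int chunk = 512;        /* assumed ecc_chunk_size */

        printf("len = %d\n", ALIGN_DOWN(raw_len, chunk));       /* 2048 */
        return 0;
}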
801
439/* Can we use the upper's buffer directly for DMA? */ 802/* Can we use the upper's buffer directly for DMA? */
440bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf, int len, 803static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
441 enum dma_data_direction dr) 804 int raw_len, struct scatterlist *sgl,
805 enum dma_data_direction dr)
442{ 806{
443 struct scatterlist *sgl = &this->data_sgl;
444 int ret; 807 int ret;
808 int len = gpmi_raw_len_to_len(this, raw_len);
445 809
446 /* first try to map the upper buffer directly */ 810 /* first try to map the upper buffer directly */
447 if (virt_addr_valid(buf) && !object_is_on_stack(buf)) { 811 if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
@@ -457,7 +821,7 @@ map_fail:
457 /* We have to use our own DMA buffer. */ 821 /* We have to use our own DMA buffer. */
458 sg_init_one(sgl, this->data_buffer_dma, len); 822 sg_init_one(sgl, this->data_buffer_dma, len);
459 823
460 if (dr == DMA_TO_DEVICE) 824 if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
461 memcpy(this->data_buffer_dma, buf, len); 825 memcpy(this->data_buffer_dma, buf, len);
462 826
463 dma_map_sg(this->dev, sgl, 1, dr); 827 dma_map_sg(this->dev, sgl, 1, dr);
@@ -465,67 +829,263 @@ map_fail:
465 return false; 829 return false;
466} 830}
467 831
468/* This will be called after the DMA operation is finished. */ 832/**
469static void dma_irq_callback(void *param) 833 * gpmi_copy_bits - copy bits from one memory region to another
834 * @dst: destination buffer
835 * @dst_bit_off: bit offset we're starting to write at
836 * @src: source buffer
837 * @src_bit_off: bit offset we're starting to read from
838 * @nbits: number of bits to copy
839 *
840 * This function copies bits from one memory region to another, and is used by
841 * the GPMI driver to copy ECC sections which are not guaranteed to be byte
842 * aligned.
843 *
844 * src and dst should not overlap.
845 *
846 */
847static void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, const u8 *src,
848 size_t src_bit_off, size_t nbits)
470{ 849{
471 struct gpmi_nand_data *this = param; 850 size_t i;
472 struct completion *dma_c = &this->dma_done; 851 size_t nbytes;
852 u32 src_buffer = 0;
853 size_t bits_in_src_buffer = 0;
473 854
474 complete(dma_c); 855 if (!nbits)
475} 856 return;
476 857
477int start_dma_without_bch_irq(struct gpmi_nand_data *this, 858 /*
478 struct dma_async_tx_descriptor *desc) 859 * Move src and dst pointers to the closest byte pointer and store bit
479{ 860 * offsets within a byte.
480 struct completion *dma_c = &this->dma_done; 861 */
481 unsigned long timeout; 862 src += src_bit_off / 8;
863 src_bit_off %= 8;
482 864
483 init_completion(dma_c); 865 dst += dst_bit_off / 8;
866 dst_bit_off %= 8;
484 867
485 desc->callback = dma_irq_callback; 868 /*
486 desc->callback_param = this; 869 * Initialize the src_buffer value with bits available in the first
487 dmaengine_submit(desc); 870 * byte of data so that we end up with a byte aligned src pointer.
488 dma_async_issue_pending(get_dma_chan(this)); 871 */
872 if (src_bit_off) {
873 src_buffer = src[0] >> src_bit_off;
874 if (nbits >= (8 - src_bit_off)) {
875 bits_in_src_buffer += 8 - src_bit_off;
876 } else {
877 src_buffer &= GENMASK(nbits - 1, 0);
878 bits_in_src_buffer += nbits;
879 }
880 nbits -= bits_in_src_buffer;
881 src++;
882 }
489 883
490 /* Wait for the interrupt from the DMA block. */ 884 /* Calculate the number of bytes that can be copied from src to dst. */
491 timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); 885 nbytes = nbits / 8;
492 if (!timeout) { 886
493 dev_err(this->dev, "DMA timeout, last DMA\n"); 887 /* Try to align dst to a byte boundary. */
494 gpmi_dump_info(this); 888 if (dst_bit_off) {
495 return -ETIMEDOUT; 889 if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
890 src_buffer |= src[0] << bits_in_src_buffer;
891 bits_in_src_buffer += 8;
892 src++;
893 nbytes--;
894 }
895
896 if (bits_in_src_buffer >= (8 - dst_bit_off)) {
897 dst[0] &= GENMASK(dst_bit_off - 1, 0);
898 dst[0] |= src_buffer << dst_bit_off;
899 src_buffer >>= (8 - dst_bit_off);
900 bits_in_src_buffer -= (8 - dst_bit_off);
901 dst_bit_off = 0;
902 dst++;
903 if (bits_in_src_buffer > 7) {
904 bits_in_src_buffer -= 8;
905 dst[0] = src_buffer;
906 dst++;
907 src_buffer >>= 8;
908 }
909 }
910 }
911
912 if (!bits_in_src_buffer && !dst_bit_off) {
913 /*
914 * Both src and dst pointers are byte aligned, thus we can
915 * just use the optimized memcpy function.
916 */
917 if (nbytes)
918 memcpy(dst, src, nbytes);
919 } else {
920 /*
921 * src buffer is not byte aligned, hence we have to copy each
922 * src byte to the src_buffer variable before extracting a byte
923 * to store in dst.
924 */
925 for (i = 0; i < nbytes; i++) {
926 src_buffer |= src[i] << bits_in_src_buffer;
927 dst[i] = src_buffer;
928 src_buffer >>= 8;
929 }
930 }
931 /* Update dst and src pointers */
932 dst += nbytes;
933 src += nbytes;
934
935 /*
936 * nbits is the number of remaining bits. It should not exceed 8 as
937 * we've already copied as many bytes as possible.
938 */
939 nbits %= 8;
940
941 /*
942 * If there are no more bits to copy to the destination and the src buffer
943 * was already byte aligned, then we're done.
944 */
945 if (!nbits && !bits_in_src_buffer)
946 return;
947
948 /* Copy the remaining bits to src_buffer */
949 if (nbits)
950 src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
951 bits_in_src_buffer;
952 bits_in_src_buffer += nbits;
953
954 /*
955 * In case there were not enough bits to get a byte aligned dst buffer
956 * prepare the src_buffer variable to match the dst organization (shift
957 * src_buffer by dst_bit_off and retrieve the least significant bits
958 * from dst).
959 */
960 if (dst_bit_off)
961 src_buffer = (src_buffer << dst_bit_off) |
962 (*dst & GENMASK(dst_bit_off - 1, 0));
963 bits_in_src_buffer += dst_bit_off;
964
965 /*
966 * Keep most significant bits from dst if we end up with an unaligned
967 * number of bits.
968 */
969 nbytes = bits_in_src_buffer / 8;
970 if (bits_in_src_buffer % 8) {
971 src_buffer |= (dst[nbytes] &
972 GENMASK(7, bits_in_src_buffer % 8)) <<
973 (nbytes * 8);
974 nbytes++;
975 }
976
977 /* Copy the remaining bytes to dst */
978 for (i = 0; i < nbytes; i++) {
979 dst[i] = src_buffer;
980 src_buffer >>= 8;
496 } 981 }
497 return 0;
498} 982}
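As a cross-check of the semantics documented above, the following naive, bit-by-bit reference (an assumption for illustration, not part of this patch) produces the same result as the optimized routine for non-overlapping buffers; bit offsets count from the least significant bit of each byte:

#include <stdint.h>
#include <stdio.h>

static void copy_bits_ref(uint8_t *dst, size_t dst_off,
                          const uint8_t *src, size_t src_off, size_t nbits)
{
        for (size_t i = 0; i < nbits; i++) {
                size_t s = src_off + i, d = dst_off + i;
                uint8_t bit = (src[s / 8] >> (s % 8)) & 1;

                dst[d / 8] &= ~(1u << (d % 8));
                dst[d / 8] |= bit << (d % 8);
        }
}

int main(void)
{
        uint8_t src[2] = { 0xb5, 0x01 }, dst[2] = { 0, 0 };

        /* copy bits 3..7 of src into bits 6..10 of dst */
        copy_bits_ref(dst, 6, src, 3, 5);
        printf("%02x %02x\n", dst[0], dst[1]);  /* prints "80 05" */
        return 0;
}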
499 983
984/* add our own bbt descriptor */
985static uint8_t scan_ff_pattern[] = { 0xff };
986static struct nand_bbt_descr gpmi_bbt_descr = {
987 .options = 0,
988 .offs = 0,
989 .len = 1,
990 .pattern = scan_ff_pattern
991};
992
500/* 993/*
501 * This function is used in BCH reading or BCH writing pages. 994 * We may change the layout if we can get the ECC info from the datasheet,
502 * It will wait for the BCH interrupt as long as ONE second. 995 * else we will use all the (page + OOB).
503 * Actually, we must wait for two interrupts :
504 * [1] firstly the DMA interrupt and
505 * [2] secondly the BCH interrupt.
506 */ 996 */
507int start_dma_with_bch_irq(struct gpmi_nand_data *this, 997static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
508 struct dma_async_tx_descriptor *desc) 998 struct mtd_oob_region *oobregion)
509{ 999{
510 struct completion *bch_c = &this->bch_done; 1000 struct nand_chip *chip = mtd_to_nand(mtd);
511 unsigned long timeout; 1001 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1002 struct bch_geometry *geo = &this->bch_geometry;
512 1003
513 /* Prepare to receive an interrupt from the BCH block. */ 1004 if (section)
514 init_completion(bch_c); 1005 return -ERANGE;
515 1006
516 /* start the DMA */ 1007 oobregion->offset = 0;
517 start_dma_without_bch_irq(this, desc); 1008 oobregion->length = geo->page_size - mtd->writesize;
518 1009
519 /* Wait for the interrupt from the BCH block. */ 1010 return 0;
520 timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); 1011}
521 if (!timeout) { 1012
522 dev_err(this->dev, "BCH timeout\n"); 1013static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
523 gpmi_dump_info(this); 1014 struct mtd_oob_region *oobregion)
524 return -ETIMEDOUT; 1015{
1016 struct nand_chip *chip = mtd_to_nand(mtd);
1017 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1018 struct bch_geometry *geo = &this->bch_geometry;
1019
1020 if (section)
1021 return -ERANGE;
1022
1023 /* The available oob size we have. */
1024 if (geo->page_size < mtd->writesize + mtd->oobsize) {
1025 oobregion->offset = geo->page_size - mtd->writesize;
1026 oobregion->length = mtd->oobsize - oobregion->offset;
525 } 1027 }
1028
526 return 0; 1029 return 0;
527} 1030}
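A worked example of the two layout callbacks, with assumed geometry that is not taken from this patch (writesize 2048, oobsize 64, BCH page_size 2092): the ECC region then covers OOB bytes 0..43 and the free region bytes 44..63:

#include <stdio.h>

int main(void)
{
        unsigned int writesize = 2048, oobsize = 64;    /* assumed */
        unsigned int bch_page_size = 2092;              /* assumed */
        unsigned int ecc_len = bch_page_size - writesize;       /* 44 */
        unsigned int free_off = ecc_len;                        /* 44 */
        unsigned int free_len = oobsize - free_off;             /* 20 */

        printf("ecc: 0..%u free: %u..%u\n",
               ecc_len - 1, free_off, free_off + free_len - 1);
        return 0;
}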
528 1031
1032static const char * const gpmi_clks_for_mx2x[] = {
1033 "gpmi_io",
1034};
1035
1036static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
1037 .ecc = gpmi_ooblayout_ecc,
1038 .free = gpmi_ooblayout_free,
1039};
1040
1041static const struct gpmi_devdata gpmi_devdata_imx23 = {
1042 .type = IS_MX23,
1043 .bch_max_ecc_strength = 20,
1044 .max_chain_delay = 16000,
1045 .clks = gpmi_clks_for_mx2x,
1046 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1047};
1048
1049static const struct gpmi_devdata gpmi_devdata_imx28 = {
1050 .type = IS_MX28,
1051 .bch_max_ecc_strength = 20,
1052 .max_chain_delay = 16000,
1053 .clks = gpmi_clks_for_mx2x,
1054 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
1055};
1056
1057static const char * const gpmi_clks_for_mx6[] = {
1058 "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
1059};
1060
1061static const struct gpmi_devdata gpmi_devdata_imx6q = {
1062 .type = IS_MX6Q,
1063 .bch_max_ecc_strength = 40,
1064 .max_chain_delay = 12000,
1065 .clks = gpmi_clks_for_mx6,
1066 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1067};
1068
1069static const struct gpmi_devdata gpmi_devdata_imx6sx = {
1070 .type = IS_MX6SX,
1071 .bch_max_ecc_strength = 62,
1072 .max_chain_delay = 12000,
1073 .clks = gpmi_clks_for_mx6,
1074 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
1075};
1076
1077static const char * const gpmi_clks_for_mx7d[] = {
1078 "gpmi_io", "gpmi_bch_apb",
1079};
1080
1081static const struct gpmi_devdata gpmi_devdata_imx7d = {
1082 .type = IS_MX7D,
1083 .bch_max_ecc_strength = 62,
1084 .max_chain_delay = 12000,
1085 .clks = gpmi_clks_for_mx7d,
1086 .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
1087};
1088
529static int acquire_register_block(struct gpmi_nand_data *this, 1089static int acquire_register_block(struct gpmi_nand_data *this,
530 const char *res_name) 1090 const char *res_name)
531{ 1091{
@@ -667,68 +1227,20 @@ static void release_resources(struct gpmi_nand_data *this)
667 release_dma_channels(this); 1227 release_dma_channels(this);
668} 1228}
669 1229
670static int send_page_prepare(struct gpmi_nand_data *this,
671 const void *source, unsigned length,
672 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
673 const void **use_virt, dma_addr_t *use_phys)
674{
675 struct device *dev = this->dev;
676
677 if (virt_addr_valid(source)) {
678 dma_addr_t source_phys;
679
680 source_phys = dma_map_single(dev, (void *)source, length,
681 DMA_TO_DEVICE);
682 if (dma_mapping_error(dev, source_phys)) {
683 if (alt_size < length) {
684 dev_err(dev, "Alternate buffer is too small\n");
685 return -ENOMEM;
686 }
687 goto map_failed;
688 }
689 *use_virt = source;
690 *use_phys = source_phys;
691 return 0;
692 }
693map_failed:
694 /*
695 * Copy the content of the source buffer into the alternate
696 * buffer and set up the return values accordingly.
697 */
698 memcpy(alt_virt, source, length);
699
700 *use_virt = alt_virt;
701 *use_phys = alt_phys;
702 return 0;
703}
704
705static void send_page_end(struct gpmi_nand_data *this,
706 const void *source, unsigned length,
707 void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
708 const void *used_virt, dma_addr_t used_phys)
709{
710 struct device *dev = this->dev;
711 if (used_virt == source)
712 dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
713}
714
715static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) 1230static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
716{ 1231{
717 struct device *dev = this->dev; 1232 struct device *dev = this->dev;
1233 struct bch_geometry *geo = &this->bch_geometry;
718 1234
719 if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt)) 1235 if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
720 dma_free_coherent(dev, this->page_buffer_size, 1236 dma_free_coherent(dev, geo->auxiliary_size,
721 this->page_buffer_virt, 1237 this->auxiliary_virt,
722 this->page_buffer_phys); 1238 this->auxiliary_phys);
723 kfree(this->cmd_buffer);
724 kfree(this->data_buffer_dma); 1239 kfree(this->data_buffer_dma);
725 kfree(this->raw_buffer); 1240 kfree(this->raw_buffer);
726 1241
727 this->cmd_buffer = NULL;
728 this->data_buffer_dma = NULL; 1242 this->data_buffer_dma = NULL;
729 this->raw_buffer = NULL; 1243 this->raw_buffer = NULL;
730 this->page_buffer_virt = NULL;
731 this->page_buffer_size = 0;
732} 1244}
733 1245
734/* Allocate the DMA buffers */ 1246/* Allocate the DMA buffers */
@@ -738,11 +1250,6 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
738 struct device *dev = this->dev; 1250 struct device *dev = this->dev;
739 struct mtd_info *mtd = nand_to_mtd(&this->nand); 1251 struct mtd_info *mtd = nand_to_mtd(&this->nand);
740 1252
741 /* [1] Allocate a command buffer. PAGE_SIZE is enough. */
742 this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
743 if (this->cmd_buffer == NULL)
744 goto error_alloc;
745
746 /* 1253 /*
747 * [2] Allocate a read/write data buffer. 1254 * [2] Allocate a read/write data buffer.
748 * The gpmi_alloc_dma_buffer can be called twice. 1255 * The gpmi_alloc_dma_buffer can be called twice.
@@ -756,29 +1263,15 @@ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
756 if (this->data_buffer_dma == NULL) 1263 if (this->data_buffer_dma == NULL)
757 goto error_alloc; 1264 goto error_alloc;
758 1265
759 /* 1266 this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
760 * [3] Allocate the page buffer. 1267 &this->auxiliary_phys, GFP_DMA);
761 * 1268 if (!this->auxiliary_virt)
762 * Both the payload buffer and the auxiliary buffer must appear on
763 * 32-bit boundaries. We presume the size of the payload buffer is a
764 * power of two and is much larger than four, which guarantees the
765 * auxiliary buffer will appear on a 32-bit boundary.
766 */
767 this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
768 this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
769 &this->page_buffer_phys, GFP_DMA);
770 if (!this->page_buffer_virt)
771 goto error_alloc; 1269 goto error_alloc;
772 1270
773 this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); 1271 this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
774 if (!this->raw_buffer) 1272 if (!this->raw_buffer)
775 goto error_alloc; 1273 goto error_alloc;
776 1274
777 /* Slice up the page buffer. */
778 this->payload_virt = this->page_buffer_virt;
779 this->payload_phys = this->page_buffer_phys;
780 this->auxiliary_virt = this->payload_virt + geo->payload_size;
781 this->auxiliary_phys = this->payload_phys + geo->payload_size;
782 return 0; 1275 return 0;
783 1276
784error_alloc: 1277error_alloc:
@@ -786,106 +1279,6 @@ error_alloc:
786 return -ENOMEM; 1279 return -ENOMEM;
787} 1280}
788 1281
789static void gpmi_cmd_ctrl(struct nand_chip *chip, int data, unsigned int ctrl)
790{
791 struct gpmi_nand_data *this = nand_get_controller_data(chip);
792 int ret;
793
794 /*
795 * Every operation begins with a command byte and a series of zero or
796 * more address bytes. These are distinguished by either the Address
797 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
798 * asserted. When MTD is ready to execute the command, it will deassert
799 * both latch enables.
800 *
801 * Rather than run a separate DMA operation for every single byte, we
802 * queue them up and run a single DMA operation for the entire series
803 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
804 */
805 if ((ctrl & (NAND_ALE | NAND_CLE))) {
806 if (data != NAND_CMD_NONE)
807 this->cmd_buffer[this->command_length++] = data;
808 return;
809 }
810
811 if (!this->command_length)
812 return;
813
814 ret = gpmi_send_command(this);
815 if (ret)
816 dev_err(this->dev, "Chip: %u, Error %d\n",
817 this->current_chip, ret);
818
819 this->command_length = 0;
820}
821
822static int gpmi_dev_ready(struct nand_chip *chip)
823{
824 struct gpmi_nand_data *this = nand_get_controller_data(chip);
825
826 return gpmi_is_ready(this, this->current_chip);
827}
828
829static void gpmi_select_chip(struct nand_chip *chip, int chipnr)
830{
831 struct gpmi_nand_data *this = nand_get_controller_data(chip);
832 int ret;
833
834 /*
835 * For power consumption matters, disable/enable the clock each time a
836 * die is selected/unselected.
837 */
838 if (this->current_chip < 0 && chipnr >= 0) {
839 ret = gpmi_enable_clk(this);
840 if (ret)
841 dev_err(this->dev, "Failed to enable the clock\n");
842 } else if (this->current_chip >= 0 && chipnr < 0) {
843 ret = gpmi_disable_clk(this);
844 if (ret)
845 dev_err(this->dev, "Failed to disable the clock\n");
846 }
847
848 /*
849 * This driver currently supports only one NAND chip. Plus, dies share
850 * the same configuration. So once timings have been applied on the
851 * controller side, they will not change anymore. When the time will
852 * come, the check on must_apply_timings will have to be dropped.
853 */
854 if (chipnr >= 0 && this->hw.must_apply_timings) {
855 this->hw.must_apply_timings = false;
856 gpmi_nfc_apply_timings(this);
857 }
858
859 this->current_chip = chipnr;
860}
861
862static void gpmi_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
863{
864 struct gpmi_nand_data *this = nand_get_controller_data(chip);
865
866 dev_dbg(this->dev, "len is %d\n", len);
867
868 gpmi_read_data(this, buf, len);
869}
870
871static void gpmi_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
872{
873 struct gpmi_nand_data *this = nand_get_controller_data(chip);
874
875 dev_dbg(this->dev, "len is %d\n", len);
876
877 gpmi_send_data(this, buf, len);
878}
879
880static uint8_t gpmi_read_byte(struct nand_chip *chip)
881{
882 struct gpmi_nand_data *this = nand_get_controller_data(chip);
883 uint8_t *buf = this->data_buffer_dma;
884
885 gpmi_read_buf(chip, buf, 1);
886 return buf[0];
887}
888
889/* 1282/*
890 * Handles block mark swapping. 1283 * Handles block mark swapping.
891 * It can be called in swapping the block mark, or swapping it back, 1284 * It can be called in swapping the block mark, or swapping it back,
@@ -934,54 +1327,20 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
934 p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); 1327 p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
935} 1328}
936 1329
937static int gpmi_ecc_read_page_data(struct nand_chip *chip, 1330static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
938 uint8_t *buf, int oob_required, 1331 int last, int meta)
939 int page)
940{ 1332{
941 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1333 struct gpmi_nand_data *this = nand_get_controller_data(chip);
942 struct bch_geometry *nfc_geo = &this->bch_geometry; 1334 struct bch_geometry *nfc_geo = &this->bch_geometry;
943 struct mtd_info *mtd = nand_to_mtd(chip); 1335 struct mtd_info *mtd = nand_to_mtd(chip);
944 dma_addr_t payload_phys; 1336 int i;
945 unsigned int i;
946 unsigned char *status; 1337 unsigned char *status;
947 unsigned int max_bitflips = 0; 1338 unsigned int max_bitflips = 0;
948 int ret;
949 bool direct = false;
950
951 dev_dbg(this->dev, "page number is : %d\n", page);
952
953 payload_phys = this->payload_phys;
954
955 if (virt_addr_valid(buf)) {
956 dma_addr_t dest_phys;
957
958 dest_phys = dma_map_single(this->dev, buf, nfc_geo->payload_size,
959 DMA_FROM_DEVICE);
960 if (!dma_mapping_error(this->dev, dest_phys)) {
961 payload_phys = dest_phys;
962 direct = true;
963 }
964 }
965
966 /* go! */
967 ret = gpmi_read_page(this, payload_phys, this->auxiliary_phys);
968
969 if (direct)
970 dma_unmap_single(this->dev, payload_phys, nfc_geo->payload_size,
971 DMA_FROM_DEVICE);
972
973 if (ret) {
974 dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
975 return ret;
976 }
977 1339
978 /* Loop over status bytes, accumulating ECC status. */ 1340 /* Loop over status bytes, accumulating ECC status. */
979 status = this->auxiliary_virt + nfc_geo->auxiliary_status_offset; 1341 status = this->auxiliary_virt + ALIGN(meta, 4);
980
981 if (!direct)
982 memcpy(buf, this->payload_virt, nfc_geo->payload_size);
983 1342
984 for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { 1343 for (i = first; i < last; i++, status++) {
985 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) 1344 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
986 continue; 1345 continue;
987 1346
@@ -1061,6 +1420,50 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
1061 max_bitflips = max_t(unsigned int, max_bitflips, *status); 1420 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1062 } 1421 }
1063 1422
1423 return max_bitflips;
1424}
1425
1426static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1427{
1428 struct bch_geometry *geo = &this->bch_geometry;
1429 unsigned int ecc_strength = geo->ecc_strength >> 1;
1430 unsigned int gf_len = geo->gf_len;
1431 unsigned int block_size = geo->ecc_chunk_size;
1432
1433 this->bch_flashlayout0 =
1434 BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1435 BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1436 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1437 BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1438 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1439
1440 this->bch_flashlayout1 =
1441 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1442 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1443 BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1444 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1445}
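Note the ecc_strength >> 1 above: the ECC0/ECCN register fields encode the BCH strength in steps of two, so an assumed BCH-16 geometry is programmed as 8. A tiny sketch of the encoding:

#include <stdio.h>

int main(void)
{
        unsigned int ecc_strength = 16; /* assumed geo->ecc_strength */

        /* the ECC0/ECCN fields hold the strength divided by two */
        printf("field = %u\n", ecc_strength >> 1);      /* 8 */
        return 0;
}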
1446
1447static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1448 int oob_required, int page)
1449{
1450 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1451 struct mtd_info *mtd = nand_to_mtd(chip);
1452 struct bch_geometry *geo = &this->bch_geometry;
1453 unsigned int max_bitflips;
1454 int ret;
1455
1456 gpmi_bch_layout_std(this);
1457 this->bch = true;
1458
1459 ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1460 if (ret)
1461 return ret;
1462
1463 max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1464 geo->ecc_chunk_count,
1465 geo->auxiliary_status_offset);
1466
1064 /* handle the block mark swapping */ 1467 /* handle the block mark swapping */
1065 block_mark_swapping(this, buf, this->auxiliary_virt); 1468 block_mark_swapping(this, buf, this->auxiliary_virt);
1066 1469
@@ -1082,30 +1485,20 @@ static int gpmi_ecc_read_page_data(struct nand_chip *chip,
1082 return max_bitflips; 1485 return max_bitflips;
1083} 1486}
1084 1487
1085static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1086 int oob_required, int page)
1087{
1088 nand_read_page_op(chip, page, 0, NULL, 0);
1089
1090 return gpmi_ecc_read_page_data(chip, buf, oob_required, page);
1091}
1092
1093/* Fake a virtual small page for the subpage read */ 1488/* Fake a virtual small page for the subpage read */
1094static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs, 1489static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1095 uint32_t len, uint8_t *buf, int page) 1490 uint32_t len, uint8_t *buf, int page)
1096{ 1491{
1097 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1492 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1098 void __iomem *bch_regs = this->resources.bch_regs;
1099 struct bch_geometry old_geo = this->bch_geometry;
1100 struct bch_geometry *geo = &this->bch_geometry; 1493 struct bch_geometry *geo = &this->bch_geometry;
1101 int size = chip->ecc.size; /* ECC chunk size */ 1494 int size = chip->ecc.size; /* ECC chunk size */
1102 int meta, n, page_size; 1495 int meta, n, page_size;
1103 u32 r1_old, r2_old, r1_new, r2_new;
1104 unsigned int max_bitflips; 1496 unsigned int max_bitflips;
1497 unsigned int ecc_strength;
1105 int first, last, marker_pos; 1498 int first, last, marker_pos;
1106 int ecc_parity_size; 1499 int ecc_parity_size;
1107 int col = 0; 1500 int col = 0;
1108 int old_swap_block_mark = this->swap_block_mark; 1501 int ret;
1109 1502
1110 /* The size of ECC parity */ 1503 /* The size of ECC parity */
1111 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8; 1504 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
@@ -1138,43 +1531,33 @@ static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1138 buf = buf + first * size; 1531 buf = buf + first * size;
1139 } 1532 }
1140 1533
1141 nand_read_page_op(chip, page, col, NULL, 0); 1534 ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1142
1143 /* Save the old environment */
1144 r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
1145 r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
1146 1535
1147 /* change the BCH registers and bch_geometry{} */
1148 n = last - first + 1; 1536 n = last - first + 1;
1149 page_size = meta + (size + ecc_parity_size) * n; 1537 page_size = meta + (size + ecc_parity_size) * n;
1538 ecc_strength = geo->ecc_strength >> 1;
1150 1539
1151 r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS | 1540 this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1152 BM_BCH_FLASH0LAYOUT0_META_SIZE); 1541 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1153 r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) 1542 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1154 | BF_BCH_FLASH0LAYOUT0_META_SIZE(meta); 1543 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1155 writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0); 1544 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1156 1545
1157 r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE; 1546 this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1158 r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size); 1547 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1159 writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1); 1548 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1549 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1160 1550
1161 geo->ecc_chunk_count = n; 1551 this->bch = true;
1162 geo->payload_size = n * size; 1552
1163 geo->page_size = page_size; 1553 ret = nand_read_page_op(chip, page, col, buf, page_size);
1164 geo->auxiliary_status_offset = ALIGN(meta, 4); 1554 if (ret)
1555 return ret;
1165 1556
1166 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n", 1557 dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1167 page, offs, len, col, first, n, page_size); 1558 page, offs, len, col, first, n, page_size);
1168 1559
1169 /* Read the subpage now */ 1560 max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1170 this->swap_block_mark = false;
1171 max_bitflips = gpmi_ecc_read_page_data(chip, buf, 0, page);
1172
1173 /* Restore */
1174 writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
1175 writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
1176 this->bch_geometry = old_geo;
1177 this->swap_block_mark = old_swap_block_mark;
1178 1561
1179 return max_bitflips; 1562 return max_bitflips;
1180} 1563}
@@ -1185,81 +1568,29 @@ static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1185 struct mtd_info *mtd = nand_to_mtd(chip); 1568 struct mtd_info *mtd = nand_to_mtd(chip);
1186 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1569 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1187 struct bch_geometry *nfc_geo = &this->bch_geometry; 1570 struct bch_geometry *nfc_geo = &this->bch_geometry;
1188 const void *payload_virt; 1571 int ret;
1189 dma_addr_t payload_phys;
1190 const void *auxiliary_virt;
1191 dma_addr_t auxiliary_phys;
1192 int ret;
1193 1572
1194 dev_dbg(this->dev, "ecc write page.\n"); 1573 dev_dbg(this->dev, "ecc write page.\n");
1195 1574
1196 nand_prog_page_begin_op(chip, page, 0, NULL, 0); 1575 gpmi_bch_layout_std(this);
1576 this->bch = true;
1577
1578 memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1197 1579
1198 if (this->swap_block_mark) { 1580 if (this->swap_block_mark) {
1199 /* 1581 /*
1200 * If control arrives here, we're doing block mark swapping. 1582 * When doing bad block marker swapping we must always copy the
1201 * Since we can't modify the caller's buffers, we must copy them 1583 * input buffer as we can't modify the const buffer.
1202 * into our own.
1203 */
1204 memcpy(this->payload_virt, buf, mtd->writesize);
1205 payload_virt = this->payload_virt;
1206 payload_phys = this->payload_phys;
1207
1208 memcpy(this->auxiliary_virt, chip->oob_poi,
1209 nfc_geo->auxiliary_size);
1210 auxiliary_virt = this->auxiliary_virt;
1211 auxiliary_phys = this->auxiliary_phys;
1212
1213 /* Handle block mark swapping. */
1214 block_mark_swapping(this,
1215 (void *)payload_virt, (void *)auxiliary_virt);
1216 } else {
1217 /*
1218 * If control arrives here, we're not doing block mark swapping,
1219 * so we can to try and use the caller's buffers.
1220 */ 1584 */
1221 ret = send_page_prepare(this, 1585 memcpy(this->data_buffer_dma, buf, mtd->writesize);
1222 buf, mtd->writesize, 1586 buf = this->data_buffer_dma;
1223 this->payload_virt, this->payload_phys, 1587 block_mark_swapping(this, this->data_buffer_dma,
1224 nfc_geo->payload_size, 1588 this->auxiliary_virt);
1225 &payload_virt, &payload_phys);
1226 if (ret) {
1227 dev_err(this->dev, "Inadequate payload DMA buffer\n");
1228 return 0;
1229 }
1230
1231 ret = send_page_prepare(this,
1232 chip->oob_poi, mtd->oobsize,
1233 this->auxiliary_virt, this->auxiliary_phys,
1234 nfc_geo->auxiliary_size,
1235 &auxiliary_virt, &auxiliary_phys);
1236 if (ret) {
1237 dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
1238 goto exit_auxiliary;
1239 }
1240 } 1589 }
1241 1590
1242 /* Ask the NFC. */ 1591 ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1243 ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
1244 if (ret)
1245 dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
1246
1247 if (!this->swap_block_mark) {
1248 send_page_end(this, chip->oob_poi, mtd->oobsize,
1249 this->auxiliary_virt, this->auxiliary_phys,
1250 nfc_geo->auxiliary_size,
1251 auxiliary_virt, auxiliary_phys);
1252exit_auxiliary:
1253 send_page_end(this, buf, mtd->writesize,
1254 this->payload_virt, this->payload_phys,
1255 nfc_geo->payload_size,
1256 payload_virt, payload_phys);
1257 }
1258 1592
1259 if (ret) 1593 return ret;
1260 return ret;
1261
1262 return nand_prog_page_end_op(chip);
1263} 1594}
1264 1595
1265/* 1596/*
@@ -1326,14 +1657,16 @@ static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1326{ 1657{
1327 struct mtd_info *mtd = nand_to_mtd(chip); 1658 struct mtd_info *mtd = nand_to_mtd(chip);
1328 struct gpmi_nand_data *this = nand_get_controller_data(chip); 1659 struct gpmi_nand_data *this = nand_get_controller_data(chip);
1660 int ret;
1329 1661
1330 dev_dbg(this->dev, "page number is %d\n", page);
1331 /* clear the OOB buffer */ 1662 /* clear the OOB buffer */
1332 memset(chip->oob_poi, ~0, mtd->oobsize); 1663 memset(chip->oob_poi, ~0, mtd->oobsize);
1333 1664
1334 /* Read out the conventional OOB. */ 1665 /* Read out the conventional OOB. */
1335 nand_read_page_op(chip, page, mtd->writesize, NULL, 0); 1666 ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1336 chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize); 1667 mtd->oobsize);
1668 if (ret)
1669 return ret;
1337 1670
1338 /* 1671 /*
1339 * Now, we want to make sure the block mark is correct. In the 1672 * Now, we want to make sure the block mark is correct. In the
@@ -1342,8 +1675,9 @@ static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1342 */ 1675 */
1343 if (GPMI_IS_MX23(this)) { 1676 if (GPMI_IS_MX23(this)) {
1344 /* Read the block mark into the first byte of the OOB buffer. */ 1677 /* Read the block mark into the first byte of the OOB buffer. */
1345 nand_read_page_op(chip, page, 0, NULL, 0); 1678 ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1346 chip->oob_poi[0] = chip->legacy.read_byte(chip); 1679 if (ret)
1680 return ret;
1347 } 1681 }
1348 1682
1349 return 0; 1683 return 0;
@@ -1392,9 +1726,12 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1392 size_t oob_byte_off; 1726 size_t oob_byte_off;
1393 uint8_t *oob = chip->oob_poi; 1727 uint8_t *oob = chip->oob_poi;
1394 int step; 1728 int step;
1729 int ret;
1395 1730
1396 nand_read_page_op(chip, page, 0, tmp_buf, 1731 ret = nand_read_page_op(chip, page, 0, tmp_buf,
1397 mtd->writesize + mtd->oobsize); 1732 mtd->writesize + mtd->oobsize);
1733 if (ret)
1734 return ret;
1398 1735
1399 /* 1736 /*
1400 * If required, swap the bad block marker and the data stored in the 1737 * If required, swap the bad block marker and the data stored in the
@@ -1606,13 +1943,12 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1606 unsigned int stride; 1943 unsigned int stride;
1607 unsigned int page; 1944 unsigned int page;
1608 u8 *buffer = nand_get_data_buf(chip); 1945 u8 *buffer = nand_get_data_buf(chip);
1609 int saved_chip_number;
1610 int found_an_ncb_fingerprint = false; 1946 int found_an_ncb_fingerprint = false;
1947 int ret;
1611 1948
1612 /* Compute the number of strides in a search area. */ 1949 /* Compute the number of strides in a search area. */
1613 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; 1950 search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1614 1951
1615 saved_chip_number = this->current_chip;
1616 nand_select_target(chip, 0); 1952 nand_select_target(chip, 0);
1617 1953
1618 /* 1954 /*
@@ -1630,8 +1966,10 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1630 * Read the NCB fingerprint. The fingerprint is four bytes long 1966 * Read the NCB fingerprint. The fingerprint is four bytes long
1631 * and starts in the 12th byte of the page. 1967 * and starts in the 12th byte of the page.
1632 */ 1968 */
1633 nand_read_page_op(chip, page, 12, NULL, 0); 1969 ret = nand_read_page_op(chip, page, 12, buffer,
1634 chip->legacy.read_buf(chip, buffer, strlen(fingerprint)); 1970 strlen(fingerprint));
1971 if (ret)
1972 continue;
1635 1973
1636 /* Look for the fingerprint. */ 1974 /* Look for the fingerprint. */
1637 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) { 1975 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
@@ -1641,10 +1979,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1641 1979
1642 } 1980 }
1643 1981
1644 if (saved_chip_number >= 0) 1982 nand_deselect_target(chip);
1645 nand_select_target(chip, saved_chip_number);
1646 else
1647 nand_deselect_target(chip);
1648 1983
1649 if (found_an_ncb_fingerprint) 1984 if (found_an_ncb_fingerprint)
1650 dev_dbg(dev, "\tFound a fingerprint\n"); 1985 dev_dbg(dev, "\tFound a fingerprint\n");
@@ -1668,7 +2003,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1668 unsigned int stride; 2003 unsigned int stride;
1669 unsigned int page; 2004 unsigned int page;
1670 u8 *buffer = nand_get_data_buf(chip); 2005 u8 *buffer = nand_get_data_buf(chip);
1671 int saved_chip_number;
1672 int status; 2006 int status;
1673 2007
1674 /* Compute the search area geometry. */ 2008 /* Compute the search area geometry. */
@@ -1685,8 +2019,6 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1685 dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides); 2019 dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
1686 dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages); 2020 dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages);
1687 2021
1688 /* Select chip 0. */
1689 saved_chip_number = this->current_chip;
1690 nand_select_target(chip, 0); 2022 nand_select_target(chip, 0);
1691 2023
1692 /* Loop over blocks in the first search area, erasing them. */ 2024 /* Loop over blocks in the first search area, erasing them. */
@@ -1718,11 +2050,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1718 dev_err(dev, "[%s] Write failed.\n", __func__); 2050 dev_err(dev, "[%s] Write failed.\n", __func__);
1719 } 2051 }
1720 2052
1721 /* Deselect chip 0. */ 2053 nand_deselect_target(chip);
1722 if (saved_chip_number >= 0)
1723 nand_select_target(chip, saved_chip_number);
1724 else
1725 nand_deselect_target(chip);
1726 2054
1727 return 0; 2055 return 0;
1728} 2056}
@@ -1773,10 +2101,13 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
1773 2101
1774 /* Send the command to read the conventional block mark. */ 2102 /* Send the command to read the conventional block mark. */
1775 nand_select_target(chip, chipnr); 2103 nand_select_target(chip, chipnr);
1776 nand_read_page_op(chip, page, mtd->writesize, NULL, 0); 2104 ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
1777 block_mark = chip->legacy.read_byte(chip); 2105 1);
1778 nand_deselect_target(chip); 2106 nand_deselect_target(chip);
1779 2107
2108 if (ret)
2109 continue;
2110
1780 /* 2111 /*
1781 * Check if the block is marked bad. If so, we need to mark it 2112 * Check if the block is marked bad. If so, we need to mark it
1782 * again, but this time the result will be a mark in the 2113 * again, but this time the result will be a mark in the
@@ -1890,9 +2221,330 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
1890 return 0; 2221 return 0;
1891} 2222}
1892 2223
2224static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2225{
2226 struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2227
2228 this->ntransfers++;
2229
2230 if (this->ntransfers == GPMI_MAX_TRANSFERS)
2231 return NULL;
2232
2233 return transfer;
2234}
2235
2236static struct dma_async_tx_descriptor *gpmi_chain_command(
2237 struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2238{
2239 struct dma_chan *channel = get_dma_chan(this);
2240 struct dma_async_tx_descriptor *desc;
2241 struct gpmi_transfer *transfer;
2242 int chip = this->nand.cur_cs;
2243 u32 pio[3];
2244
2245 /* [1] send out the PIO words */
2246 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2247 | BM_GPMI_CTRL0_WORD_LENGTH
2248 | BF_GPMI_CTRL0_CS(chip, this)
2249 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2250 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2251 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2252 | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
2253 pio[1] = 0;
2254 pio[2] = 0;
2255 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2256 DMA_TRANS_NONE, 0);
2257 if (!desc)
2258 return NULL;
2259
2260 transfer = get_next_transfer(this);
2261 if (!transfer)
2262 return NULL;
2263
2264 transfer->cmdbuf[0] = cmd;
2265 if (naddr)
2266 memcpy(&transfer->cmdbuf[1], addr, naddr);
2267
2268 sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2269 dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2270
2271 transfer->direction = DMA_TO_DEVICE;
2272
2273 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2274 MXS_DMA_CTRL_WAIT4END);
2275 return desc;
2276}
2277
2278static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2279 struct gpmi_nand_data *this)
2280{
2281 struct dma_chan *channel = get_dma_chan(this);
2282 u32 pio[2];
2283
2284 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2285 | BM_GPMI_CTRL0_WORD_LENGTH
2286 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2287 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2288 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2289 | BF_GPMI_CTRL0_XFER_COUNT(0);
2290 pio[1] = 0;
2291
2292 return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2293 MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2294}
2295
2296static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2297 struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2298{
2299 struct dma_async_tx_descriptor *desc;
2300 struct dma_chan *channel = get_dma_chan(this);
2301 struct gpmi_transfer *transfer;
2302 u32 pio[6] = {};
2303
2304 transfer = get_next_transfer(this);
2305 if (!transfer)
2306 return NULL;
2307
2308 transfer->direction = DMA_FROM_DEVICE;
2309
2310 *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2311 DMA_FROM_DEVICE);
2312
2313 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2314 | BM_GPMI_CTRL0_WORD_LENGTH
2315 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2316 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2317 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2318 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2319
2320 if (this->bch) {
2321 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2322 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2323 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2324 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2325 pio[3] = raw_len;
2326 pio[4] = transfer->sgl.dma_address;
2327 pio[5] = this->auxiliary_phys;
2328 }
2329
2330 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2331 DMA_TRANS_NONE, 0);
2332 if (!desc)
2333 return NULL;
2334
2335 if (!this->bch)
2336 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2337 DMA_DEV_TO_MEM,
2338 MXS_DMA_CTRL_WAIT4END);
2339
2340 return desc;
2341}
2342
2343static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2344 struct gpmi_nand_data *this, const void *buf, int raw_len)
2345{
2346 struct dma_chan *channel = get_dma_chan(this);
2347 struct dma_async_tx_descriptor *desc;
2348 struct gpmi_transfer *transfer;
2349 u32 pio[6] = {};
2350
2351 transfer = get_next_transfer(this);
2352 if (!transfer)
2353 return NULL;
2354
2355 transfer->direction = DMA_TO_DEVICE;
2356
2357 prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2358
2359 pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2360 | BM_GPMI_CTRL0_WORD_LENGTH
2361 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2362 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2363 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2364 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2365
2366 if (this->bch) {
2367 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2368 | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2369 | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2370 BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2371 pio[3] = raw_len;
2372 pio[4] = transfer->sgl.dma_address;
2373 pio[5] = this->auxiliary_phys;
2374 }
2375
2376 desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2377 DMA_TRANS_NONE,
2378 (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2379 if (!desc)
2380 return NULL;
2381
2382 if (!this->bch)
2383 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2384 DMA_MEM_TO_DEV,
2385 MXS_DMA_CTRL_WAIT4END);
2386
2387 return desc;
2388}
2389
2390static int gpmi_nfc_exec_op(struct nand_chip *chip,
2391 const struct nand_operation *op,
2392 bool check_only)
2393{
2394 const struct nand_op_instr *instr;
2395 struct gpmi_nand_data *this = nand_get_controller_data(chip);
2396 struct dma_async_tx_descriptor *desc = NULL;
2397 int i, ret, buf_len = 0, nbufs = 0;
2398 u8 cmd = 0;
2399 void *buf_read = NULL;
2400 const void *buf_write = NULL;
2401 bool direct = false;
2402 struct completion *completion;
2403 unsigned long to;
2404
2405 this->ntransfers = 0;
2406 for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2407 this->transfers[i].direction = DMA_NONE;
2408
2409 ret = pm_runtime_get_sync(this->dev);
2410 if (ret < 0)
2411 return ret;
2412
2413 /*
2414 * This driver currently supports only one NAND chip. Plus, dies share
2415 * the same configuration. So once timings have been applied on the
2416 * controller side, they will not change anymore. When the time will
2417 * come, the check on must_apply_timings will have to be dropped.
2418 */
2419 if (this->hw.must_apply_timings) {
2420 this->hw.must_apply_timings = false;
2421 gpmi_nfc_apply_timings(this);
2422 }
2423
2424 dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2425
2426 for (i = 0; i < op->ninstrs; i++) {
2427 instr = &op->instrs[i];
2428
2429 nand_op_trace(" ", instr);
2430
2431 switch (instr->type) {
2432 case NAND_OP_WAITRDY_INSTR:
2433 desc = gpmi_chain_wait_ready(this);
2434 break;
2435 case NAND_OP_CMD_INSTR:
2436 cmd = instr->ctx.cmd.opcode;
2437
2438 /*
2439 * When this command has an address cycle, chain it
2440 * together with the address cycle.
2441 */
2442 if (i + 1 != op->ninstrs &&
2443 op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2444 continue;
2445
2446 desc = gpmi_chain_command(this, cmd, NULL, 0);
2447
2448 break;
2449 case NAND_OP_ADDR_INSTR:
2450 desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2451 instr->ctx.addr.naddrs);
2452 break;
2453 case NAND_OP_DATA_OUT_INSTR:
2454 buf_write = instr->ctx.data.buf.out;
2455 buf_len = instr->ctx.data.len;
2456 nbufs++;
2457
2458 desc = gpmi_chain_data_write(this, buf_write, buf_len);
2459
2460 break;
2461 case NAND_OP_DATA_IN_INSTR:
2462 if (!instr->ctx.data.len)
2463 break;
2464 buf_read = instr->ctx.data.buf.in;
2465 buf_len = instr->ctx.data.len;
2466 nbufs++;
2467
2468 desc = gpmi_chain_data_read(this, buf_read, buf_len,
2469 &direct);
2470 break;
2471 }
2472
2473 if (!desc) {
2474 ret = -ENXIO;
2475 goto unmap;
2476 }
2477 }
2478
2479 dev_dbg(this->dev, "%s setup done\n", __func__);
2480
2481 if (nbufs > 1) {
2482 dev_err(this->dev, "Multiple data instructions not supported\n");
2483 ret = -EINVAL;
2484 goto unmap;
2485 }
2486
2487 if (this->bch) {
2488 writel(this->bch_flashlayout0,
2489 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2490 writel(this->bch_flashlayout1,
2491 this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2492 }
2493
2494 if (this->bch && buf_read) {
2495 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2496 this->resources.bch_regs + HW_BCH_CTRL_SET);
2497 completion = &this->bch_done;
2498 } else {
2499 desc->callback = dma_irq_callback;
2500 desc->callback_param = this;
2501 completion = &this->dma_done;
2502 }
2503
2504 init_completion(completion);
2505
2506 dmaengine_submit(desc);
2507 dma_async_issue_pending(get_dma_chan(this));
2508
2509 to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
2510 if (!to) {
2511 dev_err(this->dev, "DMA timeout, last DMA\n");
2512 gpmi_dump_info(this);
2513 ret = -ETIMEDOUT;
2514 goto unmap;
2515 }
2516
2517 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2518 this->resources.bch_regs + HW_BCH_CTRL_CLR);
2519 gpmi_clear_bch(this);
2520
2521 ret = 0;
2522
2523unmap:
2524 for (i = 0; i < this->ntransfers; i++) {
2525 struct gpmi_transfer *transfer = &this->transfers[i];
2526
2527 if (transfer->direction != DMA_NONE)
2528 dma_unmap_sg(this->dev, &transfer->sgl, 1,
2529 transfer->direction);
2530 }
2531
2532 if (!ret && buf_read && !direct)
2533 memcpy(buf_read, this->data_buffer_dma,
2534 gpmi_raw_len_to_len(this, buf_len));
2535
2536 this->bch = false;
2537
2538 pm_runtime_mark_last_busy(this->dev);
2539 pm_runtime_put_autosuspend(this->dev);
2540
2541 return ret;
2542}
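For orientation, here is a hypothetical sketch (not part of this patch) of the kind of operation the raw NAND core hands to ->exec_op() for a full page read; gpmi_nfc_exec_op() chains the two command cycles, the address cycles, the ready wait and the data-in phase into a single DMA chain. The address bytes, length and timeout are placeholders:

static int example_page_read(struct nand_chip *chip, void *buf, int len)
{
        u8 addrs[5] = { };      /* 2 column + 3 row cycles, placeholder */
        struct nand_op_instr instrs[] = {
                NAND_OP_CMD(NAND_CMD_READ0, 0),
                NAND_OP_ADDR(5, addrs, 0),
                NAND_OP_CMD(NAND_CMD_READSTART, 0),
                NAND_OP_WAIT_RDY(100, 0),       /* assumed timeout, ms */
                NAND_OP_DATA_IN(len, buf, 0),
        };
        struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

        return gpmi_nfc_exec_op(chip, &op, false);
}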
2543
1893static const struct nand_controller_ops gpmi_nand_controller_ops = { 2544static const struct nand_controller_ops gpmi_nand_controller_ops = {
1894 .attach_chip = gpmi_nand_attach_chip, 2545 .attach_chip = gpmi_nand_attach_chip,
1895 .setup_data_interface = gpmi_setup_data_interface, 2546 .setup_data_interface = gpmi_setup_data_interface,
2547 .exec_op = gpmi_nfc_exec_op,
1896}; 2548};
1897 2549
1898static int gpmi_nand_init(struct gpmi_nand_data *this) 2550static int gpmi_nand_init(struct gpmi_nand_data *this)
@@ -1901,9 +2553,6 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
1901 struct mtd_info *mtd = nand_to_mtd(chip); 2553 struct mtd_info *mtd = nand_to_mtd(chip);
1902 int ret; 2554 int ret;
1903 2555
1904 /* init current chip */
1905 this->current_chip = -1;
1906
1907 /* init the MTD data structures */ 2556 /* init the MTD data structures */
1908 mtd->name = "gpmi-nand"; 2557 mtd->name = "gpmi-nand";
1909 mtd->dev.parent = this->dev; 2558 mtd->dev.parent = this->dev;
@@ -1911,14 +2560,8 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
1911 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */ 2560 /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
1912 nand_set_controller_data(chip, this); 2561 nand_set_controller_data(chip, this);
1913 nand_set_flash_node(chip, this->pdev->dev.of_node); 2562 nand_set_flash_node(chip, this->pdev->dev.of_node);
1914 chip->legacy.select_chip = gpmi_select_chip;
1915 chip->legacy.cmd_ctrl = gpmi_cmd_ctrl;
1916 chip->legacy.dev_ready = gpmi_dev_ready;
1917 chip->legacy.read_byte = gpmi_read_byte;
1918 chip->legacy.read_buf = gpmi_read_buf;
1919 chip->legacy.write_buf = gpmi_write_buf;
1920 chip->badblock_pattern = &gpmi_bbt_descr;
1921 chip->legacy.block_markbad = gpmi_block_markbad; 2563 chip->legacy.block_markbad = gpmi_block_markbad;
2564 chip->badblock_pattern = &gpmi_bbt_descr;
1922 chip->options |= NAND_NO_SUBPAGE_WRITE; 2565 chip->options |= NAND_NO_SUBPAGE_WRITE;
1923 2566
1924 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ 2567 /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */
@@ -1934,7 +2577,10 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
1934 if (ret) 2577 if (ret)
1935 goto err_out; 2578 goto err_out;
1936 2579
1937 chip->legacy.dummy_controller.ops = &gpmi_nand_controller_ops; 2580 nand_controller_init(&this->base);
2581 this->base.ops = &gpmi_nand_controller_ops;
2582 chip->controller = &this->base;
2583
1938 ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1); 2584 ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
1939 if (ret) 2585 if (ret)
1940 goto err_out; 2586 goto err_out;
@@ -2004,6 +2650,16 @@ static int gpmi_nand_probe(struct platform_device *pdev)
2004 if (ret) 2650 if (ret)
2005 goto exit_acquire_resources; 2651 goto exit_acquire_resources;
2006 2652
2653 ret = __gpmi_enable_clk(this, true);
2654 if (ret)
2655 goto exit_nfc_init;
2656
2657 pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2658 pm_runtime_use_autosuspend(&pdev->dev);
2659 pm_runtime_set_active(&pdev->dev);
2660 pm_runtime_enable(&pdev->dev);
2661 pm_runtime_get_sync(&pdev->dev);
2662
2007 ret = gpmi_init(this); 2663 ret = gpmi_init(this);
2008 if (ret) 2664 if (ret)
2009 goto exit_nfc_init; 2665 goto exit_nfc_init;
@@ -2012,11 +2668,16 @@ static int gpmi_nand_probe(struct platform_device *pdev)
2012 if (ret) 2668 if (ret)
2013 goto exit_nfc_init; 2669 goto exit_nfc_init;
2014 2670
2671 pm_runtime_mark_last_busy(&pdev->dev);
2672 pm_runtime_put_autosuspend(&pdev->dev);
2673
2015 dev_info(this->dev, "driver registered.\n"); 2674 dev_info(this->dev, "driver registered.\n");
2016 2675
2017 return 0; 2676 return 0;
2018 2677
2019exit_nfc_init: 2678exit_nfc_init:
2679 pm_runtime_put(&pdev->dev);
2680 pm_runtime_disable(&pdev->dev);
2020 release_resources(this); 2681 release_resources(this);
2021exit_acquire_resources: 2682exit_acquire_resources:
2022 2683
@@ -2027,6 +2688,9 @@ static int gpmi_nand_remove(struct platform_device *pdev)
2027{ 2688{
2028 struct gpmi_nand_data *this = platform_get_drvdata(pdev); 2689 struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2029 2690
2691 pm_runtime_put_sync(&pdev->dev);
2692 pm_runtime_disable(&pdev->dev);
2693
2030 nand_release(&this->nand); 2694 nand_release(&this->nand);
2031 gpmi_free_dma_buffer(this); 2695 gpmi_free_dma_buffer(this);
2032 release_resources(this); 2696 release_resources(this);
@@ -2069,8 +2733,23 @@ static int gpmi_pm_resume(struct device *dev)
2069} 2733}
2070#endif /* CONFIG_PM_SLEEP */ 2734#endif /* CONFIG_PM_SLEEP */
2071 2735
2736static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2737{
2738 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2739
2740 return __gpmi_enable_clk(this, false);
2741}
2742
2743static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2744{
2745 struct gpmi_nand_data *this = dev_get_drvdata(dev);
2746
2747 return __gpmi_enable_clk(this, true);
2748}
2749
2072static const struct dev_pm_ops gpmi_pm_ops = { 2750static const struct dev_pm_ops gpmi_pm_ops = {
2073 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume) 2751 SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2752 SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
2074}; 2753};
2075 2754
2076static struct platform_driver gpmi_nand_driver = { 2755static struct platform_driver gpmi_nand_driver = {
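
The probe/remove and dev_pm_ops hunks above are the stock runtime-PM autosuspend conversion: clock handling moves into the runtime callbacks, probe enables the clocks, marks the device active, holds a reference across the initial hardware setup, then drops it with autosuspend; remove balances the reference before disabling runtime PM. A minimal sketch of the pattern, using an invented foo driver rather than the GPMI code itself:

	#include <linux/clk.h>
	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	struct foo_data {
		struct clk *clk;	/* stand-in for the GPMI clock set */
	};

	static int __maybe_unused foo_runtime_suspend(struct device *dev)
	{
		struct foo_data *foo = dev_get_drvdata(dev);

		clk_disable_unprepare(foo->clk);	/* gate clocks while idle */
		return 0;
	}

	static int __maybe_unused foo_runtime_resume(struct device *dev)
	{
		struct foo_data *foo = dev_get_drvdata(dev);

		return clk_prepare_enable(foo->clk);	/* ungate before access */
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_data *foo = platform_get_drvdata(pdev); /* set earlier */
		int ret;

		ret = clk_prepare_enable(foo->clk);	/* match set_active() below */
		if (ret)
			return ret;

		pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
		pm_runtime_get_sync(&pdev->dev);	/* hold a ref during init */

		/* ... hardware init that needs the clocks would run here ... */

		pm_runtime_mark_last_busy(&pdev->dev);
		pm_runtime_put_autosuspend(&pdev->dev);	/* idle after the delay */
		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		pm_runtime_put_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};
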
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
index a804a4a5bd46..fdc5ed7de083 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h
@@ -103,6 +103,14 @@ struct gpmi_nfc_hardware_timing {
103 u32 ctrl1n; 103 u32 ctrl1n;
104}; 104};
105 105
106#define GPMI_MAX_TRANSFERS 8
107
108struct gpmi_transfer {
109 u8 cmdbuf[8];
110 struct scatterlist sgl;
111 enum dma_data_direction direction;
112};
113
106struct gpmi_nand_data { 114struct gpmi_nand_data {
107 /* Devdata */ 115 /* Devdata */
108 const struct gpmi_devdata *devdata; 116 const struct gpmi_devdata *devdata;
@@ -126,25 +134,18 @@ struct gpmi_nand_data {
126 struct boot_rom_geometry rom_geometry; 134 struct boot_rom_geometry rom_geometry;
127 135
128 /* MTD / NAND */ 136 /* MTD / NAND */
137 struct nand_controller base;
129 struct nand_chip nand; 138 struct nand_chip nand;
130 139
131 /* General-use Variables */ 140 struct gpmi_transfer transfers[GPMI_MAX_TRANSFERS];
132 int current_chip; 141 int ntransfers;
133 unsigned int command_length;
134 142
135 struct scatterlist cmd_sgl; 143 bool bch;
136 char *cmd_buffer; 144 uint32_t bch_flashlayout0;
145 uint32_t bch_flashlayout1;
137 146
138 struct scatterlist data_sgl;
139 char *data_buffer_dma; 147 char *data_buffer_dma;
140 148
141 void *page_buffer_virt;
142 dma_addr_t page_buffer_phys;
143 unsigned int page_buffer_size;
144
145 void *payload_virt;
146 dma_addr_t payload_phys;
147
148 void *auxiliary_virt; 149 void *auxiliary_virt;
149 dma_addr_t auxiliary_phys; 150 dma_addr_t auxiliary_phys;
150 151
@@ -154,45 +155,8 @@ struct gpmi_nand_data {
154#define DMA_CHANS 8 155#define DMA_CHANS 8
155 struct dma_chan *dma_chans[DMA_CHANS]; 156 struct dma_chan *dma_chans[DMA_CHANS];
156 struct completion dma_done; 157 struct completion dma_done;
157
158 /* private */
159 void *private;
160}; 158};
161 159
162/* Common Services */
163int common_nfc_set_geometry(struct gpmi_nand_data *);
164struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
165bool prepare_data_dma(struct gpmi_nand_data *, const void *buf, int len,
166 enum dma_data_direction dr);
167int start_dma_without_bch_irq(struct gpmi_nand_data *,
168 struct dma_async_tx_descriptor *);
169int start_dma_with_bch_irq(struct gpmi_nand_data *,
170 struct dma_async_tx_descriptor *);
171
172/* GPMI-NAND helper function library */
173int gpmi_init(struct gpmi_nand_data *);
174void gpmi_clear_bch(struct gpmi_nand_data *);
175void gpmi_dump_info(struct gpmi_nand_data *);
176int bch_set_geometry(struct gpmi_nand_data *);
177int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
178int gpmi_send_command(struct gpmi_nand_data *);
179int gpmi_enable_clk(struct gpmi_nand_data *this);
180int gpmi_disable_clk(struct gpmi_nand_data *this);
181int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr,
182 const struct nand_data_interface *conf);
183void gpmi_nfc_apply_timings(struct gpmi_nand_data *this);
184int gpmi_read_data(struct gpmi_nand_data *, void *buf, int len);
185int gpmi_send_data(struct gpmi_nand_data *, const void *buf, int len);
186
187int gpmi_send_page(struct gpmi_nand_data *,
188 dma_addr_t payload, dma_addr_t auxiliary);
189int gpmi_read_page(struct gpmi_nand_data *,
190 dma_addr_t payload, dma_addr_t auxiliary);
191
192void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
193 const u8 *src, size_t src_bit_off,
194 size_t nbits);
195
196/* BCH : Status Block Completion Codes */ 160/* BCH : Status Block Completion Codes */
197#define STATUS_GOOD 0x00 161#define STATUS_GOOD 0x00
198#define STATUS_ERASED 0xff 162#define STATUS_ERASED 0xff
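
The slimmed-down gpmi_nand_data reflects the driver's move to ->exec_op(): instead of dedicated command/payload buffers, each instruction of an operation is queued as one gpmi_transfer carrying its own scatterlist and DMA direction, with ntransfers counting the queue depth. A hedged sketch of how such an entry might be appended (queue_transfer is an invented helper, not the driver's API):

	#include <linux/dma-direction.h>
	#include <linux/scatterlist.h>
	#include <linux/types.h>

	#define GPMI_MAX_TRANSFERS 8

	struct gpmi_transfer {
		u8 cmdbuf[8];
		struct scatterlist sgl;
		enum dma_data_direction direction;
	};

	/* illustration only: append one buffer to the transfer queue */
	static struct gpmi_transfer *
	queue_transfer(struct gpmi_transfer *transfers, int *ntransfers,
		       void *buf, unsigned int len, enum dma_data_direction dir)
	{
		struct gpmi_transfer *t;

		if (*ntransfers >= GPMI_MAX_TRANSFERS)
			return NULL;		/* queue full, caller must bail */

		t = &transfers[(*ntransfers)++];
		t->direction = dir;
		sg_init_one(&t->sgl, buf, len);	/* single-entry sg list */
		return t;
	}
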
diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c
index 0f90e060dae8..74595b644b7c 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.c
+++ b/drivers/mtd/nand/raw/mtk_ecc.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/* 2/*
3 * MTK ECC controller driver. 3 * MTK ECC controller driver.
4 * Copyright (C) 2016 MediaTek Inc. 4 * Copyright (C) 2016 MediaTek Inc.
@@ -596,4 +596,4 @@ module_platform_driver(mtk_ecc_driver);
596 596
597MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); 597MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
598MODULE_DESCRIPTION("MTK Nand ECC Driver"); 598MODULE_DESCRIPTION("MTK Nand ECC Driver");
599MODULE_LICENSE("GPL"); 599MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/mtd/nand/raw/mtk_ecc.h b/drivers/mtd/nand/raw/mtk_ecc.h
index aa52e94c771d..0e48c36e6ca0 100644
--- a/drivers/mtd/nand/raw/mtk_ecc.h
+++ b/drivers/mtd/nand/raw/mtk_ecc.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0-only */ 1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
2/* 2/*
3 * MTK SDG1 ECC controller 3 * MTK SDG1 ECC controller
4 * 4 *
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index dceff28c9a31..373d47d1ba4c 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/* 2/*
3 * MTK NAND Flash controller driver. 3 * MTK NAND Flash controller driver.
4 * Copyright (C) 2016 MediaTek Inc. 4 * Copyright (C) 2016 MediaTek Inc.
@@ -79,6 +79,10 @@
79#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2) 79#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
80#define NFI_FDM_MAX_SIZE (8) 80#define NFI_FDM_MAX_SIZE (8)
81#define NFI_FDM_MIN_SIZE (1) 81#define NFI_FDM_MIN_SIZE (1)
82#define NFI_DEBUG_CON1 (0x220)
83#define STROBE_MASK GENMASK(4, 3)
84#define STROBE_SHIFT (3)
85#define MAX_STROBE_DLY (3)
82#define NFI_MASTER_STA (0x224) 86#define NFI_MASTER_STA (0x224)
83#define MASTER_STA_MASK (0x0FFF) 87#define MASTER_STA_MASK (0x0FFF)
84#define NFI_EMPTY_THRESH (0x23C) 88#define NFI_EMPTY_THRESH (0x23C)
@@ -150,6 +154,8 @@ struct mtk_nfc {
150 struct list_head chips; 154 struct list_head chips;
151 155
152 u8 *buffer; 156 u8 *buffer;
157
158 unsigned long assigned_cs;
153}; 159};
154 160
155/* 161/*
@@ -500,7 +506,8 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
500{ 506{
501 struct mtk_nfc *nfc = nand_get_controller_data(chip); 507 struct mtk_nfc *nfc = nand_get_controller_data(chip);
502 const struct nand_sdr_timings *timings; 508 const struct nand_sdr_timings *timings;
503 u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt; 509 u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
510 u32 temp, tsel = 0;
504 511
505 timings = nand_get_sdr_timings(conf); 512 timings = nand_get_sdr_timings(conf);
506 if (IS_ERR(timings)) 513 if (IS_ERR(timings))
@@ -536,14 +543,53 @@ static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline,
536 twh = DIV_ROUND_UP(twh * rate, 1000000) - 1; 543 twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
537 twh &= 0xf; 544 twh &= 0xf;
538 545
539 twst = timings->tWP_min / 1000; 546 /* Calculate the real WE#/RE# hold time in nanoseconds */
547 temp = (twh + 1) * 1000000 / rate;
548 /* nanoseconds to picoseconds */
549 temp *= 1000;
550
551 /*
552 * WE# low level time should be expaned to meet WE# pulse time
553 * and WE# cycle time at the same time.
554 */
555 if (temp < timings->tWC_min)
556 twst = timings->tWC_min - temp;
557 twst = max(timings->tWP_min, twst) / 1000;
540 twst = DIV_ROUND_UP(twst * rate, 1000000) - 1; 558 twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
541 twst &= 0xf; 559 twst &= 0xf;
542 560
543 trlt = max(timings->tREA_max, timings->tRP_min) / 1000; 561 /*
562 * RE# low level time should be expanded to meet RE# pulse time
563 * and RE# cycle time at the same time.
564 */
565 if (temp < timings->tRC_min)
566 trlt = timings->tRC_min - temp;
567 trlt = max(trlt, timings->tRP_min) / 1000;
544 trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1; 568 trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
545 trlt &= 0xf; 569 trlt &= 0xf;
546 570
571 /* Calculate the RE# pulse time in nanoseconds. */
572 temp = (trlt + 1) * 1000000 / rate;
573 /* nanoseconds to picoseconds */
574 temp *= 1000;
575 /*
576 * If RE# access time is bigger than RE# pulse time,
577 * delay sampling data timing.
578 */
579 if (temp < timings->tREA_max) {
580 tsel = timings->tREA_max / 1000;
581 tsel = DIV_ROUND_UP(tsel * rate, 1000000);
582 tsel -= (trlt + 1);
583 if (tsel > MAX_STROBE_DLY) {
584 trlt += tsel - MAX_STROBE_DLY;
585 tsel = MAX_STROBE_DLY;
586 }
587 }
588 temp = nfi_readl(nfc, NFI_DEBUG_CON1);
589 temp &= ~STROBE_MASK;
590 temp |= tsel << STROBE_SHIFT;
591 nfi_writel(nfc, temp, NFI_DEBUG_CON1);
592
547 /* 593 /*
548 * ACCON: access timing control register 594 * ACCON: access timing control register
549 * ------------------------------------- 595 * -------------------------------------
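
The reworked timing code keeps converting between time and NFI clock cycles with the same two identities: cycles = DIV_ROUND_UP(ns * rate, 1000000) with rate in kHz (which the surrounding arithmetic implies), and ns = cycles * 1000000 / rate going back, with a further *1000 to compare in picoseconds against tWC_min/tRC_min. The pair, as a standalone sketch:

	#include <linux/kernel.h>	/* DIV_ROUND_UP() */

	/* picosecond requirement -> 4-bit register field (cycles - 1) */
	static u32 ps_to_cycles_field(u32 ps, u32 rate_khz)
	{
		u32 ns = ps / 1000;

		return (DIV_ROUND_UP(ns * rate_khz, 1000000) - 1) & 0xf;
	}

	/* register field back to picoseconds, to check tWC/tRC coverage */
	static u32 field_to_ps(u32 field, u32 rate_khz)
	{
		return (field + 1) * 1000000 / rate_khz * 1000;
	}

Read with these helpers, the WE# fix above says: if the hold phase (field_to_ps(twh)) leaves less than tWC_min for the low phase, widen twst by the shortfall before rounding it back into cycles.
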
@@ -835,19 +881,21 @@ static int mtk_nfc_write_oob_std(struct nand_chip *chip, int page)
835 return mtk_nfc_write_page_raw(chip, NULL, 1, page); 881 return mtk_nfc_write_page_raw(chip, NULL, 1, page);
836} 882}
837 883
838static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors) 884static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 start,
885 u32 sectors)
839{ 886{
840 struct nand_chip *chip = mtd_to_nand(mtd); 887 struct nand_chip *chip = mtd_to_nand(mtd);
841 struct mtk_nfc *nfc = nand_get_controller_data(chip); 888 struct mtk_nfc *nfc = nand_get_controller_data(chip);
842 struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); 889 struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
843 struct mtk_ecc_stats stats; 890 struct mtk_ecc_stats stats;
891 u32 reg_size = mtk_nand->fdm.reg_size;
844 int rc, i; 892 int rc, i;
845 893
846 rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE; 894 rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
847 if (rc) { 895 if (rc) {
848 memset(buf, 0xff, sectors * chip->ecc.size); 896 memset(buf, 0xff, sectors * chip->ecc.size);
849 for (i = 0; i < sectors; i++) 897 for (i = 0; i < sectors; i++)
850 memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size); 898 memset(oob_ptr(chip, start + i), 0xff, reg_size);
851 return 0; 899 return 0;
852 } 900 }
853 901
@@ -867,7 +915,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
867 u32 spare = mtk_nand->spare_per_sector; 915 u32 spare = mtk_nand->spare_per_sector;
868 u32 column, sectors, start, end, reg; 916 u32 column, sectors, start, end, reg;
869 dma_addr_t addr; 917 dma_addr_t addr;
870 int bitflips; 918 int bitflips = 0;
871 size_t len; 919 size_t len;
872 u8 *buf; 920 u8 *buf;
873 int rc; 921 int rc;
@@ -934,14 +982,11 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
934 if (rc < 0) { 982 if (rc < 0) {
935 dev_err(nfc->dev, "subpage done timeout\n"); 983 dev_err(nfc->dev, "subpage done timeout\n");
936 bitflips = -EIO; 984 bitflips = -EIO;
937 } else { 985 } else if (!raw) {
938 bitflips = 0; 986 rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
939 if (!raw) { 987 bitflips = rc < 0 ? -ETIMEDOUT :
940 rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE); 988 mtk_nfc_update_ecc_stats(mtd, buf, start, sectors);
941 bitflips = rc < 0 ? -ETIMEDOUT : 989 mtk_nfc_read_fdm(chip, start, sectors);
942 mtk_nfc_update_ecc_stats(mtd, buf, sectors);
943 mtk_nfc_read_fdm(chip, start, sectors);
944 }
945 } 990 }
946 991
947 dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE); 992 dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
@@ -1315,6 +1360,17 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
1315 dev_err(dev, "reg property failure : %d\n", ret); 1360 dev_err(dev, "reg property failure : %d\n", ret);
1316 return ret; 1361 return ret;
1317 } 1362 }
1363
1364 if (tmp >= MTK_NAND_MAX_NSELS) {
1365 dev_err(dev, "invalid CS: %u\n", tmp);
1366 return -EINVAL;
1367 }
1368
1369 if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
1370 dev_err(dev, "CS %u already assigned\n", tmp);
1371 return -EINVAL;
1372 }
1373
1318 chip->sels[i] = tmp; 1374 chip->sels[i] = tmp;
1319 } 1375 }
1320 1376
@@ -1589,6 +1645,6 @@ static struct platform_driver mtk_nfc_driver = {
1589 1645
1590module_platform_driver(mtk_nfc_driver); 1646module_platform_driver(mtk_nfc_driver);
1591 1647
1592MODULE_LICENSE("GPL"); 1648MODULE_LICENSE("Dual MIT/GPL");
1593MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>"); 1649MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
1594MODULE_DESCRIPTION("MTK Nand Flash Controller Driver"); 1650MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 6eb131292eb2..91f046d4d452 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -2111,35 +2111,7 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2111 if (instr == &ctx->subop.instrs[0]) 2111 if (instr == &ctx->subop.instrs[0])
2112 prefix = " ->"; 2112 prefix = " ->";
2113 2113
2114 switch (instr->type) { 2114 nand_op_trace(prefix, instr);
2115 case NAND_OP_CMD_INSTR:
2116 pr_debug("%sCMD [0x%02x]\n", prefix,
2117 instr->ctx.cmd.opcode);
2118 break;
2119 case NAND_OP_ADDR_INSTR:
2120 pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
2121 instr->ctx.addr.naddrs,
2122 instr->ctx.addr.naddrs < 64 ?
2123 instr->ctx.addr.naddrs : 64,
2124 instr->ctx.addr.addrs);
2125 break;
2126 case NAND_OP_DATA_IN_INSTR:
2127 pr_debug("%sDATA_IN [%d B%s]\n", prefix,
2128 instr->ctx.data.len,
2129 instr->ctx.data.force_8bit ?
2130 ", force 8-bit" : "");
2131 break;
2132 case NAND_OP_DATA_OUT_INSTR:
2133 pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
2134 instr->ctx.data.len,
2135 instr->ctx.data.force_8bit ?
2136 ", force 8-bit" : "");
2137 break;
2138 case NAND_OP_WAITRDY_INSTR:
2139 pr_debug("%sWAITRDY [max %d ms]\n", prefix,
2140 instr->ctx.waitrdy.timeout_ms);
2141 break;
2142 }
2143 2115
2144 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1]) 2116 if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
2145 prefix = " "; 2117 prefix = " ";
@@ -2152,6 +2124,22 @@ static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
2152} 2124}
2153#endif 2125#endif
2154 2126
2127static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2128 const struct nand_op_parser_ctx *b)
2129{
2130 if (a->subop.ninstrs < b->subop.ninstrs)
2131 return -1;
2132 else if (a->subop.ninstrs > b->subop.ninstrs)
2133 return 1;
2134
2135 if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2136 return -1;
2137 else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2138 return 1;
2139
2140 return 0;
2141}
2142
2155/** 2143/**
2156 * nand_op_parser_exec_op - exec_op parser 2144 * nand_op_parser_exec_op - exec_op parser
2157 * @chip: the NAND chip 2145 * @chip: the NAND chip
@@ -2186,32 +2174,40 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
2186 unsigned int i; 2174 unsigned int i;
2187 2175
2188 while (ctx.subop.instrs < op->instrs + op->ninstrs) { 2176 while (ctx.subop.instrs < op->instrs + op->ninstrs) {
2189 int ret; 2177 const struct nand_op_parser_pattern *pattern;
2178 struct nand_op_parser_ctx best_ctx;
2179 int ret, best_pattern = -1;
2190 2180
2191 for (i = 0; i < parser->npatterns; i++) { 2181 for (i = 0; i < parser->npatterns; i++) {
2192 const struct nand_op_parser_pattern *pattern; 2182 struct nand_op_parser_ctx test_ctx = ctx;
2193 2183
2194 pattern = &parser->patterns[i]; 2184 pattern = &parser->patterns[i];
2195 if (!nand_op_parser_match_pat(pattern, &ctx)) 2185 if (!nand_op_parser_match_pat(pattern, &test_ctx))
2196 continue; 2186 continue;
2197 2187
2198 nand_op_parser_trace(&ctx); 2188 if (best_pattern >= 0 &&
2199 2189 nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
2200 if (check_only) 2190 continue;
2201 break;
2202
2203 ret = pattern->exec(chip, &ctx.subop);
2204 if (ret)
2205 return ret;
2206 2191
2207 break; 2192 best_pattern = i;
2193 best_ctx = test_ctx;
2208 } 2194 }
2209 2195
2210 if (i == parser->npatterns) { 2196 if (best_pattern < 0) {
2211 pr_debug("->exec_op() parser: pattern not found!\n"); 2197 pr_debug("->exec_op() parser: pattern not found!\n");
2212 return -ENOTSUPP; 2198 return -ENOTSUPP;
2213 } 2199 }
2214 2200
2201 ctx = best_ctx;
2202 nand_op_parser_trace(&ctx);
2203
2204 if (!check_only) {
2205 pattern = &parser->patterns[best_pattern];
2206 ret = pattern->exec(chip, &ctx.subop);
2207 if (ret)
2208 return ret;
2209 }
2210
2215 /* 2211 /*
2216 * Update the context structure by pointing to the start of the 2212 * Update the context structure by pointing to the start of the
2217 * next subop. 2213 * next subop.
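
The parser rewrite changes ->exec_op() dispatch from "first pattern that matches wins" to "best pattern wins": every pattern is trial-parsed on a copy of the context, candidates are ranked by nand_op_parser_cmp_ctx() (more instructions consumed first, then the larger end offset of the final instruction), and only the winner is executed. The ranking, modelled on its own:

	#include <linux/types.h>

	struct parse_ctx {
		int ninstrs;		/* instructions the trial parse consumed */
		int last_end_off;	/* end offset of its final instruction */
	};

	/* mirrors nand_op_parser_cmp_ctx(): longer parse wins, ties broken
	 * by how far the last instruction reaches */
	static int cmp_ctx(const struct parse_ctx *a, const struct parse_ctx *b)
	{
		if (a->ninstrs != b->ninstrs)
			return a->ninstrs < b->ninstrs ? -1 : 1;
		if (a->last_end_off != b->last_end_off)
			return a->last_end_off < b->last_end_off ? -1 : 1;
		return 0;
	}

	struct candidate {
		bool matched;		/* did the trial parse succeed at all? */
		struct parse_ctx ctx;
	};

	/* pick the best of n trial parses; returns the index or -1 */
	static int pick_best(const struct candidate *c, int n)
	{
		int i, best = -1;

		for (i = 0; i < n; i++) {
			if (!c[i].matched)
				continue;
			if (best >= 0 && cmp_ctx(&c[i].ctx, &c[best].ctx) <= 0)
				continue;
			best = i;
		}
		return best;
	}
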
diff --git a/drivers/mtd/nand/raw/nand_bch.c b/drivers/mtd/nand/raw/nand_bch.c
index 55aa4c1cd414..17527310c3a1 100644
--- a/drivers/mtd/nand/raw/nand_bch.c
+++ b/drivers/mtd/nand/raw/nand_bch.c
@@ -170,7 +170,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
170 goto fail; 170 goto fail;
171 } 171 }
172 172
173 nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL); 173 nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL);
174 nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL); 174 nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
175 if (!nbc->eccmask || !nbc->errloc) 175 if (!nbc->eccmask || !nbc->errloc)
176 goto fail; 176 goto fail;
@@ -182,7 +182,6 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
182 goto fail; 182 goto fail;
183 183
184 memset(erased_page, 0xff, eccsize); 184 memset(erased_page, 0xff, eccsize);
185 memset(nbc->eccmask, 0, eccbytes);
186 encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask); 185 encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
187 kfree(erased_page); 186 kfree(erased_page);
188 187
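
The nand_bch hunk is the usual janitorial collapse: kzalloc() returns already-zeroed memory, so the kmalloc() plus the distant memset() become one call. In miniature:

	#include <linux/slab.h>
	#include <linux/types.h>

	/* one call replaces kmalloc(eccbytes, GFP_KERNEL) + memset(.., 0, ..) */
	static u8 *alloc_eccmask(unsigned int eccbytes)
	{
		return kzalloc(eccbytes, GFP_KERNEL);	/* zero-filled */
	}
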
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index fad57c378dd2..58511aeb0c9a 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -8,6 +8,50 @@
8 8
9#include "internals.h" 9#include "internals.h"
10 10
11#define MACRONIX_READ_RETRY_BIT BIT(0)
12#define MACRONIX_NUM_READ_RETRY_MODES 6
13
14struct nand_onfi_vendor_macronix {
15 u8 reserved;
16 u8 reliability_func;
17} __packed;
18
19static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
20{
21 u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
22
23 if (!chip->parameters.supports_set_get_features ||
24 !test_bit(ONFI_FEATURE_ADDR_READ_RETRY,
25 chip->parameters.set_feature_list))
26 return -ENOTSUPP;
27
28 feature[0] = mode;
29 return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
30}
31
32static void macronix_nand_onfi_init(struct nand_chip *chip)
33{
34 struct nand_parameters *p = &chip->parameters;
35 struct nand_onfi_vendor_macronix *mxic;
36
37 if (!p->onfi)
38 return;
39
40 mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
41 if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
42 return;
43
44 chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
45 chip->setup_read_retry = macronix_nand_setup_read_retry;
46
47 if (p->supports_set_get_features) {
48 bitmap_set(p->set_feature_list,
49 ONFI_FEATURE_ADDR_READ_RETRY, 1);
50 bitmap_set(p->get_feature_list,
51 ONFI_FEATURE_ADDR_READ_RETRY, 1);
52 }
53}
54
11/* 55/*
12 * Macronix AC series does not support using SET/GET_FEATURES to change 56 * Macronix AC series does not support using SET/GET_FEATURES to change
13 * the timings unlike what is declared in the parameter page. Unflag 57 * the timings unlike what is declared in the parameter page. Unflag
@@ -56,6 +100,7 @@ static int macronix_nand_init(struct nand_chip *chip)
56 chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE; 100 chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
57 101
58 macronix_nand_fix_broken_get_timings(chip); 102 macronix_nand_fix_broken_get_timings(chip);
103 macronix_nand_onfi_init(chip);
59 104
60 return 0; 105 return 0;
61} 106}
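
Once macronix_nand_onfi_init() has filled in read_retries and setup_read_retry, the raw NAND core can walk the vendor retry modes when a page comes back uncorrectable: try each mode in turn, stop on the first clean read, and fall back to mode 0 afterwards. A hedged sketch of that loop (the real one lives in the core's read path, not in this file):

	#include <linux/errno.h>
	#include <linux/mtd/rawnand.h>

	static int read_page_with_retries(struct nand_chip *chip, int page, u8 *buf)
	{
		int mode, ret = -EBADMSG;

		for (mode = 0; mode < chip->read_retries; mode++) {
			if (mode) {		/* mode 0 is the power-on default */
				ret = chip->setup_read_retry(chip, mode);
				if (ret)
					break;
			}

			ret = -EBADMSG;	/* ... re-read page, ret = ECC verdict ... */
			if (ret != -EBADMSG)
				break;		/* clean or corrected: done */
		}

		if (mode)			/* restore the default mode */
			chip->setup_read_retry(chip, 0);

		return ret;
	}
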
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 999ca6a66036..e63acc077c18 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -37,6 +37,8 @@
37/* Max ECC buffer length */ 37/* Max ECC buffer length */
38#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) 38#define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
39 39
40#define FMC2_TIMEOUT_MS 1000
41
40/* Timings */ 42/* Timings */
41#define FMC2_THIZ 1 43#define FMC2_THIZ 1
42#define FMC2_TIO 8000 44#define FMC2_TIO 8000
@@ -530,7 +532,8 @@ static int stm32_fmc2_ham_calculate(struct nand_chip *chip, const u8 *data,
530 int ret; 532 int ret;
531 533
532 ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR, 534 ret = readl_relaxed_poll_timeout(fmc2->io_base + FMC2_SR,
533 sr, sr & FMC2_SR_NWRF, 10, 1000); 535 sr, sr & FMC2_SR_NWRF, 10,
536 FMC2_TIMEOUT_MS);
534 if (ret) { 537 if (ret) {
535 dev_err(fmc2->dev, "ham timeout\n"); 538 dev_err(fmc2->dev, "ham timeout\n");
536 return ret; 539 return ret;
@@ -611,7 +614,7 @@ static int stm32_fmc2_bch_calculate(struct nand_chip *chip, const u8 *data,
611 614
612 /* Wait until the BCH code is ready */ 615 /* Wait until the BCH code is ready */
613 if (!wait_for_completion_timeout(&fmc2->complete, 616 if (!wait_for_completion_timeout(&fmc2->complete,
614 msecs_to_jiffies(1000))) { 617 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
615 dev_err(fmc2->dev, "bch timeout\n"); 618 dev_err(fmc2->dev, "bch timeout\n");
616 stm32_fmc2_disable_bch_irq(fmc2); 619 stm32_fmc2_disable_bch_irq(fmc2);
617 return -ETIMEDOUT; 620 return -ETIMEDOUT;
@@ -696,7 +699,7 @@ static int stm32_fmc2_bch_correct(struct nand_chip *chip, u8 *dat,
696 699
697 /* Wait until the decoding error is ready */ 700 /* Wait until the decoding error is ready */
698 if (!wait_for_completion_timeout(&fmc2->complete, 701 if (!wait_for_completion_timeout(&fmc2->complete,
699 msecs_to_jiffies(1000))) { 702 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
700 dev_err(fmc2->dev, "bch timeout\n"); 703 dev_err(fmc2->dev, "bch timeout\n");
701 stm32_fmc2_disable_bch_irq(fmc2); 704 stm32_fmc2_disable_bch_irq(fmc2);
702 return -ETIMEDOUT; 705 return -ETIMEDOUT;
@@ -969,7 +972,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
969 972
970 /* Wait end of sequencer transfer */ 973 /* Wait end of sequencer transfer */
971 if (!wait_for_completion_timeout(&fmc2->complete, 974 if (!wait_for_completion_timeout(&fmc2->complete,
972 msecs_to_jiffies(1000))) { 975 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
973 dev_err(fmc2->dev, "seq timeout\n"); 976 dev_err(fmc2->dev, "seq timeout\n");
974 stm32_fmc2_disable_seq_irq(fmc2); 977 stm32_fmc2_disable_seq_irq(fmc2);
975 dmaengine_terminate_all(dma_ch); 978 dmaengine_terminate_all(dma_ch);
@@ -981,7 +984,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
981 984
982 /* Wait DMA data transfer completion */ 985 /* Wait DMA data transfer completion */
983 if (!wait_for_completion_timeout(&fmc2->dma_data_complete, 986 if (!wait_for_completion_timeout(&fmc2->dma_data_complete,
984 msecs_to_jiffies(100))) { 987 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
985 dev_err(fmc2->dev, "data DMA timeout\n"); 988 dev_err(fmc2->dev, "data DMA timeout\n");
986 dmaengine_terminate_all(dma_ch); 989 dmaengine_terminate_all(dma_ch);
987 ret = -ETIMEDOUT; 990 ret = -ETIMEDOUT;
@@ -990,7 +993,7 @@ static int stm32_fmc2_xfer(struct nand_chip *chip, const u8 *buf,
990 /* Wait DMA ECC transfer completion */ 993 /* Wait DMA ECC transfer completion */
991 if (!write_data && !raw) { 994 if (!write_data && !raw) {
992 if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete, 995 if (!wait_for_completion_timeout(&fmc2->dma_ecc_complete,
993 msecs_to_jiffies(100))) { 996 msecs_to_jiffies(FMC2_TIMEOUT_MS))) {
994 dev_err(fmc2->dev, "ECC DMA timeout\n"); 997 dev_err(fmc2->dev, "ECC DMA timeout\n");
995 dmaengine_terminate_all(fmc2->dma_ecc_ch); 998 dmaengine_terminate_all(fmc2->dma_ecc_ch);
996 ret = -ETIMEDOUT; 999 ret = -ETIMEDOUT;
@@ -1909,6 +1912,12 @@ static int stm32_fmc2_probe(struct platform_device *pdev)
1909 } 1912 }
1910 1913
1911 irq = platform_get_irq(pdev, 0); 1914 irq = platform_get_irq(pdev, 0);
1915 if (irq < 0) {
1916 if (irq != -EPROBE_DEFER)
1917 dev_err(dev, "IRQ missing or invalid\n");
1918 return irq;
1919 }
1920
1912 ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0, 1921 ret = devm_request_irq(dev, irq, stm32_fmc2_irq, 0,
1913 dev_name(dev), fmc2); 1922 dev_name(dev), fmc2);
1914 if (ret) { 1923 if (ret) {
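
Folding the magic 1000s (and the inconsistent 100 ms DMA waits) into FMC2_TIMEOUT_MS gives every wait in the driver one knob. One unit subtlety to keep in mind with this idiom: readl_relaxed_poll_timeout() takes its timeout in microseconds, while wait_for_completion_timeout() takes jiffies, so a millisecond constant needs converting for each. A sketch, with names invented for illustration:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/iopoll.h>
	#include <linux/jiffies.h>

	#define MY_TIMEOUT_MS	1000	/* single knob for every wait below */

	/* busy-poll a status register: timeout argument is in MICROseconds */
	static int wait_flag(void __iomem *reg, u32 mask)
	{
		u32 sr;

		return readl_relaxed_poll_timeout(reg, sr, sr & mask, 10,
						  MY_TIMEOUT_MS * 1000);
	}

	/* sleep on an IRQ-signalled completion: timeout argument is in jiffies */
	static int wait_irq(struct completion *done)
	{
		if (!wait_for_completion_timeout(done,
						 msecs_to_jiffies(MY_TIMEOUT_MS)))
			return -ETIMEDOUT;

		return 0;
	}
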
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index 753125082640..9662b9c1d5a9 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2spinand-objs := core.o gigadevice.o macronix.o micron.o toshiba.o winbond.o 2spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o
3obj-$(CONFIG_MTD_SPI_NAND) += spinand.o 3obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 4c15bb58c623..89f6beefb01c 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -511,12 +511,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
511 if (ret == -EBADMSG) { 511 if (ret == -EBADMSG) {
512 ecc_failed = true; 512 ecc_failed = true;
513 mtd->ecc_stats.failed++; 513 mtd->ecc_stats.failed++;
514 ret = 0;
515 } else { 514 } else {
516 mtd->ecc_stats.corrected += ret; 515 mtd->ecc_stats.corrected += ret;
517 max_bitflips = max_t(unsigned int, max_bitflips, ret); 516 max_bitflips = max_t(unsigned int, max_bitflips, ret);
518 } 517 }
519 518
519 ret = 0;
520 ops->retlen += iter.req.datalen; 520 ops->retlen += iter.req.datalen;
521 ops->oobretlen += iter.req.ooblen; 521 ops->oobretlen += iter.req.ooblen;
522 } 522 }
@@ -757,6 +757,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
757 &gigadevice_spinand_manufacturer, 757 &gigadevice_spinand_manufacturer,
758 &macronix_spinand_manufacturer, 758 &macronix_spinand_manufacturer,
759 &micron_spinand_manufacturer, 759 &micron_spinand_manufacturer,
760 &paragon_spinand_manufacturer,
760 &toshiba_spinand_manufacturer, 761 &toshiba_spinand_manufacturer,
761 &winbond_spinand_manufacturer, 762 &winbond_spinand_manufacturer,
762}; 763};
@@ -845,7 +846,7 @@ spinand_select_op_variant(struct spinand_device *spinand,
845 */ 846 */
846int spinand_match_and_init(struct spinand_device *spinand, 847int spinand_match_and_init(struct spinand_device *spinand,
847 const struct spinand_info *table, 848 const struct spinand_info *table,
848 unsigned int table_size, u8 devid) 849 unsigned int table_size, u16 devid)
849{ 850{
850 struct nand_device *nand = spinand_to_nand(spinand); 851 struct nand_device *nand = spinand_to_nand(spinand);
851 unsigned int i; 852 unsigned int i;
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index e6c646007cda..e99d425aa93f 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -9,11 +9,17 @@
9#include <linux/mtd/spinand.h> 9#include <linux/mtd/spinand.h>
10 10
11#define SPINAND_MFR_GIGADEVICE 0xC8 11#define SPINAND_MFR_GIGADEVICE 0xC8
12
12#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4) 13#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
13#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4) 14#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
14 15
15#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0 16#define GD5FXGQ4UEXXG_REG_STATUS2 0xf0
16 17
18#define GD5FXGQ4UXFXXG_STATUS_ECC_MASK (7 << 4)
19#define GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS (0 << 4)
20#define GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS (1 << 4)
21#define GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR (7 << 4)
22
17static SPINAND_OP_VARIANTS(read_cache_variants, 23static SPINAND_OP_VARIANTS(read_cache_variants,
18 SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0), 24 SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
19 SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0), 25 SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -22,6 +28,14 @@ static SPINAND_OP_VARIANTS(read_cache_variants,
22 SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0), 28 SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
23 SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0)); 29 SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
24 30
31static SPINAND_OP_VARIANTS(read_cache_variants_f,
32 SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
33 SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
34 SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
35 SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
36 SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
37 SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
38
25static SPINAND_OP_VARIANTS(write_cache_variants, 39static SPINAND_OP_VARIANTS(write_cache_variants,
26 SPINAND_PROG_LOAD_X4(true, 0, NULL, 0), 40 SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
27 SPINAND_PROG_LOAD(true, 0, NULL, 0)); 41 SPINAND_PROG_LOAD(true, 0, NULL, 0));
@@ -59,6 +73,11 @@ static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
59 return 0; 73 return 0;
60} 74}
61 75
76static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
77 .ecc = gd5fxgq4xa_ooblayout_ecc,
78 .free = gd5fxgq4xa_ooblayout_free,
79};
80
62static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand, 81static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
63 u8 status) 82 u8 status)
64{ 83{
@@ -83,7 +102,7 @@ static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
83 return -EINVAL; 102 return -EINVAL;
84} 103}
85 104
86static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section, 105static int gd5fxgq4_variant2_ooblayout_ecc(struct mtd_info *mtd, int section,
87 struct mtd_oob_region *region) 106 struct mtd_oob_region *region)
88{ 107{
89 if (section) 108 if (section)
@@ -95,7 +114,7 @@ static int gd5fxgq4uexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
95 return 0; 114 return 0;
96} 115}
97 116
98static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section, 117static int gd5fxgq4_variant2_ooblayout_free(struct mtd_info *mtd, int section,
99 struct mtd_oob_region *region) 118 struct mtd_oob_region *region)
100{ 119{
101 if (section) 120 if (section)
@@ -108,6 +127,11 @@ static int gd5fxgq4uexxg_ooblayout_free(struct mtd_info *mtd, int section,
108 return 0; 127 return 0;
109} 128}
110 129
130static const struct mtd_ooblayout_ops gd5fxgq4_variant2_ooblayout = {
131 .ecc = gd5fxgq4_variant2_ooblayout_ecc,
132 .free = gd5fxgq4_variant2_ooblayout_free,
133};
134
111static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand, 135static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
112 u8 status) 136 u8 status)
113{ 137{
@@ -150,15 +174,25 @@ static int gd5fxgq4uexxg_ecc_get_status(struct spinand_device *spinand,
150 return -EINVAL; 174 return -EINVAL;
151} 175}
152 176
153static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = { 177static int gd5fxgq4ufxxg_ecc_get_status(struct spinand_device *spinand,
154 .ecc = gd5fxgq4xa_ooblayout_ecc, 178 u8 status)
155 .free = gd5fxgq4xa_ooblayout_free, 179{
156}; 180 switch (status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) {
181 case GD5FXGQ4UXFXXG_STATUS_ECC_NO_BITFLIPS:
182 return 0;
157 183
158static const struct mtd_ooblayout_ops gd5fxgq4uexxg_ooblayout = { 184 case GD5FXGQ4UXFXXG_STATUS_ECC_1_3_BITFLIPS:
159 .ecc = gd5fxgq4uexxg_ooblayout_ecc, 185 return 3;
160 .free = gd5fxgq4uexxg_ooblayout_free, 186
161}; 187 case GD5FXGQ4UXFXXG_STATUS_ECC_UNCOR_ERROR:
188 return -EBADMSG;
189
190 default: /* (2 << 4) through (6 << 4) are 4-8 corrected errors */
191 return ((status & GD5FXGQ4UXFXXG_STATUS_ECC_MASK) >> 4) + 2;
192 }
193
194 return -EINVAL;
195}
162 196
163static const struct spinand_info gigadevice_spinand_table[] = { 197static const struct spinand_info gigadevice_spinand_table[] = {
164 SPINAND_INFO("GD5F1GQ4xA", 0xF1, 198 SPINAND_INFO("GD5F1GQ4xA", 0xF1,
@@ -195,25 +229,40 @@ static const struct spinand_info gigadevice_spinand_table[] = {
195 &write_cache_variants, 229 &write_cache_variants,
196 &update_cache_variants), 230 &update_cache_variants),
197 0, 231 0,
198 SPINAND_ECCINFO(&gd5fxgq4uexxg_ooblayout, 232 SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
199 gd5fxgq4uexxg_ecc_get_status)), 233 gd5fxgq4uexxg_ecc_get_status)),
234 SPINAND_INFO("GD5F1GQ4UFxxG", 0xb148,
235 NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
236 NAND_ECCREQ(8, 512),
237 SPINAND_INFO_OP_VARIANTS(&read_cache_variants_f,
238 &write_cache_variants,
239 &update_cache_variants),
240 0,
241 SPINAND_ECCINFO(&gd5fxgq4_variant2_ooblayout,
242 gd5fxgq4ufxxg_ecc_get_status)),
200}; 243};
201 244
202static int gigadevice_spinand_detect(struct spinand_device *spinand) 245static int gigadevice_spinand_detect(struct spinand_device *spinand)
203{ 246{
204 u8 *id = spinand->id.data; 247 u8 *id = spinand->id.data;
248 u16 did;
205 int ret; 249 int ret;
206 250
207 /* 251 /*
208 * For GD NANDs, There is an address byte needed to shift in before IDs 252 * Earlier GD5F-series devices (A,E) return [0][MID][DID]
209 * are read out, so the first byte in raw_id is dummy. 253 * Later (F) devices return [MID][DID1][DID2]
210 */ 254 */
211 if (id[1] != SPINAND_MFR_GIGADEVICE) 255
256 if (id[0] == SPINAND_MFR_GIGADEVICE)
257 did = (id[1] << 8) + id[2];
258 else if (id[0] == 0 && id[1] == SPINAND_MFR_GIGADEVICE)
259 did = id[2];
260 else
212 return 0; 261 return 0;
213 262
214 ret = spinand_match_and_init(spinand, gigadevice_spinand_table, 263 ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
215 ARRAY_SIZE(gigadevice_spinand_table), 264 ARRAY_SIZE(gigadevice_spinand_table),
216 id[2]); 265 did);
217 if (ret) 266 if (ret)
218 return ret; 267 return ret;
219 268
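
Widening the table key to u16 (the core.c hunk above) is what lets one detect() cover both wire formats: the older A/E parts clock out a dummy byte first, so the device ID is the single byte id[2], while the F parts answer MID first with a two-byte device ID. The dispatch, reduced to a pure function:

	#include <linux/types.h>

	#define MFR_GIGADEVICE	0xc8

	/* map a 3-byte Read ID answer to the 16-bit table key; 0 = not ours */
	static u16 gigadevice_devid(const u8 id[3])
	{
		if (id[0] == MFR_GIGADEVICE)			/* [MID][DID1][DID2] */
			return (id[1] << 8) + id[2];

		if (id[0] == 0 && id[1] == MFR_GIGADEVICE)	/* [0][MID][DID] */
			return id[2];

		return 0;
	}
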
diff --git a/drivers/mtd/nand/spi/paragon.c b/drivers/mtd/nand/spi/paragon.c
new file mode 100644
index 000000000000..52307681cbd0
--- /dev/null
+++ b/drivers/mtd/nand/spi/paragon.c
@@ -0,0 +1,147 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2019 Jeff Kletsky
4 *
5 * Author: Jeff Kletsky <git-commits@allycomm.com>
6 */
7
8#include <linux/device.h>
9#include <linux/kernel.h>
10#include <linux/mtd/spinand.h>
11
12
13#define SPINAND_MFR_PARAGON 0xa1
14
15
16#define PN26G0XA_STATUS_ECC_BITMASK (3 << 4)
17
18#define PN26G0XA_STATUS_ECC_NONE_DETECTED (0 << 4)
19#define PN26G0XA_STATUS_ECC_1_7_CORRECTED (1 << 4)
20#define PN26G0XA_STATUS_ECC_ERRORED (2 << 4)
21#define PN26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
22
23
24static SPINAND_OP_VARIANTS(read_cache_variants,
25 SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
26 SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
27 SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
28 SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
29 SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
30 SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
31
32static SPINAND_OP_VARIANTS(write_cache_variants,
33 SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
34 SPINAND_PROG_LOAD(true, 0, NULL, 0));
35
36static SPINAND_OP_VARIANTS(update_cache_variants,
37 SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
38 SPINAND_PROG_LOAD(false, 0, NULL, 0));
39
40
41static int pn26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
42 struct mtd_oob_region *region)
43{
44 if (section > 3)
45 return -ERANGE;
46
47 region->offset = 6 + (15 * section); /* 4 BBM + 2 user bytes */
48 region->length = 13;
49
50 return 0;
51}
52
53static int pn26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
54 struct mtd_oob_region *region)
55{
56 if (section > 4)
57 return -ERANGE;
58
59 if (section == 4) {
60 region->offset = 64;
61 region->length = 64;
62 } else {
63 region->offset = 4 + (15 * section);
64 region->length = 2;
65 }
66
67 return 0;
68}
69
70static int pn26g0xa_ecc_get_status(struct spinand_device *spinand,
71 u8 status)
72{
73 switch (status & PN26G0XA_STATUS_ECC_BITMASK) {
74 case PN26G0XA_STATUS_ECC_NONE_DETECTED:
75 return 0;
76
77 case PN26G0XA_STATUS_ECC_1_7_CORRECTED:
78 return 7; /* Return upper limit by convention */
79
80 case PN26G0XA_STATUS_ECC_8_CORRECTED:
81 return 8;
82
83 case PN26G0XA_STATUS_ECC_ERRORED:
84 return -EBADMSG;
85
86 default:
87 break;
88 }
89
90 return -EINVAL;
91}
92
93static const struct mtd_ooblayout_ops pn26g0xa_ooblayout = {
94 .ecc = pn26g0xa_ooblayout_ecc,
95 .free = pn26g0xa_ooblayout_free,
96};
97
98
99static const struct spinand_info paragon_spinand_table[] = {
100 SPINAND_INFO("PN26G01A", 0xe1,
101 NAND_MEMORG(1, 2048, 128, 64, 1024, 21, 1, 1, 1),
102 NAND_ECCREQ(8, 512),
103 SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
104 &write_cache_variants,
105 &update_cache_variants),
106 0,
107 SPINAND_ECCINFO(&pn26g0xa_ooblayout,
108 pn26g0xa_ecc_get_status)),
109 SPINAND_INFO("PN26G02A", 0xe2,
110 NAND_MEMORG(1, 2048, 128, 64, 2048, 41, 1, 1, 1),
111 NAND_ECCREQ(8, 512),
112 SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
113 &write_cache_variants,
114 &update_cache_variants),
115 0,
116 SPINAND_ECCINFO(&pn26g0xa_ooblayout,
117 pn26g0xa_ecc_get_status)),
118};
119
120static int paragon_spinand_detect(struct spinand_device *spinand)
121{
122 u8 *id = spinand->id.data;
123 int ret;
124
125 /* Read ID returns [0][MID][DID] */
126
127 if (id[1] != SPINAND_MFR_PARAGON)
128 return 0;
129
130 ret = spinand_match_and_init(spinand, paragon_spinand_table,
131 ARRAY_SIZE(paragon_spinand_table),
132 id[2]);
133 if (ret)
134 return ret;
135
136 return 1;
137}
138
139static const struct spinand_manufacturer_ops paragon_spinand_manuf_ops = {
140 .detect = paragon_spinand_detect,
141};
142
143const struct spinand_manufacturer paragon_spinand_manufacturer = {
144 .id = SPINAND_MFR_PARAGON,
145 .name = "Paragon",
146 .ops = &paragon_spinand_manuf_ops,
147};
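
For reference, the PN26G0xA status decode in the new file maps the two ECC status bits as below; returning 7 for the 1-7 range is the usual convention of reporting the worst case, since MTD only compares the value against the bitflip threshold:

	/*
	 * status[5:4]  meaning                  pn26g0xa_ecc_get_status()
	 * 00b          no bitflips              0
	 * 01b          1-7 bits corrected       7   (upper bound by convention)
	 * 10b          uncorrectable            -EBADMSG
	 * 11b          exactly 8 corrected      8
	 */
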
diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
index f24d768eee30..752b6cf005f7 100644
--- a/drivers/mtd/parsers/afs.c
+++ b/drivers/mtd/parsers/afs.c
@@ -371,8 +371,7 @@ static int parse_afs_partitions(struct mtd_info *mtd,
371 371
372out_free_parts: 372out_free_parts:
373 while (i >= 0) { 373 while (i >= 0) {
374 if (parts[i].name) 374 kfree(parts[i].name);
375 kfree(parts[i].name);
376 i--; 375 i--;
377 } 376 }
378 kfree(parts); 377 kfree(parts);
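
The afs change leans on kfree(NULL) being a documented no-op, so the NULL guard was dead weight; the whole unwind loop can be read as:

	#include <linux/mtd/partitions.h>
	#include <linux/slab.h>

	/* free names of entries 0..i; kfree(NULL) makes unnamed entries safe */
	static void free_part_names(struct mtd_partition *parts, int i)
	{
		while (i >= 0)
			kfree(parts[i--].name);
	}
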
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 8e14248d2720..6de83277ce8b 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -105,11 +105,4 @@ config SPI_INTEL_SPI_PLATFORM
105 To compile this driver as a module, choose M here: the module 105 To compile this driver as a module, choose M here: the module
106 will be called intel-spi-platform. 106 will be called intel-spi-platform.
107 107
108config SPI_STM32_QUADSPI
109 tristate "STM32 Quad SPI controller"
110 depends on ARCH_STM32 || COMPILE_TEST
111 help
112 This enables support for the STM32 Quad SPI controller.
113 We only connect the NOR to this controller.
114
115endif # MTD_SPI_NOR 108endif # MTD_SPI_NOR
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index 189a15cca3ec..9c5ed03cdc19 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -8,4 +8,3 @@ obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o
8obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o 8obj-$(CONFIG_SPI_INTEL_SPI) += intel-spi.o
9obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o 9obj-$(CONFIG_SPI_INTEL_SPI_PCI) += intel-spi-pci.o
10obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o 10obj-$(CONFIG_SPI_INTEL_SPI_PLATFORM) += intel-spi-platform.o
11obj-$(CONFIG_SPI_STM32_QUADSPI) += stm32-quadspi.o
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index 67ade2c81b21..67f15a1f16fd 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -23,6 +23,7 @@
23#include <linux/of.h> 23#include <linux/of.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/pm_runtime.h> 25#include <linux/pm_runtime.h>
26#include <linux/reset.h>
26#include <linux/sched.h> 27#include <linux/sched.h>
27#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
28#include <linux/timer.h> 29#include <linux/timer.h>
@@ -1325,6 +1326,7 @@ static int cqspi_probe(struct platform_device *pdev)
1325 struct cqspi_st *cqspi; 1326 struct cqspi_st *cqspi;
1326 struct resource *res; 1327 struct resource *res;
1327 struct resource *res_ahb; 1328 struct resource *res_ahb;
1329 struct reset_control *rstc, *rstc_ocp;
1328 const struct cqspi_driver_platdata *ddata; 1330 const struct cqspi_driver_platdata *ddata;
1329 int ret; 1331 int ret;
1330 int irq; 1332 int irq;
@@ -1391,6 +1393,25 @@ static int cqspi_probe(struct platform_device *pdev)
1391 goto probe_clk_failed; 1393 goto probe_clk_failed;
1392 } 1394 }
1393 1395
1396 /* Obtain QSPI reset control */
1397 rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
1398 if (IS_ERR(rstc)) {
1399 dev_err(dev, "Cannot get QSPI reset.\n");
1400 return PTR_ERR(rstc);
1401 }
1402
1403 rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
1404 if (IS_ERR(rstc_ocp)) {
1405 dev_err(dev, "Cannot get QSPI OCP reset.\n");
1406 return PTR_ERR(rstc_ocp);
1407 }
1408
1409 reset_control_assert(rstc);
1410 reset_control_deassert(rstc);
1411
1412 reset_control_assert(rstc_ocp);
1413 reset_control_deassert(rstc_ocp);
1414
1394 cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); 1415 cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
1395 ddata = of_device_get_match_data(dev); 1416 ddata = of_device_get_match_data(dev);
1396 if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY)) 1417 if (ddata && (ddata->quirks & CQSPI_NEEDS_WR_DELAY))
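
The cadence hunk uses the *optional* reset getter: on boards whose DT has no "qspi"/"qspi-ocp" reset lines it returns a NULL stub, and reset_control_assert()/deassert() on NULL are silent no-ops, so only genuine errors (including -EPROBE_DEFER) abort probe. As a reusable sketch (pulse_optional_reset is an invented name):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/reset.h>

	/* pulse an optional, named reset line; absent line == silent no-op */
	static int pulse_optional_reset(struct device *dev, const char *name)
	{
		struct reset_control *rstc;

		rstc = devm_reset_control_get_optional_exclusive(dev, name);
		if (IS_ERR(rstc))
			return PTR_ERR(rstc);	/* real failure or -EPROBE_DEFER */

		reset_control_assert(rstc);
		reset_control_deassert(rstc);

		return 0;
	}
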
diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c
index 5e2344768d53..b83c4ab6cd9f 100644
--- a/drivers/mtd/spi-nor/intel-spi-pci.c
+++ b/drivers/mtd/spi-nor/intel-spi-pci.c
@@ -64,6 +64,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
64 { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, 64 { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
65 { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, 65 { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
66 { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, 66 { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
67 { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
67 { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, 68 { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
68 { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, 69 { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
69 { }, 70 { },
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 0c2ec1c21434..03cc788511d5 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -200,7 +200,7 @@ struct sfdp_header {
200 * register does not modify status register 2. 200 * register does not modify status register 2.
201 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using 201 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
202 * Read Status instruction 05h. Status register2 is read using 202 * Read Status instruction 05h. Status register2 is read using
203 * instruction 35h. QE is set via Writ Status instruction 01h with 203 * instruction 35h. QE is set via Write Status instruction 01h with
204 * two data bytes where bit 1 of the second byte is one. 204 * two data bytes where bit 1 of the second byte is one.
205 * [...] 205 * [...]
206 */ 206 */
@@ -1776,6 +1776,28 @@ static int spi_nor_spansion_clear_sr_bp(struct spi_nor *nor)
1776 .flags = SPI_NOR_NO_FR | SPI_S3AN, 1776 .flags = SPI_NOR_NO_FR | SPI_S3AN,
1777 1777
1778static int 1778static int
1779is25lp256_post_bfpt_fixups(struct spi_nor *nor,
1780 const struct sfdp_parameter_header *bfpt_header,
1781 const struct sfdp_bfpt *bfpt,
1782 struct spi_nor_flash_parameter *params)
1783{
1784 /*
1785 * IS25LP256 supports 4B opcodes, but the BFPT advertises a
1786 * BFPT_DWORD1_ADDRESS_BYTES_3_ONLY address width.
1787 * Overwrite the address width advertised by the BFPT.
1788 */
1789 if ((bfpt->dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) ==
1790 BFPT_DWORD1_ADDRESS_BYTES_3_ONLY)
1791 nor->addr_width = 4;
1792
1793 return 0;
1794}
1795
1796static struct spi_nor_fixups is25lp256_fixups = {
1797 .post_bfpt = is25lp256_post_bfpt_fixups,
1798};
1799
1800static int
1779mx25l25635_post_bfpt_fixups(struct spi_nor *nor, 1801mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
1780 const struct sfdp_parameter_header *bfpt_header, 1802 const struct sfdp_parameter_header *bfpt_header,
1781 const struct sfdp_bfpt *bfpt, 1803 const struct sfdp_bfpt *bfpt,
@@ -1916,7 +1938,8 @@ static const struct flash_info spi_nor_ids[] = {
1916 SECT_4K | SPI_NOR_DUAL_READ) }, 1938 SECT_4K | SPI_NOR_DUAL_READ) },
1917 { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512, 1939 { "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
1918 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | 1940 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
1919 SPI_NOR_4B_OPCODES) }, 1941 SPI_NOR_4B_OPCODES)
1942 .fixups = &is25lp256_fixups },
1920 { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64, 1943 { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
1921 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, 1944 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
1922 { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128, 1945 { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
@@ -1969,6 +1992,9 @@ static const struct flash_info spi_nor_ids[] = {
1969 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, 1992 { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
1970 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, 1993 { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1971 { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, 1994 { "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1995 { "mt25ql02g", INFO(0x20ba22, 0, 64 * 1024, 4096,
1996 SECT_4K | USE_FSR | SPI_NOR_QUAD_READ |
1997 NO_CHIP_ERASE) },
1972 { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, 1998 { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
1973 1999
1974 /* Micron */ 2000 /* Micron */
@@ -2085,6 +2111,11 @@ static const struct flash_info spi_nor_ids[] = {
2085 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) 2111 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2086 }, 2112 },
2087 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, 2113 { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) },
2114 {
2115 "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024, 32,
2116 SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
2117 SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
2118 },
2088 { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, 2119 { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) },
2089 { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, 2120 { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) },
2090 { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) }, 2121 { "w25q20ew", INFO(0xef6012, 0, 64 * 1024, 4, SECT_4K) },
@@ -2151,7 +2182,7 @@ static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2151 2182
2152 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN); 2183 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
2153 if (tmp < 0) { 2184 if (tmp < 0) {
2154 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp); 2185 dev_err(nor->dev, "error %d reading JEDEC ID\n", tmp);
2155 return ERR_PTR(tmp); 2186 return ERR_PTR(tmp);
2156 } 2187 }
2157 2188
diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c
deleted file mode 100644
index 33534f9e296b..000000000000
--- a/drivers/mtd/spi-nor/stm32-quadspi.c
+++ /dev/null
@@ -1,707 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Driver for stm32 quadspi controller
4 *
5 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6 * Author(s): Ludovic Barre author <ludovic.barre@st.com>.
7 */
8#include <linux/clk.h>
9#include <linux/errno.h>
10#include <linux/io.h>
11#include <linux/iopoll.h>
12#include <linux/interrupt.h>
13#include <linux/module.h>
14#include <linux/mtd/mtd.h>
15#include <linux/mtd/partitions.h>
16#include <linux/mtd/spi-nor.h>
17#include <linux/mutex.h>
18#include <linux/of.h>
19#include <linux/of_device.h>
20#include <linux/platform_device.h>
21#include <linux/reset.h>
22#include <linux/sizes.h>
23
24#define QUADSPI_CR 0x00
25#define CR_EN BIT(0)
26#define CR_ABORT BIT(1)
27#define CR_DMAEN BIT(2)
28#define CR_TCEN BIT(3)
29#define CR_SSHIFT BIT(4)
30#define CR_DFM BIT(6)
31#define CR_FSEL BIT(7)
32#define CR_FTHRES_SHIFT 8
33#define CR_FTHRES_MASK GENMASK(12, 8)
34#define CR_FTHRES(n) (((n) << CR_FTHRES_SHIFT) & CR_FTHRES_MASK)
35#define CR_TEIE BIT(16)
36#define CR_TCIE BIT(17)
37#define CR_FTIE BIT(18)
38#define CR_SMIE BIT(19)
39#define CR_TOIE BIT(20)
40#define CR_PRESC_SHIFT 24
41#define CR_PRESC_MASK GENMASK(31, 24)
42#define CR_PRESC(n) (((n) << CR_PRESC_SHIFT) & CR_PRESC_MASK)
43
44#define QUADSPI_DCR 0x04
45#define DCR_CSHT_SHIFT 8
46#define DCR_CSHT_MASK GENMASK(10, 8)
47#define DCR_CSHT(n) (((n) << DCR_CSHT_SHIFT) & DCR_CSHT_MASK)
48#define DCR_FSIZE_SHIFT 16
49#define DCR_FSIZE_MASK GENMASK(20, 16)
50#define DCR_FSIZE(n) (((n) << DCR_FSIZE_SHIFT) & DCR_FSIZE_MASK)
51
52#define QUADSPI_SR 0x08
53#define SR_TEF BIT(0)
54#define SR_TCF BIT(1)
55#define SR_FTF BIT(2)
56#define SR_SMF BIT(3)
57#define SR_TOF BIT(4)
58#define SR_BUSY BIT(5)
59#define SR_FLEVEL_SHIFT 8
60#define SR_FLEVEL_MASK GENMASK(13, 8)
61
62#define QUADSPI_FCR 0x0c
63#define FCR_CTCF BIT(1)
64
65#define QUADSPI_DLR 0x10
66
67#define QUADSPI_CCR 0x14
68#define CCR_INST_SHIFT 0
69#define CCR_INST_MASK GENMASK(7, 0)
70#define CCR_INST(n) (((n) << CCR_INST_SHIFT) & CCR_INST_MASK)
71#define CCR_IMODE_NONE (0U << 8)
72#define CCR_IMODE_1 (1U << 8)
73#define CCR_IMODE_2 (2U << 8)
74#define CCR_IMODE_4 (3U << 8)
75#define CCR_ADMODE_NONE (0U << 10)
76#define CCR_ADMODE_1 (1U << 10)
77#define CCR_ADMODE_2 (2U << 10)
78#define CCR_ADMODE_4 (3U << 10)
79#define CCR_ADSIZE_SHIFT 12
80#define CCR_ADSIZE_MASK GENMASK(13, 12)
81#define CCR_ADSIZE(n) (((n) << CCR_ADSIZE_SHIFT) & CCR_ADSIZE_MASK)
82#define CCR_ABMODE_NONE (0U << 14)
83#define CCR_ABMODE_1 (1U << 14)
84#define CCR_ABMODE_2 (2U << 14)
85#define CCR_ABMODE_4 (3U << 14)
86#define CCR_ABSIZE_8 (0U << 16)
87#define CCR_ABSIZE_16 (1U << 16)
88#define CCR_ABSIZE_24 (2U << 16)
89#define CCR_ABSIZE_32 (3U << 16)
90#define CCR_DCYC_SHIFT 18
91#define CCR_DCYC_MASK GENMASK(22, 18)
92#define CCR_DCYC(n) (((n) << CCR_DCYC_SHIFT) & CCR_DCYC_MASK)
93#define CCR_DMODE_NONE (0U << 24)
94#define CCR_DMODE_1 (1U << 24)
95#define CCR_DMODE_2 (2U << 24)
96#define CCR_DMODE_4 (3U << 24)
97#define CCR_FMODE_INDW (0U << 26)
98#define CCR_FMODE_INDR (1U << 26)
99#define CCR_FMODE_APM (2U << 26)
100#define CCR_FMODE_MM (3U << 26)
101
102#define QUADSPI_AR 0x18
103#define QUADSPI_ABR 0x1c
104#define QUADSPI_DR 0x20
105#define QUADSPI_PSMKR 0x24
106#define QUADSPI_PSMAR 0x28
107#define QUADSPI_PIR 0x2c
108#define QUADSPI_LPTR 0x30
109#define LPTR_DFT_TIMEOUT 0x10
110
111#define FSIZE_VAL(size) (__fls(size) - 1)
112
113#define STM32_MAX_MMAP_SZ SZ_256M
114#define STM32_MAX_NORCHIP 2
115
116#define STM32_QSPI_FIFO_SZ 32
117#define STM32_QSPI_FIFO_TIMEOUT_US 30000
118#define STM32_QSPI_BUSY_TIMEOUT_US 100000
119
120struct stm32_qspi_flash {
121 struct spi_nor nor;
122 struct stm32_qspi *qspi;
123 u32 cs;
124 u32 fsize;
125 u32 presc;
126 u32 read_mode;
127 bool registered;
128 u32 prefetch_limit;
129};
130
131struct stm32_qspi {
132 struct device *dev;
133 void __iomem *io_base;
134 void __iomem *mm_base;
135 resource_size_t mm_size;
136 u32 nor_num;
137 struct clk *clk;
138 u32 clk_rate;
139 struct stm32_qspi_flash flash[STM32_MAX_NORCHIP];
140 struct completion cmd_completion;
141
142 /*
143 * to protect device configuration, could be different between
144 * 2 flash access (bk1, bk2)
145 */
146 struct mutex lock;
147};
148
149struct stm32_qspi_cmd {
150 u8 addr_width;
151 u8 dummy;
152 bool tx_data;
153 u8 opcode;
154 u32 framemode;
155 u32 qspimode;
156 u32 addr;
157 size_t len;
158 void *buf;
159};
160
161static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi)
162{
163 u32 cr;
164 int err = 0;
165
166 if (readl_relaxed(qspi->io_base + QUADSPI_SR) & SR_TCF)
167 return 0;
168
169 reinit_completion(&qspi->cmd_completion);
170 cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
171 writel_relaxed(cr | CR_TCIE, qspi->io_base + QUADSPI_CR);
172
173 if (!wait_for_completion_interruptible_timeout(&qspi->cmd_completion,
174 msecs_to_jiffies(1000)))
175 err = -ETIMEDOUT;
176
177 writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
178 return err;
179}
180
181static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
182{
183 u32 sr;
184
185 return readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR, sr,
186 !(sr & SR_BUSY), 10,
187 STM32_QSPI_BUSY_TIMEOUT_US);
188}
189
190static void stm32_qspi_set_framemode(struct spi_nor *nor,
191 struct stm32_qspi_cmd *cmd, bool read)
192{
193 u32 dmode = CCR_DMODE_1;
194
195 cmd->framemode = CCR_IMODE_1;
196
197 if (read) {
198 switch (nor->read_proto) {
199 default:
200 case SNOR_PROTO_1_1_1:
201 dmode = CCR_DMODE_1;
202 break;
203 case SNOR_PROTO_1_1_2:
204 dmode = CCR_DMODE_2;
205 break;
206 case SNOR_PROTO_1_1_4:
207 dmode = CCR_DMODE_4;
208 break;
209 }
210 }
211
212 cmd->framemode |= cmd->tx_data ? dmode : 0;
213 cmd->framemode |= cmd->addr_width ? CCR_ADMODE_1 : 0;
214}
215
216static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
217{
218 *val = readb_relaxed(addr);
219}
220
221static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
222{
223 writeb_relaxed(*val, addr);
224}
225
226static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
227 const struct stm32_qspi_cmd *cmd)
228{
229 void (*tx_fifo)(u8 *, void __iomem *);
230 u32 len = cmd->len, sr;
231 u8 *buf = cmd->buf;
232 int ret;
233
234 if (cmd->qspimode == CCR_FMODE_INDW)
235 tx_fifo = stm32_qspi_write_fifo;
236 else
237 tx_fifo = stm32_qspi_read_fifo;
238
239 while (len--) {
240 ret = readl_relaxed_poll_timeout(qspi->io_base + QUADSPI_SR,
241 sr, (sr & SR_FTF), 10,
242 STM32_QSPI_FIFO_TIMEOUT_US);
243 if (ret) {
244 dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr);
245 return ret;
246 }
247 tx_fifo(buf++, qspi->io_base + QUADSPI_DR);
248 }
249
250 return 0;
251}
252
253static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
254 const struct stm32_qspi_cmd *cmd)
255{
256 memcpy_fromio(cmd->buf, qspi->mm_base + cmd->addr, cmd->len);
257 return 0;
258}
259
260static int stm32_qspi_tx(struct stm32_qspi *qspi,
261 const struct stm32_qspi_cmd *cmd)
262{
263 if (!cmd->tx_data)
264 return 0;
265
266 if (cmd->qspimode == CCR_FMODE_MM)
267 return stm32_qspi_tx_mm(qspi, cmd);
268
269 return stm32_qspi_tx_poll(qspi, cmd);
270}
271
272static int stm32_qspi_send(struct stm32_qspi_flash *flash,
273 const struct stm32_qspi_cmd *cmd)
274{
275 struct stm32_qspi *qspi = flash->qspi;
276 u32 ccr, dcr, cr;
277 u32 last_byte;
278 int err;
279
280 err = stm32_qspi_wait_nobusy(qspi);
281 if (err)
282 goto abort;
283
284 dcr = readl_relaxed(qspi->io_base + QUADSPI_DCR) & ~DCR_FSIZE_MASK;
285 dcr |= DCR_FSIZE(flash->fsize);
286 writel_relaxed(dcr, qspi->io_base + QUADSPI_DCR);
287
288 cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
289 cr &= ~CR_PRESC_MASK & ~CR_FSEL;
290 cr |= CR_PRESC(flash->presc);
291 cr |= flash->cs ? CR_FSEL : 0;
292 writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
293
294 if (cmd->tx_data)
295 writel_relaxed(cmd->len - 1, qspi->io_base + QUADSPI_DLR);
296
297 ccr = cmd->framemode | cmd->qspimode;
298
299 if (cmd->dummy)
300 ccr |= CCR_DCYC(cmd->dummy);
301
302 if (cmd->addr_width)
303 ccr |= CCR_ADSIZE(cmd->addr_width - 1);
304
305 ccr |= CCR_INST(cmd->opcode);
306 writel_relaxed(ccr, qspi->io_base + QUADSPI_CCR);
307
308 if (cmd->addr_width && cmd->qspimode != CCR_FMODE_MM)
309 writel_relaxed(cmd->addr, qspi->io_base + QUADSPI_AR);
310
311 err = stm32_qspi_tx(qspi, cmd);
312 if (err)
313 goto abort;
314
315 if (cmd->qspimode != CCR_FMODE_MM) {
316 err = stm32_qspi_wait_cmd(qspi);
317 if (err)
318 goto abort;
319 writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR);
320 } else {
321 last_byte = cmd->addr + cmd->len;
322 if (last_byte > flash->prefetch_limit)
323 goto abort;
324 }
325
326 return err;
327
328abort:
329 cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT;
330 writel_relaxed(cr, qspi->io_base + QUADSPI_CR);
331
332 if (err)
333 dev_err(qspi->dev, "%s abort err:%d\n", __func__, err);
334
335 return err;
336}
337
338static int stm32_qspi_read_reg(struct spi_nor *nor,
339 u8 opcode, u8 *buf, int len)
340{
341 struct stm32_qspi_flash *flash = nor->priv;
342 struct device *dev = flash->qspi->dev;
343 struct stm32_qspi_cmd cmd;
344
345 dev_dbg(dev, "read_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
346
347 memset(&cmd, 0, sizeof(cmd));
348 cmd.opcode = opcode;
349 cmd.tx_data = true;
350 cmd.len = len;
351 cmd.buf = buf;
352 cmd.qspimode = CCR_FMODE_INDR;
353
354 stm32_qspi_set_framemode(nor, &cmd, false);
355
356 return stm32_qspi_send(flash, &cmd);
357}
358
359static int stm32_qspi_write_reg(struct spi_nor *nor, u8 opcode,
360 u8 *buf, int len)
361{
362 struct stm32_qspi_flash *flash = nor->priv;
363 struct device *dev = flash->qspi->dev;
364 struct stm32_qspi_cmd cmd;
365
366 dev_dbg(dev, "write_reg: cmd:%#.2x buf:%pK len:%#x\n", opcode, buf, len);
367
368 memset(&cmd, 0, sizeof(cmd));
369 cmd.opcode = opcode;
370 cmd.tx_data = !!(buf && len > 0);
371 cmd.len = len;
372 cmd.buf = buf;
373 cmd.qspimode = CCR_FMODE_INDW;
374
375 stm32_qspi_set_framemode(nor, &cmd, false);
376
377 return stm32_qspi_send(flash, &cmd);
378}
379
380static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
381 u_char *buf)
382{
383 struct stm32_qspi_flash *flash = nor->priv;
384 struct stm32_qspi *qspi = flash->qspi;
385 struct stm32_qspi_cmd cmd;
386 int err;
387
388 dev_dbg(qspi->dev, "read(%#.2x): buf:%pK from:%#.8x len:%#zx\n",
389 nor->read_opcode, buf, (u32)from, len);
390
391 memset(&cmd, 0, sizeof(cmd));
392 cmd.opcode = nor->read_opcode;
393 cmd.addr_width = nor->addr_width;
394 cmd.addr = (u32)from;
395 cmd.tx_data = true;
396 cmd.dummy = nor->read_dummy;
397 cmd.len = len;
398 cmd.buf = buf;
399 cmd.qspimode = flash->read_mode;
400
401 stm32_qspi_set_framemode(nor, &cmd, true);
402 err = stm32_qspi_send(flash, &cmd);
403
404 return err ? err : len;
405}
406
407static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
408 const u_char *buf)
409{
410 struct stm32_qspi_flash *flash = nor->priv;
411 struct device *dev = flash->qspi->dev;
412 struct stm32_qspi_cmd cmd;
413 int err;
414
415 dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n",
416 nor->program_opcode, buf, (u32)to, len);
417
418 memset(&cmd, 0, sizeof(cmd));
419 cmd.opcode = nor->program_opcode;
420 cmd.addr_width = nor->addr_width;
421 cmd.addr = (u32)to;
422 cmd.tx_data = true;
423 cmd.len = len;
424 cmd.buf = (void *)buf;
425 cmd.qspimode = CCR_FMODE_INDW;
426
427 stm32_qspi_set_framemode(nor, &cmd, false);
428 err = stm32_qspi_send(flash, &cmd);
429
430 return err ? err : len;
431}
432
433static int stm32_qspi_erase(struct spi_nor *nor, loff_t offs)
434{
435 struct stm32_qspi_flash *flash = nor->priv;
436 struct device *dev = flash->qspi->dev;
437 struct stm32_qspi_cmd cmd;
438
439 dev_dbg(dev, "erase(%#.2x):offs:%#x\n", nor->erase_opcode, (u32)offs);
440
441 memset(&cmd, 0, sizeof(cmd));
442 cmd.opcode = nor->erase_opcode;
443 cmd.addr_width = nor->addr_width;
444 cmd.addr = (u32)offs;
445 cmd.qspimode = CCR_FMODE_INDW;
446
447 stm32_qspi_set_framemode(nor, &cmd, false);
448
449 return stm32_qspi_send(flash, &cmd);
450}
451
452static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
453{
454 struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
455 u32 cr, sr, fcr = 0;
456
457 cr = readl_relaxed(qspi->io_base + QUADSPI_CR);
458 sr = readl_relaxed(qspi->io_base + QUADSPI_SR);
459
460 if ((cr & CR_TCIE) && (sr & SR_TCF)) {
461 /* tx complete */
462 fcr |= FCR_CTCF;
463 complete(&qspi->cmd_completion);
464 } else {
465 dev_info_ratelimited(qspi->dev, "spurious interrupt\n");
466 }
467
468 writel_relaxed(fcr, qspi->io_base + QUADSPI_FCR);
469
470 return IRQ_HANDLED;
471}
472
473static int stm32_qspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
474{
475 struct stm32_qspi_flash *flash = nor->priv;
476 struct stm32_qspi *qspi = flash->qspi;
477
478 mutex_lock(&qspi->lock);
479 return 0;
480}
481
482static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
483{
484 struct stm32_qspi_flash *flash = nor->priv;
485 struct stm32_qspi *qspi = flash->qspi;
486
487 mutex_unlock(&qspi->lock);
488}
489
490static int stm32_qspi_flash_setup(struct stm32_qspi *qspi,
491 struct device_node *np)
492{
493 struct spi_nor_hwcaps hwcaps = {
494 .mask = SNOR_HWCAPS_READ |
495 SNOR_HWCAPS_READ_FAST |
496 SNOR_HWCAPS_PP,
497 };
498 u32 width, presc, cs_num, max_rate = 0;
499 struct stm32_qspi_flash *flash;
500 struct mtd_info *mtd;
501 int ret;
502
503 of_property_read_u32(np, "reg", &cs_num);
504 if (cs_num >= STM32_MAX_NORCHIP)
505 return -EINVAL;
506
507 of_property_read_u32(np, "spi-max-frequency", &max_rate);
508 if (!max_rate)
509 return -EINVAL;
510
511 presc = DIV_ROUND_UP(qspi->clk_rate, max_rate) - 1;
512
513 if (of_property_read_u32(np, "spi-rx-bus-width", &width))
514 width = 1;
515
516 if (width == 4)
517 hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
518 else if (width == 2)
519 hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
520 else if (width != 1)
521 return -EINVAL;
522
523 flash = &qspi->flash[cs_num];
524 flash->qspi = qspi;
525 flash->cs = cs_num;
526 flash->presc = presc;
527
528 flash->nor.dev = qspi->dev;
529 spi_nor_set_flash_node(&flash->nor, np);
530 flash->nor.priv = flash;
531 mtd = &flash->nor.mtd;
532
533 flash->nor.read = stm32_qspi_read;
534 flash->nor.write = stm32_qspi_write;
535 flash->nor.erase = stm32_qspi_erase;
536 flash->nor.read_reg = stm32_qspi_read_reg;
537 flash->nor.write_reg = stm32_qspi_write_reg;
538 flash->nor.prepare = stm32_qspi_prep;
539 flash->nor.unprepare = stm32_qspi_unprep;
540
541 writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QUADSPI_LPTR);
542
543 writel_relaxed(CR_PRESC(presc) | CR_FTHRES(3) | CR_TCEN | CR_SSHIFT
544 | CR_EN, qspi->io_base + QUADSPI_CR);
545
546	/*
547	 * In the stm32 qspi controller, the QUADSPI_DCR register has a fsize
548	 * field which defines the size of the nor flash.
549	 * If fsize is zero, the controller can't send spi-nor commands.
550	 * Set a temporary value just to discover the nor flash with
551	 * "spi_nor_scan". Afterwards, the right value (mtd->size) can be set.
552	 */
553 flash->fsize = FSIZE_VAL(SZ_1K);
554
555 ret = spi_nor_scan(&flash->nor, NULL, &hwcaps);
556 if (ret) {
557 dev_err(qspi->dev, "device scan failed\n");
558 return ret;
559 }
560
561 flash->fsize = FSIZE_VAL(mtd->size);
562 flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ;
563
564 flash->read_mode = CCR_FMODE_MM;
565 if (mtd->size > qspi->mm_size)
566 flash->read_mode = CCR_FMODE_INDR;
567
568 writel_relaxed(DCR_CSHT(1), qspi->io_base + QUADSPI_DCR);
569
570 ret = mtd_device_register(mtd, NULL, 0);
571 if (ret) {
572 dev_err(qspi->dev, "mtd device parse failed\n");
573 return ret;
574 }
575
576 flash->registered = true;
577
578 dev_dbg(qspi->dev, "read mm:%s cs:%d bus:%d\n",
579 flash->read_mode == CCR_FMODE_MM ? "yes" : "no", cs_num, width);
580
581 return 0;
582}
583
584static void stm32_qspi_mtd_free(struct stm32_qspi *qspi)
585{
586 int i;
587
588 for (i = 0; i < STM32_MAX_NORCHIP; i++)
589 if (qspi->flash[i].registered)
590 mtd_device_unregister(&qspi->flash[i].nor.mtd);
591}
592
593static int stm32_qspi_probe(struct platform_device *pdev)
594{
595 struct device *dev = &pdev->dev;
596 struct device_node *flash_np;
597 struct reset_control *rstc;
598 struct stm32_qspi *qspi;
599 struct resource *res;
600 int ret, irq;
601
602 qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL);
603 if (!qspi)
604 return -ENOMEM;
605
606 qspi->nor_num = of_get_child_count(dev->of_node);
607 if (!qspi->nor_num || qspi->nor_num > STM32_MAX_NORCHIP)
608 return -ENODEV;
609
610 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
611 qspi->io_base = devm_ioremap_resource(dev, res);
612 if (IS_ERR(qspi->io_base))
613 return PTR_ERR(qspi->io_base);
614
615 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
616 qspi->mm_base = devm_ioremap_resource(dev, res);
617 if (IS_ERR(qspi->mm_base))
618 return PTR_ERR(qspi->mm_base);
619
620 qspi->mm_size = resource_size(res);
621
622 irq = platform_get_irq(pdev, 0);
623 ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
624 dev_name(dev), qspi);
625 if (ret) {
626 dev_err(dev, "failed to request irq\n");
627 return ret;
628 }
629
630 init_completion(&qspi->cmd_completion);
631
632 qspi->clk = devm_clk_get(dev, NULL);
633 if (IS_ERR(qspi->clk))
634 return PTR_ERR(qspi->clk);
635
636 qspi->clk_rate = clk_get_rate(qspi->clk);
637 if (!qspi->clk_rate)
638 return -EINVAL;
639
640 ret = clk_prepare_enable(qspi->clk);
641 if (ret) {
642		dev_err(dev, "cannot enable the clock\n");
643 return ret;
644 }
645
646 rstc = devm_reset_control_get_exclusive(dev, NULL);
647 if (!IS_ERR(rstc)) {
648 reset_control_assert(rstc);
649 udelay(2);
650 reset_control_deassert(rstc);
651 }
652
653 qspi->dev = dev;
654 platform_set_drvdata(pdev, qspi);
655 mutex_init(&qspi->lock);
656
657 for_each_available_child_of_node(dev->of_node, flash_np) {
658 ret = stm32_qspi_flash_setup(qspi, flash_np);
659 if (ret) {
660 dev_err(dev, "unable to setup flash chip\n");
661 goto err_flash;
662 }
663 }
664
665 return 0;
666
667err_flash:
668 mutex_destroy(&qspi->lock);
669 stm32_qspi_mtd_free(qspi);
670
671 clk_disable_unprepare(qspi->clk);
672 return ret;
673}
674
675static int stm32_qspi_remove(struct platform_device *pdev)
676{
677 struct stm32_qspi *qspi = platform_get_drvdata(pdev);
678
679 /* disable qspi */
680 writel_relaxed(0, qspi->io_base + QUADSPI_CR);
681
682 stm32_qspi_mtd_free(qspi);
683 mutex_destroy(&qspi->lock);
684
685 clk_disable_unprepare(qspi->clk);
686 return 0;
687}
688
689static const struct of_device_id stm32_qspi_match[] = {
690 {.compatible = "st,stm32f469-qspi"},
691 {}
692};
693MODULE_DEVICE_TABLE(of, stm32_qspi_match);
694
695static struct platform_driver stm32_qspi_driver = {
696 .probe = stm32_qspi_probe,
697 .remove = stm32_qspi_remove,
698 .driver = {
699 .name = "stm32-quadspi",
700 .of_match_table = stm32_qspi_match,
701 },
702};
703module_platform_driver(stm32_qspi_driver);
704
705MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
706MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
707MODULE_LICENSE("GPL v2");
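
The prescaler in stm32_qspi_flash_setup() above deserves a worked example: DIV_ROUND_UP(clk_rate, max_rate) - 1 picks the smallest divider whose output clock does not exceed the flash's spi-max-frequency. A standalone sketch, with assumed clock values that are not taken from this patch:

#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_UP() macro. */
static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned long clk_rate = 216000000;	/* assumed kernel clock, Hz */
	unsigned long max_rate = 50000000;	/* assumed spi-max-frequency */
	unsigned long presc = div_round_up(clk_rate, max_rate) - 1;

	/* SCK = clk_rate / (presc + 1) is the fastest rate <= max_rate. */
	printf("presc=%lu sck=%lu Hz\n", presc, clk_rate / (presc + 1));
	return 0;
}

With these values, presc = 4 and SCK = 43.2 MHz, the highest rate that stays under the 50 MHz limit.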
diff --git a/include/linux/dma/mxs-dma.h b/include/linux/dma/mxs-dma.h
new file mode 100644
index 000000000000..069d9f5a609e
--- /dev/null
+++ b/include/linux/dma/mxs-dma.h
@@ -0,0 +1,24 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _MXS_DMA_H_
3#define _MXS_DMA_H_
4
5#include <linux/dmaengine.h>
6
7#define MXS_DMA_CTRL_WAIT4END BIT(31)
8#define MXS_DMA_CTRL_WAIT4RDY BIT(30)
9
10/*
11 * The mxs dmaengine can do PIO transfers. We pass a pointer to the PIO words
12 * in the second argument to dmaengine_prep_slave_sg when the direction is
13 * set to DMA_TRANS_NONE. To make this clear and to prevent users from doing
14 * the error-prone casting, we have this wrapper function.
15 */
16static inline struct dma_async_tx_descriptor *mxs_dmaengine_prep_pio(
17 struct dma_chan *chan, u32 *pio, unsigned int npio,
18 enum dma_transfer_direction dir, unsigned long flags)
19{
20 return dmaengine_prep_slave_sg(chan, (struct scatterlist *)pio, npio,
21 dir, flags);
22}
23
24#endif /* _MXS_DMA_H_ */
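
A usage sketch for the wrapper, assuming a hypothetical caller that already owns a DMA channel; mxs_dmaengine_prep_pio(), MXS_DMA_CTRL_WAIT4END and the generic dmaengine calls come from the kernel, everything else is illustrative:

#include <linux/dma/mxs-dma.h>
#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Issue a PIO-only command word pair and kick the channel (sketch). */
static int example_send_pio(struct dma_chan *chan, u32 cmd0, u32 cmd1)
{
	struct dma_async_tx_descriptor *desc;
	u32 pio[2] = { cmd0, cmd1 };

	desc = mxs_dmaengine_prep_pio(chan, pio, ARRAY_SIZE(pio),
				      DMA_TRANS_NONE, MXS_DMA_CTRL_WAIT4END);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}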
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 208c87cf2e3e..c98a21108688 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -219,6 +219,13 @@ struct cfi_pri_amdstd {
219 uint8_t VppMin; 219 uint8_t VppMin;
220 uint8_t VppMax; 220 uint8_t VppMax;
221 uint8_t TopBottom; 221 uint8_t TopBottom;
222	/* The fields below are added from version 1.5 */
223 uint8_t ProgramSuspend;
224 uint8_t UnlockBypass;
225 uint8_t SecureSiliconSector;
226 uint8_t SoftwareFeatures;
227#define CFI_POLL_STATUS_REG BIT(0)
228#define CFI_POLL_DQ BIT(1)
222} __packed; 229} __packed;
223 230
224/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ 231/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
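
A sketch of how a command-set driver might consume the new fields and poll bits; the helper name is illustrative, though it mirrors the cfi_cmdset_0002 change in this series:

#include <linux/mtd/cfi.h>

/*
 * Prefer status-register polling only when the primary extended query
 * is v1.5+ and the chip advertises CFI_POLL_STATUS_REG without
 * CFI_POLL_DQ.
 */
static int example_use_status_reg(const struct cfi_pri_amdstd *extp)
{
	u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;

	return extp->MinorVersion >= '5' &&
	       (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
}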
diff --git a/include/linux/mtd/hyperbus.h b/include/linux/mtd/hyperbus.h
new file mode 100644
index 000000000000..2dfe65964f6e
--- /dev/null
+++ b/include/linux/mtd/hyperbus.h
@@ -0,0 +1,84 @@
1/* SPDX-License-Identifier: GPL-2.0
2 *
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
4 */
5
6#ifndef __LINUX_MTD_HYPERBUS_H__
7#define __LINUX_MTD_HYPERBUS_H__
8
9#include <linux/mtd/map.h>
10
11enum hyperbus_memtype {
12 HYPERFLASH,
13 HYPERRAM,
14};
15
16/**
17 * struct hyperbus_device - struct representing HyperBus slave device
18 * @map: map_info struct for accessing MMIO HyperBus flash memory
19 * @np: pointer to HyperBus slave device node
20 * @mtd: pointer to MTD struct
21 * @ctlr: pointer to HyperBus controller struct
22 * @memtype: type of memory device: HyperFlash or HyperRAM
23 */
24
25struct hyperbus_device {
26 struct map_info map;
27 struct device_node *np;
28 struct mtd_info *mtd;
29 struct hyperbus_ctlr *ctlr;
30 enum hyperbus_memtype memtype;
31};
32
33/**
34 * struct hyperbus_ops - struct representing custom HyperBus operations
35 * @read16: read 16 bits of data from the flash in a single burst. Used to
36 * read from a non-default address space, such as ID/CFI space
37 * @write16: write 16 bits of data to the flash in a single burst. Used to
38 * send a command to the flash or write a single 16-bit word at a time.
39 * @copy_from: copy data from flash memory
40 * @copy_to: copy data to flash memory
41 * @calibrate: calibrate HyperBus controller
42 */
43
44struct hyperbus_ops {
45 u16 (*read16)(struct hyperbus_device *hbdev, unsigned long addr);
46 void (*write16)(struct hyperbus_device *hbdev,
47 unsigned long addr, u16 val);
48 void (*copy_from)(struct hyperbus_device *hbdev, void *to,
49 unsigned long from, ssize_t len);
50 void (*copy_to)(struct hyperbus_device *dev, unsigned long to,
51 const void *from, ssize_t len);
52 int (*calibrate)(struct hyperbus_device *dev);
53};
54
55/**
56 * struct hyperbus_ctlr - struct representing HyperBus controller
57 * @dev: pointer to HyperBus controller device
58 * @calibrated: flag to indicate ctlr calibration sequence is complete
59 * @ops: HyperBus controller ops
60 */
61struct hyperbus_ctlr {
62 struct device *dev;
63 bool calibrated;
64
65 const struct hyperbus_ops *ops;
66};
67
68/**
69 * hyperbus_register_device - probe and register a HyperBus slave memory device
70 * @hbdev: hyperbus_device struct with np and ctlr fields populated
71 *
72 * Return: 0 on success, a negative error code on failure.
73 */
74int hyperbus_register_device(struct hyperbus_device *hbdev);
75
76/**
77 * hyperbus_unregister_device - deregister HyperBus slave memory device
78 * @hbdev: hyperbus_device to be unregistered
79 *
80 * Return: 0 on success, a negative error code on failure.
81 */
82int hyperbus_unregister_device(struct hyperbus_device *hbdev);
83
84#endif /* __LINUX_MTD_HYPERBUS_H__ */
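
A minimal controller-side sketch of the registration flow; the driver structure and calibrate stub are hypothetical, while the ctlr/hbdev wiring follows the kernel-doc above (compare hbmc-am654.c in this series):

#include <linux/mtd/hyperbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct example_hbmc {
	struct hyperbus_ctlr ctlr;
	struct hyperbus_device hbdev;
};

static int example_calibrate(struct hyperbus_device *hbdev)
{
	return 1;	/* pretend calibration always succeeds */
}

static const struct hyperbus_ops example_ops = {
	.calibrate = example_calibrate,
};

static int example_hbmc_probe(struct platform_device *pdev)
{
	struct example_hbmc *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->ctlr.dev = &pdev->dev;
	priv->ctlr.ops = &example_ops;

	priv->hbdev.ctlr = &priv->ctlr;
	priv->hbdev.np = of_get_next_child(pdev->dev.of_node, NULL);
	priv->hbdev.memtype = HYPERFLASH;

	platform_set_drvdata(pdev, priv);

	return hyperbus_register_device(&priv->hbdev);
}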
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 936a3fdb48b5..4ca8c1c845fb 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -316,6 +316,12 @@ struct mtd_info {
316 int (*_get_device) (struct mtd_info *mtd); 316 int (*_get_device) (struct mtd_info *mtd);
317 void (*_put_device) (struct mtd_info *mtd); 317 void (*_put_device) (struct mtd_info *mtd);
318 318
319 /*
320 * flag indicating a panic write; low-level drivers can take appropriate
321 * action if required to ensure writes go through.
322 */
323 bool oops_panic_write;
324
319 struct notifier_block reboot_notifier; /* default mode before reboot */ 325 struct notifier_block reboot_notifier; /* default mode before reboot */
320 326
321 /* ECC status information */ 327 /* ECC status information */
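
A hedged sketch of what a low-level driver can do with this flag; the wait helper and the delay values are illustrative, not from this patch:

#include <linux/delay.h>
#include <linux/mtd/mtd.h>

/* During a panic write the scheduler may be dead: spin, don't sleep. */
static void example_wait_ready(struct mtd_info *mtd)
{
	if (mtd->oops_panic_write)
		mdelay(1);			/* atomic-safe busy wait */
	else
		usleep_range(1000, 2000);	/* normal path may sleep */
}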
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index 2d12a1b18742..5f728407a579 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -77,6 +77,7 @@
77#define ONENAND_DEVICE_DENSITY_1Gb (0x003) 77#define ONENAND_DEVICE_DENSITY_1Gb (0x003)
78#define ONENAND_DEVICE_DENSITY_2Gb (0x004) 78#define ONENAND_DEVICE_DENSITY_2Gb (0x004)
79#define ONENAND_DEVICE_DENSITY_4Gb (0x005) 79#define ONENAND_DEVICE_DENSITY_4Gb (0x005)
80#define ONENAND_DEVICE_DENSITY_8Gb (0x006)
80 81
81/* 82/*
82 * Version ID Register F002h (R) 83 * Version ID Register F002h (R)
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index ac3884a28dea..4ab9bccfcde0 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -874,6 +874,42 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
874 const struct nand_op_parser *parser, 874 const struct nand_op_parser *parser,
875 const struct nand_operation *op, bool check_only); 875 const struct nand_operation *op, bool check_only);
876 876
877static inline void nand_op_trace(const char *prefix,
878 const struct nand_op_instr *instr)
879{
880#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
881 switch (instr->type) {
882 case NAND_OP_CMD_INSTR:
883 pr_debug("%sCMD [0x%02x]\n", prefix,
884 instr->ctx.cmd.opcode);
885 break;
886 case NAND_OP_ADDR_INSTR:
887 pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
888 instr->ctx.addr.naddrs,
889 instr->ctx.addr.naddrs < 64 ?
890 instr->ctx.addr.naddrs : 64,
891 instr->ctx.addr.addrs);
892 break;
893 case NAND_OP_DATA_IN_INSTR:
894 pr_debug("%sDATA_IN [%d B%s]\n", prefix,
895 instr->ctx.data.len,
896 instr->ctx.data.force_8bit ?
897 ", force 8-bit" : "");
898 break;
899 case NAND_OP_DATA_OUT_INSTR:
900 pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
901 instr->ctx.data.len,
902 instr->ctx.data.force_8bit ?
903 ", force 8-bit" : "");
904 break;
905 case NAND_OP_WAITRDY_INSTR:
906 pr_debug("%sWAITRDY [max %d ms]\n", prefix,
907 instr->ctx.waitrdy.timeout_ms);
908 break;
909 }
910#endif
911}
912
877/** 913/**
878 * struct nand_controller_ops - Controller operations 914 * struct nand_controller_ops - Controller operations
879 * 915 *
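
Where nand_op_trace() is meant to sit, sketched as a hypothetical ->exec_op() implementation; the controller programming step is elided:

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	unsigned int i;

	if (check_only)
		return 0;

	for (i = 0; i < op->ninstrs; i++) {
		nand_op_trace("  ", &op->instrs[i]);
		/* ... program the controller for op->instrs[i] here ... */
	}

	return 0;
}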
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 507f7e289bd1..4ea558bd3c46 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -68,30 +68,60 @@
68 SPI_MEM_OP_DUMMY(ndummy, 1), \ 68 SPI_MEM_OP_DUMMY(ndummy, 1), \
69 SPI_MEM_OP_DATA_IN(len, buf, 1)) 69 SPI_MEM_OP_DATA_IN(len, buf, 1))
70 70
71#define SPINAND_PAGE_READ_FROM_CACHE_OP_3A(fast, addr, ndummy, buf, len) \
72 SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
73 SPI_MEM_OP_ADDR(3, addr, 1), \
74 SPI_MEM_OP_DUMMY(ndummy, 1), \
75 SPI_MEM_OP_DATA_IN(len, buf, 1))
76
71#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ 77#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
72 SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ 78 SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
73 SPI_MEM_OP_ADDR(2, addr, 1), \ 79 SPI_MEM_OP_ADDR(2, addr, 1), \
74 SPI_MEM_OP_DUMMY(ndummy, 1), \ 80 SPI_MEM_OP_DUMMY(ndummy, 1), \
75 SPI_MEM_OP_DATA_IN(len, buf, 2)) 81 SPI_MEM_OP_DATA_IN(len, buf, 2))
76 82
83#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(addr, ndummy, buf, len) \
84 SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
85 SPI_MEM_OP_ADDR(3, addr, 1), \
86 SPI_MEM_OP_DUMMY(ndummy, 1), \
87 SPI_MEM_OP_DATA_IN(len, buf, 2))
88
77#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ 89#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
78 SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ 90 SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
79 SPI_MEM_OP_ADDR(2, addr, 1), \ 91 SPI_MEM_OP_ADDR(2, addr, 1), \
80 SPI_MEM_OP_DUMMY(ndummy, 1), \ 92 SPI_MEM_OP_DUMMY(ndummy, 1), \
81 SPI_MEM_OP_DATA_IN(len, buf, 4)) 93 SPI_MEM_OP_DATA_IN(len, buf, 4))
82 94
95#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(addr, ndummy, buf, len) \
96 SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
97 SPI_MEM_OP_ADDR(3, addr, 1), \
98 SPI_MEM_OP_DUMMY(ndummy, 1), \
99 SPI_MEM_OP_DATA_IN(len, buf, 4))
100
83#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ 101#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
84 SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ 102 SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
85 SPI_MEM_OP_ADDR(2, addr, 2), \ 103 SPI_MEM_OP_ADDR(2, addr, 2), \
86 SPI_MEM_OP_DUMMY(ndummy, 2), \ 104 SPI_MEM_OP_DUMMY(ndummy, 2), \
87 SPI_MEM_OP_DATA_IN(len, buf, 2)) 105 SPI_MEM_OP_DATA_IN(len, buf, 2))
88 106
107#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(addr, ndummy, buf, len) \
108 SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
109 SPI_MEM_OP_ADDR(3, addr, 2), \
110 SPI_MEM_OP_DUMMY(ndummy, 2), \
111 SPI_MEM_OP_DATA_IN(len, buf, 2))
112
89#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ 113#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
90 SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ 114 SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
91 SPI_MEM_OP_ADDR(2, addr, 4), \ 115 SPI_MEM_OP_ADDR(2, addr, 4), \
92 SPI_MEM_OP_DUMMY(ndummy, 4), \ 116 SPI_MEM_OP_DUMMY(ndummy, 4), \
93 SPI_MEM_OP_DATA_IN(len, buf, 4)) 117 SPI_MEM_OP_DATA_IN(len, buf, 4))
94 118
119#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(addr, ndummy, buf, len) \
120 SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
121 SPI_MEM_OP_ADDR(3, addr, 4), \
122 SPI_MEM_OP_DUMMY(ndummy, 4), \
123 SPI_MEM_OP_DATA_IN(len, buf, 4))
124
95#define SPINAND_PROG_EXEC_OP(addr) \ 125#define SPINAND_PROG_EXEC_OP(addr) \
96 SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ 126 SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
97 SPI_MEM_OP_ADDR(3, addr, 1), \ 127 SPI_MEM_OP_ADDR(3, addr, 1), \
@@ -197,6 +227,7 @@ struct spinand_manufacturer {
197extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; 227extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
198extern const struct spinand_manufacturer macronix_spinand_manufacturer; 228extern const struct spinand_manufacturer macronix_spinand_manufacturer;
199extern const struct spinand_manufacturer micron_spinand_manufacturer; 229extern const struct spinand_manufacturer micron_spinand_manufacturer;
230extern const struct spinand_manufacturer paragon_spinand_manufacturer;
200extern const struct spinand_manufacturer toshiba_spinand_manufacturer; 231extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
201extern const struct spinand_manufacturer winbond_spinand_manufacturer; 232extern const struct spinand_manufacturer winbond_spinand_manufacturer;
202 233
@@ -260,7 +291,7 @@ struct spinand_ecc_info {
260 */ 291 */
261struct spinand_info { 292struct spinand_info {
262 const char *model; 293 const char *model;
263 u8 devid; 294 u16 devid;
264 u32 flags; 295 u32 flags;
265 struct nand_memory_organization memorg; 296 struct nand_memory_organization memorg;
266 struct nand_ecc_req eccreq; 297 struct nand_ecc_req eccreq;
@@ -422,7 +453,7 @@ static inline void spinand_set_of_node(struct spinand_device *spinand,
422 453
423int spinand_match_and_init(struct spinand_device *dev, 454int spinand_match_and_init(struct spinand_device *dev,
424 const struct spinand_info *table, 455 const struct spinand_info *table,
425 unsigned int table_size, u8 devid); 456 unsigned int table_size, u16 devid);
426 457
427int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); 458int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
428int spinand_select_target(struct spinand_device *spinand, unsigned int target); 459int spinand_select_target(struct spinand_device *spinand, unsigned int target);
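
How a manufacturer driver can list the new 3-byte-address read variants, sketched in the style of gigadevice.c; the table name and dummy-cycle counts are assumptions:

static SPINAND_OP_VARIANTS(read_cache_variants_3a,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP_3A(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP_3A(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));

The widened u16 devid in struct spinand_info pairs with this: chips identified by a two-byte device ID can now be matched through spinand_match_and_init() as well.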
diff --git a/include/uapi/mtd/mtd-abi.h b/include/uapi/mtd/mtd-abi.h
index aff5b5e59845..47ffe3208c27 100644
--- a/include/uapi/mtd/mtd-abi.h
+++ b/include/uapi/mtd/mtd-abi.h
@@ -113,11 +113,11 @@ struct mtd_write_req {
113#define MTD_CAP_NVRAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE) 113#define MTD_CAP_NVRAM (MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE)
114 114
115/* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */ 115/* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */
116#define MTD_NANDECC_OFF 0 // Switch off ECC (Not recommended) 116#define MTD_NANDECC_OFF 0 /* Switch off ECC (Not recommended) */
117#define MTD_NANDECC_PLACE 1 // Use the given placement in the structure (YAFFS1 legacy mode) 117#define MTD_NANDECC_PLACE 1 /* Use the given placement in the structure (YAFFS1 legacy mode) */
118#define MTD_NANDECC_AUTOPLACE 2 // Use the default placement scheme 118#define MTD_NANDECC_AUTOPLACE 2 /* Use the default placement scheme */
119#define MTD_NANDECC_PLACEONLY 3 // Use the given placement in the structure (Do not store ecc result on read) 119#define MTD_NANDECC_PLACEONLY 3 /* Use the given placement in the structure (Do not store ecc result on read) */
120#define MTD_NANDECC_AUTOPL_USR 4 // Use the given autoplacement scheme rather than using the default 120#define MTD_NANDECC_AUTOPL_USR 4 /* Use the given autoplacement scheme rather than using the default */
121 121
122/* OTP mode selection */ 122/* OTP mode selection */
123#define MTD_OTP_OFF 0 123#define MTD_OTP_OFF 0