aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/spi
diff options
context:
space:
mode:
authorMark Brown <broonie@kernel.org>2019-03-04 10:32:51 -0500
committerMark Brown <broonie@kernel.org>2019-03-04 10:32:51 -0500
commit14dbfb417bd80c96ec700c7a8414bb6f5db7ecd2 (patch)
treef79fb71b6206ef40aa34c52649053a2aaa493a3c /drivers/spi
parentb50c6ac8b6fb3e7b5cf843afd789c8553a910254 (diff)
parent0e836c3bea7da04cd4e2ed22d8c20654d5a09273 (diff)
Merge branch 'spi-5.1' into spi-next
Diffstat (limited to 'drivers/spi')
-rw-r--r--drivers/spi/Kconfig36
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/atmel-quadspi.c270
-rw-r--r--drivers/spi/spi-ath79.c114
-rw-r--r--drivers/spi/spi-atmel.c102
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-bitbang.c13
-rw-r--r--drivers/spi/spi-cadence.c84
-rw-r--r--drivers/spi/spi-clps711x.c23
-rw-r--r--drivers/spi/spi-davinci.c54
-rw-r--r--drivers/spi/spi-dw-mmio.c22
-rw-r--r--drivers/spi/spi-dw.c18
-rw-r--r--drivers/spi/spi-fsl-dspi.c40
-rw-r--r--drivers/spi/spi-fsl-qspi.c966
-rw-r--r--drivers/spi/spi-geni-qcom.c56
-rw-r--r--drivers/spi/spi-mem.c69
-rw-r--r--drivers/spi/spi-mxs.c5
-rw-r--r--drivers/spi/spi-nxp-fspi.c1106
-rw-r--r--drivers/spi/spi-pl022.c30
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c58
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c4
-rw-r--r--drivers/spi/spi-pxa2xx.c158
-rw-r--r--drivers/spi/spi-pxa2xx.h4
-rw-r--r--drivers/spi/spi-rspi.c170
-rw-r--r--drivers/spi/spi-sh-hspi.c39
-rw-r--r--drivers/spi/spi-sh-msiof.c184
-rw-r--r--drivers/spi/spi-sifive.c448
-rw-r--r--drivers/spi/spi-sprd.c344
-rw-r--r--drivers/spi/spi-stm32.c1403
-rw-r--r--drivers/spi/spi.c115
30 files changed, 4799 insertions, 1141 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 9f89cb134549..f761655e2a36 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -63,7 +63,7 @@ config SPI_ALTERA
63 63
64config SPI_ATH79 64config SPI_ATH79
65 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" 65 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
66 depends on ATH79 && GPIOLIB 66 depends on ATH79 || COMPILE_TEST
67 select SPI_BITBANG 67 select SPI_BITBANG
68 help 68 help
69 This enables support for the SPI controller present on the 69 This enables support for the SPI controller present on the
@@ -268,6 +268,27 @@ config SPI_FSL_LPSPI
268 help 268 help
269 This enables Freescale i.MX LPSPI controllers in master mode. 269 This enables Freescale i.MX LPSPI controllers in master mode.
270 270
271config SPI_FSL_QUADSPI
272 tristate "Freescale QSPI controller"
273 depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
274 depends on HAS_IOMEM
275 help
276 This enables support for the Quad SPI controller in master mode.
277 Up to four flash chips can be connected on two buses with two
278 chipselects each.
279 This controller does not support generic SPI messages. It only
280 supports the high-level SPI memory interface.
281
282config SPI_NXP_FLEXSPI
283 tristate "NXP Flex SPI controller"
284 depends on ARCH_LAYERSCAPE || HAS_IOMEM
285 help
286 This enables support for the Flex SPI controller in master mode.
287 Up to four slave devices can be connected on two buses with two
288 chipselects each.
289 This controller does not support generic SPI messages and only
290 supports the high-level SPI memory interface.
291
271config SPI_GPIO 292config SPI_GPIO
272 tristate "GPIO-based bitbanging SPI Master" 293 tristate "GPIO-based bitbanging SPI Master"
273 depends on GPIOLIB || COMPILE_TEST 294 depends on GPIOLIB || COMPILE_TEST
@@ -296,8 +317,7 @@ config SPI_IMX
296 depends on ARCH_MXC || COMPILE_TEST 317 depends on ARCH_MXC || COMPILE_TEST
297 select SPI_BITBANG 318 select SPI_BITBANG
298 help 319 help
299 This enables using the Freescale i.MX SPI controllers in master 320 This enables support for the Freescale i.MX SPI controllers.
300 mode.
301 321
302config SPI_JCORE 322config SPI_JCORE
303 tristate "J-Core SPI Master" 323 tristate "J-Core SPI Master"
@@ -372,7 +392,7 @@ config SPI_FSL_DSPI
372 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST 392 depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
373 help 393 help
374 This enables support for the Freescale DSPI controller in master 394 This enables support for the Freescale DSPI controller in master
375 mode. VF610 platform uses the controller. 395 mode. VF610, LS1021A and ColdFire platforms uses the controller.
376 396
377config SPI_FSL_ESPI 397config SPI_FSL_ESPI
378 tristate "Freescale eSPI controller" 398 tristate "Freescale eSPI controller"
@@ -631,6 +651,12 @@ config SPI_SH_HSPI
631 help 651 help
632 SPI driver for SuperH HSPI blocks. 652 SPI driver for SuperH HSPI blocks.
633 653
654config SPI_SIFIVE
655 tristate "SiFive SPI controller"
656 depends on HAS_IOMEM
657 help
658 This exposes the SPI controller IP from SiFive.
659
634config SPI_SIRF 660config SPI_SIRF
635 tristate "CSR SiRFprimaII SPI controller" 661 tristate "CSR SiRFprimaII SPI controller"
636 depends on SIRF_DMA 662 depends on SIRF_DMA
@@ -665,7 +691,7 @@ config SPI_STM32
665 tristate "STMicroelectronics STM32 SPI controller" 691 tristate "STMicroelectronics STM32 SPI controller"
666 depends on ARCH_STM32 || COMPILE_TEST 692 depends on ARCH_STM32 || COMPILE_TEST
667 help 693 help
668 SPI driver for STMicroelectonics STM32 SoCs. 694 SPI driver for STMicroelectronics STM32 SoCs.
669 695
670 STM32 SPI controller supports DMA and PIO modes. When DMA 696 STM32 SPI controller supports DMA and PIO modes. When DMA
671 is not available, the driver automatically falls back to 697 is not available, the driver automatically falls back to
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f29627040dfb..d8fc03c9faa2 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
45obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o 45obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
46obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o 46obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
47obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o 47obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
48obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
48obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o 49obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
49obj-$(CONFIG_SPI_GPIO) += spi-gpio.o 50obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
50obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o 51obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
63obj-$(CONFIG_SPI_MXS) += spi-mxs.o 64obj-$(CONFIG_SPI_MXS) += spi-mxs.o
64obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o 65obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
65obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o 66obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
67obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
66obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o 68obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
67spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o 69spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
68obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o 70obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
@@ -93,6 +95,7 @@ obj-$(CONFIG_SPI_SH) += spi-sh.o
93obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o 95obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
94obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o 96obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
95obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o 97obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
98obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
96obj-$(CONFIG_SPI_SIRF) += spi-sirf.o 99obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
97obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o 100obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
98obj-$(CONFIG_SPI_SPRD) += spi-sprd.o 101obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index ddc712410812..fffc21cd5f79 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * Driver for Atmel QSPI Controller 3 * Driver for Atmel QSPI Controller
3 * 4 *
@@ -7,31 +8,19 @@
7 * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com> 8 * Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
8 * Author: Piotr Bugalski <bugalski.piotr@gmail.com> 9 * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 *
22 * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale. 11 * This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
23 */ 12 */
24 13
25#include <linux/kernel.h>
26#include <linux/clk.h> 14#include <linux/clk.h>
27#include <linux/module.h>
28#include <linux/platform_device.h>
29#include <linux/delay.h> 15#include <linux/delay.h>
30#include <linux/err.h> 16#include <linux/err.h>
31#include <linux/interrupt.h> 17#include <linux/interrupt.h>
32#include <linux/of.h>
33
34#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/of_platform.h>
23#include <linux/platform_device.h>
35#include <linux/spi/spi-mem.h> 24#include <linux/spi/spi-mem.h>
36 25
37/* QSPI register offsets */ 26/* QSPI register offsets */
@@ -47,7 +36,9 @@
47 36
48#define QSPI_IAR 0x0030 /* Instruction Address Register */ 37#define QSPI_IAR 0x0030 /* Instruction Address Register */
49#define QSPI_ICR 0x0034 /* Instruction Code Register */ 38#define QSPI_ICR 0x0034 /* Instruction Code Register */
39#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
50#define QSPI_IFR 0x0038 /* Instruction Frame Register */ 40#define QSPI_IFR 0x0038 /* Instruction Frame Register */
41#define QSPI_RICR 0x003C /* Read Instruction Code Register */
51 42
52#define QSPI_SMR 0x0040 /* Scrambling Mode Register */ 43#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
53#define QSPI_SKR 0x0044 /* Scrambling Key Register */ 44#define QSPI_SKR 0x0044 /* Scrambling Key Register */
@@ -100,7 +91,7 @@
100#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16) 91#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
101#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK) 92#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
102 93
103/* Bitfields in QSPI_ICR (Instruction Code Register) */ 94/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
104#define QSPI_ICR_INST_MASK GENMASK(7, 0) 95#define QSPI_ICR_INST_MASK GENMASK(7, 0)
105#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK) 96#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
106#define QSPI_ICR_OPT_MASK GENMASK(23, 16) 97#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
@@ -125,14 +116,12 @@
125#define QSPI_IFR_OPTL_4BIT (2 << 8) 116#define QSPI_IFR_OPTL_4BIT (2 << 8)
126#define QSPI_IFR_OPTL_8BIT (3 << 8) 117#define QSPI_IFR_OPTL_8BIT (3 << 8)
127#define QSPI_IFR_ADDRL BIT(10) 118#define QSPI_IFR_ADDRL BIT(10)
128#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12) 119#define QSPI_IFR_TFRTYP_MEM BIT(12)
129#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12) 120#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
130#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
131#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
132#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13)
133#define QSPI_IFR_CRM BIT(14) 121#define QSPI_IFR_CRM BIT(14)
134#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16) 122#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
135#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK) 123#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
124#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
136 125
137/* Bitfields in QSPI_SMR (Scrambling Mode Register) */ 126/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
138#define QSPI_SMR_SCREN BIT(0) 127#define QSPI_SMR_SCREN BIT(0)
@@ -148,24 +137,31 @@
148#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8) 137#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
149#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC) 138#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
150 139
140struct atmel_qspi_caps {
141 bool has_qspick;
142 bool has_ricr;
143};
151 144
152struct atmel_qspi { 145struct atmel_qspi {
153 void __iomem *regs; 146 void __iomem *regs;
154 void __iomem *mem; 147 void __iomem *mem;
155 struct clk *clk; 148 struct clk *pclk;
149 struct clk *qspick;
156 struct platform_device *pdev; 150 struct platform_device *pdev;
151 const struct atmel_qspi_caps *caps;
157 u32 pending; 152 u32 pending;
153 u32 mr;
158 struct completion cmd_completion; 154 struct completion cmd_completion;
159}; 155};
160 156
161struct qspi_mode { 157struct atmel_qspi_mode {
162 u8 cmd_buswidth; 158 u8 cmd_buswidth;
163 u8 addr_buswidth; 159 u8 addr_buswidth;
164 u8 data_buswidth; 160 u8 data_buswidth;
165 u32 config; 161 u32 config;
166}; 162};
167 163
168static const struct qspi_mode sama5d2_qspi_modes[] = { 164static const struct atmel_qspi_mode atmel_qspi_modes[] = {
169 { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI }, 165 { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
170 { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT }, 166 { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
171 { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT }, 167 { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
@@ -175,19 +171,8 @@ static const struct qspi_mode sama5d2_qspi_modes[] = {
175 { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD }, 171 { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
176}; 172};
177 173
178/* Register access functions */ 174static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
179static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg) 175 const struct atmel_qspi_mode *mode)
180{
181 return readl_relaxed(aq->regs + reg);
182}
183
184static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
185{
186 writel_relaxed(value, aq->regs + reg);
187}
188
189static inline bool is_compatible(const struct spi_mem_op *op,
190 const struct qspi_mode *mode)
191{ 176{
192 if (op->cmd.buswidth != mode->cmd_buswidth) 177 if (op->cmd.buswidth != mode->cmd_buswidth)
193 return false; 178 return false;
@@ -201,21 +186,21 @@ static inline bool is_compatible(const struct spi_mem_op *op,
201 return true; 186 return true;
202} 187}
203 188
204static int find_mode(const struct spi_mem_op *op) 189static int atmel_qspi_find_mode(const struct spi_mem_op *op)
205{ 190{
206 u32 i; 191 u32 i;
207 192
208 for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++) 193 for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
209 if (is_compatible(op, &sama5d2_qspi_modes[i])) 194 if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
210 return i; 195 return i;
211 196
212 return -1; 197 return -ENOTSUPP;
213} 198}
214 199
215static bool atmel_qspi_supports_op(struct spi_mem *mem, 200static bool atmel_qspi_supports_op(struct spi_mem *mem,
216 const struct spi_mem_op *op) 201 const struct spi_mem_op *op)
217{ 202{
218 if (find_mode(op) < 0) 203 if (atmel_qspi_find_mode(op) < 0)
219 return false; 204 return false;
220 205
221 /* special case not supported by hardware */ 206 /* special case not supported by hardware */
@@ -226,29 +211,37 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
226 return true; 211 return true;
227} 212}
228 213
229static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 214static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
215 const struct spi_mem_op *op, u32 *offset)
230{ 216{
231 struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); 217 u32 iar, icr, ifr;
232 int mode;
233 u32 dummy_cycles = 0; 218 u32 dummy_cycles = 0;
234 u32 iar, icr, ifr, sr; 219 int mode;
235 int err = 0;
236 220
237 iar = 0; 221 iar = 0;
238 icr = QSPI_ICR_INST(op->cmd.opcode); 222 icr = QSPI_ICR_INST(op->cmd.opcode);
239 ifr = QSPI_IFR_INSTEN; 223 ifr = QSPI_IFR_INSTEN;
240 224
241 qspi_writel(aq, QSPI_MR, QSPI_MR_SMM); 225 mode = atmel_qspi_find_mode(op);
242
243 mode = find_mode(op);
244 if (mode < 0) 226 if (mode < 0)
245 return -ENOTSUPP; 227 return mode;
246 228 ifr |= atmel_qspi_modes[mode].config;
247 ifr |= sama5d2_qspi_modes[mode].config;
248 229
249 if (op->dummy.buswidth && op->dummy.nbytes) 230 if (op->dummy.buswidth && op->dummy.nbytes)
250 dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth; 231 dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
251 232
233 /*
234 * The controller allows 24 and 32-bit addressing while NAND-flash
235 * requires 16-bit long. Handling 8-bit long addresses is done using
236 * the option field. For the 16-bit addresses, the workaround depends
237 * of the number of requested dummy bits. If there are 8 or more dummy
238 * cycles, the address is shifted and sent with the first dummy byte.
239 * Otherwise opcode is disabled and the first byte of the address
240 * contains the command opcode (works only if the opcode and address
241 * use the same buswidth). The limitation is when the 16-bit address is
242 * used without enough dummy cycles and the opcode is using a different
243 * buswidth than the address.
244 */
252 if (op->addr.buswidth) { 245 if (op->addr.buswidth) {
253 switch (op->addr.nbytes) { 246 switch (op->addr.nbytes) {
254 case 0: 247 case 0:
@@ -282,6 +275,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
282 } 275 }
283 } 276 }
284 277
278 /* offset of the data access in the QSPI memory space */
279 *offset = iar;
280
285 /* Set number of dummy cycles */ 281 /* Set number of dummy cycles */
286 if (dummy_cycles) 282 if (dummy_cycles)
287 ifr |= QSPI_IFR_NBDUM(dummy_cycles); 283 ifr |= QSPI_IFR_NBDUM(dummy_cycles);
@@ -290,49 +286,82 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
290 if (op->data.nbytes) 286 if (op->data.nbytes)
291 ifr |= QSPI_IFR_DATAEN; 287 ifr |= QSPI_IFR_DATAEN;
292 288
293 if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes) 289 /*
294 ifr |= QSPI_IFR_TFRTYP_TRSFR_READ; 290 * If the QSPI controller is set in regular SPI mode, set it in
295 else 291 * Serial Memory Mode (SMM).
296 ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE; 292 */
293 if (aq->mr != QSPI_MR_SMM) {
294 writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
295 aq->mr = QSPI_MR_SMM;
296 }
297 297
298 /* Clear pending interrupts */ 298 /* Clear pending interrupts */
299 (void)qspi_readl(aq, QSPI_SR); 299 (void)readl_relaxed(aq->regs + QSPI_SR);
300
301 if (aq->caps->has_ricr) {
302 if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN)
303 ifr |= QSPI_IFR_APBTFRTYP_READ;
300 304
301 /* Set QSPI Instruction Frame registers */ 305 /* Set QSPI Instruction Frame registers */
302 qspi_writel(aq, QSPI_IAR, iar); 306 writel_relaxed(iar, aq->regs + QSPI_IAR);
303 qspi_writel(aq, QSPI_ICR, icr); 307 if (op->data.dir == SPI_MEM_DATA_IN)
304 qspi_writel(aq, QSPI_IFR, ifr); 308 writel_relaxed(icr, aq->regs + QSPI_RICR);
309 else
310 writel_relaxed(icr, aq->regs + QSPI_WICR);
311 writel_relaxed(ifr, aq->regs + QSPI_IFR);
312 } else {
313 if (op->data.dir == SPI_MEM_DATA_OUT)
314 ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
315
316 /* Set QSPI Instruction Frame registers */
317 writel_relaxed(iar, aq->regs + QSPI_IAR);
318 writel_relaxed(icr, aq->regs + QSPI_ICR);
319 writel_relaxed(ifr, aq->regs + QSPI_IFR);
320 }
321
322 return 0;
323}
324
325static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
326{
327 struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
328 u32 sr, offset;
329 int err;
330
331 err = atmel_qspi_set_cfg(aq, op, &offset);
332 if (err)
333 return err;
305 334
306 /* Skip to the final steps if there is no data */ 335 /* Skip to the final steps if there is no data */
307 if (op->data.nbytes) { 336 if (op->data.nbytes) {
308 /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ 337 /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
309 (void)qspi_readl(aq, QSPI_IFR); 338 (void)readl_relaxed(aq->regs + QSPI_IFR);
310 339
311 /* Send/Receive data */ 340 /* Send/Receive data */
312 if (op->data.dir == SPI_MEM_DATA_IN) 341 if (op->data.dir == SPI_MEM_DATA_IN)
313 _memcpy_fromio(op->data.buf.in, 342 _memcpy_fromio(op->data.buf.in, aq->mem + offset,
314 aq->mem + iar, op->data.nbytes); 343 op->data.nbytes);
315 else 344 else
316 _memcpy_toio(aq->mem + iar, 345 _memcpy_toio(aq->mem + offset, op->data.buf.out,
317 op->data.buf.out, op->data.nbytes); 346 op->data.nbytes);
318 347
319 /* Release the chip-select */ 348 /* Release the chip-select */
320 qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER); 349 writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR);
321 } 350 }
322 351
323 /* Poll INSTRuction End status */ 352 /* Poll INSTRuction End status */
324 sr = qspi_readl(aq, QSPI_SR); 353 sr = readl_relaxed(aq->regs + QSPI_SR);
325 if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) 354 if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
326 return err; 355 return err;
327 356
328 /* Wait for INSTRuction End interrupt */ 357 /* Wait for INSTRuction End interrupt */
329 reinit_completion(&aq->cmd_completion); 358 reinit_completion(&aq->cmd_completion);
330 aq->pending = sr & QSPI_SR_CMD_COMPLETED; 359 aq->pending = sr & QSPI_SR_CMD_COMPLETED;
331 qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED); 360 writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER);
332 if (!wait_for_completion_timeout(&aq->cmd_completion, 361 if (!wait_for_completion_timeout(&aq->cmd_completion,
333 msecs_to_jiffies(1000))) 362 msecs_to_jiffies(1000)))
334 err = -ETIMEDOUT; 363 err = -ETIMEDOUT;
335 qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED); 364 writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR);
336 365
337 return err; 366 return err;
338} 367}
@@ -361,7 +390,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
361 if (!spi->max_speed_hz) 390 if (!spi->max_speed_hz)
362 return -EINVAL; 391 return -EINVAL;
363 392
364 src_rate = clk_get_rate(aq->clk); 393 src_rate = clk_get_rate(aq->pclk);
365 if (!src_rate) 394 if (!src_rate)
366 return -EINVAL; 395 return -EINVAL;
367 396
@@ -371,7 +400,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
371 scbr--; 400 scbr--;
372 401
373 scr = QSPI_SCR_SCBR(scbr); 402 scr = QSPI_SCR_SCBR(scbr);
374 qspi_writel(aq, QSPI_SCR, scr); 403 writel_relaxed(scr, aq->regs + QSPI_SCR);
375 404
376 return 0; 405 return 0;
377} 406}
@@ -379,21 +408,25 @@ static int atmel_qspi_setup(struct spi_device *spi)
379static int atmel_qspi_init(struct atmel_qspi *aq) 408static int atmel_qspi_init(struct atmel_qspi *aq)
380{ 409{
381 /* Reset the QSPI controller */ 410 /* Reset the QSPI controller */
382 qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST); 411 writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
412
413 /* Set the QSPI controller by default in Serial Memory Mode */
414 writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
415 aq->mr = QSPI_MR_SMM;
383 416
384 /* Enable the QSPI controller */ 417 /* Enable the QSPI controller */
385 qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN); 418 writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
386 419
387 return 0; 420 return 0;
388} 421}
389 422
390static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) 423static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
391{ 424{
392 struct atmel_qspi *aq = (struct atmel_qspi *)dev_id; 425 struct atmel_qspi *aq = dev_id;
393 u32 status, mask, pending; 426 u32 status, mask, pending;
394 427
395 status = qspi_readl(aq, QSPI_SR); 428 status = readl_relaxed(aq->regs + QSPI_SR);
396 mask = qspi_readl(aq, QSPI_IMR); 429 mask = readl_relaxed(aq->regs + QSPI_IMR);
397 pending = status & mask; 430 pending = status & mask;
398 431
399 if (!pending) 432 if (!pending)
@@ -449,44 +482,74 @@ static int atmel_qspi_probe(struct platform_device *pdev)
449 } 482 }
450 483
451 /* Get the peripheral clock */ 484 /* Get the peripheral clock */
452 aq->clk = devm_clk_get(&pdev->dev, NULL); 485 aq->pclk = devm_clk_get(&pdev->dev, "pclk");
453 if (IS_ERR(aq->clk)) { 486 if (IS_ERR(aq->pclk))
487 aq->pclk = devm_clk_get(&pdev->dev, NULL);
488
489 if (IS_ERR(aq->pclk)) {
454 dev_err(&pdev->dev, "missing peripheral clock\n"); 490 dev_err(&pdev->dev, "missing peripheral clock\n");
455 err = PTR_ERR(aq->clk); 491 err = PTR_ERR(aq->pclk);
456 goto exit; 492 goto exit;
457 } 493 }
458 494
459 /* Enable the peripheral clock */ 495 /* Enable the peripheral clock */
460 err = clk_prepare_enable(aq->clk); 496 err = clk_prepare_enable(aq->pclk);
461 if (err) { 497 if (err) {
462 dev_err(&pdev->dev, "failed to enable the peripheral clock\n"); 498 dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
463 goto exit; 499 goto exit;
464 } 500 }
465 501
502 aq->caps = of_device_get_match_data(&pdev->dev);
503 if (!aq->caps) {
504 dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
505 err = -EINVAL;
506 goto exit;
507 }
508
509 if (aq->caps->has_qspick) {
510 /* Get the QSPI system clock */
511 aq->qspick = devm_clk_get(&pdev->dev, "qspick");
512 if (IS_ERR(aq->qspick)) {
513 dev_err(&pdev->dev, "missing system clock\n");
514 err = PTR_ERR(aq->qspick);
515 goto disable_pclk;
516 }
517
518 /* Enable the QSPI system clock */
519 err = clk_prepare_enable(aq->qspick);
520 if (err) {
521 dev_err(&pdev->dev,
522 "failed to enable the QSPI system clock\n");
523 goto disable_pclk;
524 }
525 }
526
466 /* Request the IRQ */ 527 /* Request the IRQ */
467 irq = platform_get_irq(pdev, 0); 528 irq = platform_get_irq(pdev, 0);
468 if (irq < 0) { 529 if (irq < 0) {
469 dev_err(&pdev->dev, "missing IRQ\n"); 530 dev_err(&pdev->dev, "missing IRQ\n");
470 err = irq; 531 err = irq;
471 goto disable_clk; 532 goto disable_qspick;
472 } 533 }
473 err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt, 534 err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
474 0, dev_name(&pdev->dev), aq); 535 0, dev_name(&pdev->dev), aq);
475 if (err) 536 if (err)
476 goto disable_clk; 537 goto disable_qspick;
477 538
478 err = atmel_qspi_init(aq); 539 err = atmel_qspi_init(aq);
479 if (err) 540 if (err)
480 goto disable_clk; 541 goto disable_qspick;
481 542
482 err = spi_register_controller(ctrl); 543 err = spi_register_controller(ctrl);
483 if (err) 544 if (err)
484 goto disable_clk; 545 goto disable_qspick;
485 546
486 return 0; 547 return 0;
487 548
488disable_clk: 549disable_qspick:
489 clk_disable_unprepare(aq->clk); 550 clk_disable_unprepare(aq->qspick);
551disable_pclk:
552 clk_disable_unprepare(aq->pclk);
490exit: 553exit:
491 spi_controller_put(ctrl); 554 spi_controller_put(ctrl);
492 555
@@ -499,8 +562,9 @@ static int atmel_qspi_remove(struct platform_device *pdev)
499 struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); 562 struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
500 563
501 spi_unregister_controller(ctrl); 564 spi_unregister_controller(ctrl);
502 qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS); 565 writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR);
503 clk_disable_unprepare(aq->clk); 566 clk_disable_unprepare(aq->qspick);
567 clk_disable_unprepare(aq->pclk);
504 return 0; 568 return 0;
505} 569}
506 570
@@ -508,7 +572,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
508{ 572{
509 struct atmel_qspi *aq = dev_get_drvdata(dev); 573 struct atmel_qspi *aq = dev_get_drvdata(dev);
510 574
511 clk_disable_unprepare(aq->clk); 575 clk_disable_unprepare(aq->qspick);
576 clk_disable_unprepare(aq->pclk);
512 577
513 return 0; 578 return 0;
514} 579}
@@ -517,7 +582,8 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
517{ 582{
518 struct atmel_qspi *aq = dev_get_drvdata(dev); 583 struct atmel_qspi *aq = dev_get_drvdata(dev);
519 584
520 clk_prepare_enable(aq->clk); 585 clk_prepare_enable(aq->pclk);
586 clk_prepare_enable(aq->qspick);
521 587
522 return atmel_qspi_init(aq); 588 return atmel_qspi_init(aq);
523} 589}
@@ -525,8 +591,22 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
525static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, 591static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
526 atmel_qspi_resume); 592 atmel_qspi_resume);
527 593
594static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
595
596static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
597 .has_qspick = true,
598 .has_ricr = true,
599};
600
528static const struct of_device_id atmel_qspi_dt_ids[] = { 601static const struct of_device_id atmel_qspi_dt_ids[] = {
529 { .compatible = "atmel,sama5d2-qspi" }, 602 {
603 .compatible = "atmel,sama5d2-qspi",
604 .data = &atmel_sama5d2_qspi_caps,
605 },
606 {
607 .compatible = "microchip,sam9x60-qspi",
608 .data = &atmel_sam9x60_qspi_caps,
609 },
530 { /* sentinel */ } 610 { /* sentinel */ }
531}; 611};
532 612
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 3f6b657394de..847f354ebef1 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -21,18 +21,26 @@
21#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
22#include <linux/spi/spi_bitbang.h> 22#include <linux/spi/spi_bitbang.h>
23#include <linux/bitops.h> 23#include <linux/bitops.h>
24#include <linux/gpio.h>
25#include <linux/clk.h> 24#include <linux/clk.h>
26#include <linux/err.h> 25#include <linux/err.h>
27 26#include <linux/platform_data/spi-ath79.h>
28#include <asm/mach-ath79/ar71xx_regs.h>
29#include <asm/mach-ath79/ath79_spi_platform.h>
30 27
31#define DRV_NAME "ath79-spi" 28#define DRV_NAME "ath79-spi"
32 29
33#define ATH79_SPI_RRW_DELAY_FACTOR 12000 30#define ATH79_SPI_RRW_DELAY_FACTOR 12000
34#define MHZ (1000 * 1000) 31#define MHZ (1000 * 1000)
35 32
33#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
34#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
35#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
36#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
37
38#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
39
40#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
41#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
42#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
43
36struct ath79_spi { 44struct ath79_spi {
37 struct spi_bitbang bitbang; 45 struct spi_bitbang bitbang;
38 u32 ioc_base; 46 u32 ioc_base;
@@ -67,31 +75,14 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
67{ 75{
68 struct ath79_spi *sp = ath79_spidev_to_sp(spi); 76 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
69 int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active; 77 int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
78 u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
70 79
71 if (is_active) { 80 if (cs_high)
72 /* set initial clock polarity */ 81 sp->ioc_base |= cs_bit;
73 if (spi->mode & SPI_CPOL) 82 else
74 sp->ioc_base |= AR71XX_SPI_IOC_CLK; 83 sp->ioc_base &= ~cs_bit;
75 else
76 sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
77
78 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
79 }
80
81 if (gpio_is_valid(spi->cs_gpio)) {
82 /* SPI is normally active-low */
83 gpio_set_value_cansleep(spi->cs_gpio, cs_high);
84 } else {
85 u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
86
87 if (cs_high)
88 sp->ioc_base |= cs_bit;
89 else
90 sp->ioc_base &= ~cs_bit;
91
92 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
93 }
94 84
85 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
95} 86}
96 87
97static void ath79_spi_enable(struct ath79_spi *sp) 88static void ath79_spi_enable(struct ath79_spi *sp)
@@ -103,6 +94,9 @@ static void ath79_spi_enable(struct ath79_spi *sp)
103 sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL); 94 sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
104 sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC); 95 sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
105 96
97 /* clear clk and mosi in the base state */
98 sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK);
99
106 /* TODO: setup speed? */ 100 /* TODO: setup speed? */
107 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); 101 ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
108} 102}
@@ -115,66 +109,6 @@ static void ath79_spi_disable(struct ath79_spi *sp)
115 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); 109 ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
116} 110}
117 111
118static int ath79_spi_setup_cs(struct spi_device *spi)
119{
120 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
121 int status;
122
123 status = 0;
124 if (gpio_is_valid(spi->cs_gpio)) {
125 unsigned long flags;
126
127 flags = GPIOF_DIR_OUT;
128 if (spi->mode & SPI_CS_HIGH)
129 flags |= GPIOF_INIT_LOW;
130 else
131 flags |= GPIOF_INIT_HIGH;
132
133 status = gpio_request_one(spi->cs_gpio, flags,
134 dev_name(&spi->dev));
135 } else {
136 u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
137
138 if (spi->mode & SPI_CS_HIGH)
139 sp->ioc_base &= ~cs_bit;
140 else
141 sp->ioc_base |= cs_bit;
142
143 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
144 }
145
146 return status;
147}
148
149static void ath79_spi_cleanup_cs(struct spi_device *spi)
150{
151 if (gpio_is_valid(spi->cs_gpio))
152 gpio_free(spi->cs_gpio);
153}
154
155static int ath79_spi_setup(struct spi_device *spi)
156{
157 int status = 0;
158
159 if (!spi->controller_state) {
160 status = ath79_spi_setup_cs(spi);
161 if (status)
162 return status;
163 }
164
165 status = spi_bitbang_setup(spi);
166 if (status && !spi->controller_state)
167 ath79_spi_cleanup_cs(spi);
168
169 return status;
170}
171
172static void ath79_spi_cleanup(struct spi_device *spi)
173{
174 ath79_spi_cleanup_cs(spi);
175 spi_bitbang_cleanup(spi);
176}
177
178static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs, 112static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
179 u32 word, u8 bits, unsigned flags) 113 u32 word, u8 bits, unsigned flags)
180{ 114{
@@ -225,9 +159,10 @@ static int ath79_spi_probe(struct platform_device *pdev)
225 159
226 pdata = dev_get_platdata(&pdev->dev); 160 pdata = dev_get_platdata(&pdev->dev);
227 161
162 master->use_gpio_descriptors = true;
228 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32); 163 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
229 master->setup = ath79_spi_setup; 164 master->setup = spi_bitbang_setup;
230 master->cleanup = ath79_spi_cleanup; 165 master->cleanup = spi_bitbang_cleanup;
231 if (pdata) { 166 if (pdata) {
232 master->bus_num = pdata->bus_num; 167 master->bus_num = pdata->bus_num;
233 master->num_chipselect = pdata->num_chipselect; 168 master->num_chipselect = pdata->num_chipselect;
@@ -236,7 +171,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
236 sp->bitbang.master = master; 171 sp->bitbang.master = master;
237 sp->bitbang.chipselect = ath79_spi_chipselect; 172 sp->bitbang.chipselect = ath79_spi_chipselect;
238 sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; 173 sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
239 sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
240 sp->bitbang.flags = SPI_CS_HIGH; 174 sp->bitbang.flags = SPI_CS_HIGH;
241 175
242 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 176 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 74fddcd3282b..4954f0ab1606 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -23,8 +23,7 @@
23#include <linux/of.h> 23#include <linux/of.h>
24 24
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/gpio.h> 26#include <linux/gpio/consumer.h>
27#include <linux/of_gpio.h>
28#include <linux/pinctrl/consumer.h> 27#include <linux/pinctrl/consumer.h>
29#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
30 29
@@ -312,7 +311,7 @@ struct atmel_spi {
312 311
313/* Controller-specific per-slave state */ 312/* Controller-specific per-slave state */
314struct atmel_spi_device { 313struct atmel_spi_device {
315 unsigned int npcs_pin; 314 struct gpio_desc *npcs_pin;
316 u32 csr; 315 u32 csr;
317}; 316};
318 317
@@ -355,7 +354,6 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
355static void cs_activate(struct atmel_spi *as, struct spi_device *spi) 354static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
356{ 355{
357 struct atmel_spi_device *asd = spi->controller_state; 356 struct atmel_spi_device *asd = spi->controller_state;
358 unsigned active = spi->mode & SPI_CS_HIGH;
359 u32 mr; 357 u32 mr;
360 358
361 if (atmel_spi_is_v2(as)) { 359 if (atmel_spi_is_v2(as)) {
@@ -379,7 +377,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
379 377
380 mr = spi_readl(as, MR); 378 mr = spi_readl(as, MR);
381 if (as->use_cs_gpios) 379 if (as->use_cs_gpios)
382 gpio_set_value(asd->npcs_pin, active); 380 gpiod_set_value(asd->npcs_pin, 1);
383 } else { 381 } else {
384 u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0; 382 u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
385 int i; 383 int i;
@@ -396,19 +394,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
396 mr = spi_readl(as, MR); 394 mr = spi_readl(as, MR);
397 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); 395 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
398 if (as->use_cs_gpios && spi->chip_select != 0) 396 if (as->use_cs_gpios && spi->chip_select != 0)
399 gpio_set_value(asd->npcs_pin, active); 397 gpiod_set_value(asd->npcs_pin, 1);
400 spi_writel(as, MR, mr); 398 spi_writel(as, MR, mr);
401 } 399 }
402 400
403 dev_dbg(&spi->dev, "activate %u%s, mr %08x\n", 401 dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
404 asd->npcs_pin, active ? " (high)" : "",
405 mr);
406} 402}
407 403
408static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) 404static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
409{ 405{
410 struct atmel_spi_device *asd = spi->controller_state; 406 struct atmel_spi_device *asd = spi->controller_state;
411 unsigned active = spi->mode & SPI_CS_HIGH;
412 u32 mr; 407 u32 mr;
413 408
414 /* only deactivate *this* device; sometimes transfers to 409 /* only deactivate *this* device; sometimes transfers to
@@ -420,14 +415,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
420 spi_writel(as, MR, mr); 415 spi_writel(as, MR, mr);
421 } 416 }
422 417
423 dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n", 418 dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
424 asd->npcs_pin, active ? " (low)" : "",
425 mr);
426 419
427 if (!as->use_cs_gpios) 420 if (!as->use_cs_gpios)
428 spi_writel(as, CR, SPI_BIT(LASTXFER)); 421 spi_writel(as, CR, SPI_BIT(LASTXFER));
429 else if (atmel_spi_is_v2(as) || spi->chip_select != 0) 422 else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
430 gpio_set_value(asd->npcs_pin, !active); 423 gpiod_set_value(asd->npcs_pin, 0);
431} 424}
432 425
433static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock) 426static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -1188,7 +1181,6 @@ static int atmel_spi_setup(struct spi_device *spi)
1188 struct atmel_spi_device *asd; 1181 struct atmel_spi_device *asd;
1189 u32 csr; 1182 u32 csr;
1190 unsigned int bits = spi->bits_per_word; 1183 unsigned int bits = spi->bits_per_word;
1191 unsigned int npcs_pin;
1192 1184
1193 as = spi_master_get_devdata(spi->master); 1185 as = spi_master_get_devdata(spi->master);
1194 1186
@@ -1209,21 +1201,14 @@ static int atmel_spi_setup(struct spi_device *spi)
1209 csr |= SPI_BIT(CSAAT); 1201 csr |= SPI_BIT(CSAAT);
1210 1202
1211 /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. 1203 /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
1212 *
1213 * DLYBCT would add delays between words, slowing down transfers.
1214 * It could potentially be useful to cope with DMA bottlenecks, but
1215 * in those cases it's probably best to just use a lower bitrate.
1216 */ 1204 */
1217 csr |= SPI_BF(DLYBS, 0); 1205 csr |= SPI_BF(DLYBS, 0);
1218 csr |= SPI_BF(DLYBCT, 0);
1219
1220 /* chipselect must have been muxed as GPIO (e.g. in board setup) */
1221 npcs_pin = (unsigned long)spi->controller_data;
1222 1206
1223 if (!as->use_cs_gpios) 1207 /* DLYBCT adds delays between words. This is useful for slow devices
1224 npcs_pin = spi->chip_select; 1208 * that need a bit of time to setup the next transfer.
1225 else if (gpio_is_valid(spi->cs_gpio)) 1209 */
1226 npcs_pin = spi->cs_gpio; 1210 csr |= SPI_BF(DLYBCT,
1211 (as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5);
1227 1212
1228 asd = spi->controller_state; 1213 asd = spi->controller_state;
1229 if (!asd) { 1214 if (!asd) {
@@ -1231,11 +1216,21 @@ static int atmel_spi_setup(struct spi_device *spi)
1231 if (!asd) 1216 if (!asd)
1232 return -ENOMEM; 1217 return -ENOMEM;
1233 1218
1234 if (as->use_cs_gpios) 1219 /*
1235 gpio_direction_output(npcs_pin, 1220 * If use_cs_gpios is true this means that we have "cs-gpios"
1236 !(spi->mode & SPI_CS_HIGH)); 1221 * defined in the device tree node so we should have
1222 * gotten the GPIO lines from the device tree inside the
1223 * SPI core. Warn if this is not the case but continue since
1224 * CS GPIOs are after all optional.
1225 */
1226 if (as->use_cs_gpios) {
1227 if (!spi->cs_gpiod) {
1228 dev_err(&spi->dev,
1229 "host claims to use CS GPIOs but no CS found in DT by the SPI core\n");
1230 }
1231 asd->npcs_pin = spi->cs_gpiod;
1232 }
1237 1233
1238 asd->npcs_pin = npcs_pin;
1239 spi->controller_state = asd; 1234 spi->controller_state = asd;
1240 } 1235 }
1241 1236
@@ -1473,41 +1468,6 @@ static void atmel_get_caps(struct atmel_spi *as)
1473 as->caps.has_pdc_support = version < 0x212; 1468 as->caps.has_pdc_support = version < 0x212;
1474} 1469}
1475 1470
1476/*-------------------------------------------------------------------------*/
1477static int atmel_spi_gpio_cs(struct platform_device *pdev)
1478{
1479 struct spi_master *master = platform_get_drvdata(pdev);
1480 struct atmel_spi *as = spi_master_get_devdata(master);
1481 struct device_node *np = master->dev.of_node;
1482 int i;
1483 int ret = 0;
1484 int nb = 0;
1485
1486 if (!as->use_cs_gpios)
1487 return 0;
1488
1489 if (!np)
1490 return 0;
1491
1492 nb = of_gpio_named_count(np, "cs-gpios");
1493 for (i = 0; i < nb; i++) {
1494 int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
1495 "cs-gpios", i);
1496
1497 if (cs_gpio == -EPROBE_DEFER)
1498 return cs_gpio;
1499
1500 if (gpio_is_valid(cs_gpio)) {
1501 ret = devm_gpio_request(&pdev->dev, cs_gpio,
1502 dev_name(&pdev->dev));
1503 if (ret)
1504 return ret;
1505 }
1506 }
1507
1508 return 0;
1509}
1510
1511static void atmel_spi_init(struct atmel_spi *as) 1471static void atmel_spi_init(struct atmel_spi *as)
1512{ 1472{
1513 spi_writel(as, CR, SPI_BIT(SWRST)); 1473 spi_writel(as, CR, SPI_BIT(SWRST));
@@ -1560,6 +1520,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1560 goto out_free; 1520 goto out_free;
1561 1521
1562 /* the spi->mode bits understood by this driver: */ 1522 /* the spi->mode bits understood by this driver: */
1523 master->use_gpio_descriptors = true;
1563 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1524 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1564 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); 1525 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
1565 master->dev.of_node = pdev->dev.of_node; 1526 master->dev.of_node = pdev->dev.of_node;
@@ -1592,6 +1553,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
1592 1553
1593 atmel_get_caps(as); 1554 atmel_get_caps(as);
1594 1555
1556 /*
1557 * If there are chip selects in the device tree, those will be
1558 * discovered by the SPI core when registering the SPI master
1559 * and assigned to each SPI device.
1560 */
1595 as->use_cs_gpios = true; 1561 as->use_cs_gpios = true;
1596 if (atmel_spi_is_v2(as) && 1562 if (atmel_spi_is_v2(as) &&
1597 pdev->dev.of_node && 1563 pdev->dev.of_node &&
@@ -1600,10 +1566,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
1600 master->num_chipselect = 4; 1566 master->num_chipselect = 4;
1601 } 1567 }
1602 1568
1603 ret = atmel_spi_gpio_cs(pdev);
1604 if (ret)
1605 goto out_unmap_regs;
1606
1607 as->use_dma = false; 1569 as->use_dma = false;
1608 as->use_pdc = false; 1570 as->use_pdc = false;
1609 if (as->caps.has_dma_support) { 1571 if (as->caps.has_dma_support) {
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 671e374e1b01..f7e054848ca5 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -456,7 +456,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
456 } 456 }
457 457
458 bs->clk = devm_clk_get(&pdev->dev, NULL); 458 bs->clk = devm_clk_get(&pdev->dev, NULL);
459 if ((!bs->clk) || (IS_ERR(bs->clk))) { 459 if (IS_ERR(bs->clk)) {
460 err = PTR_ERR(bs->clk); 460 err = PTR_ERR(bs->clk);
461 dev_err(&pdev->dev, "could not get clk: %d\n", err); 461 dev_err(&pdev->dev, "could not get clk: %d\n", err);
462 goto out_master_put; 462 goto out_master_put;
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index f29176000b8d..dd9a8c54a693 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -213,19 +213,6 @@ int spi_bitbang_setup(struct spi_device *spi)
213 213
214 dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); 214 dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
215 215
216 /* NOTE we _need_ to call chipselect() early, ideally with adapter
217 * setup, unless the hardware defaults cooperate to avoid confusion
218 * between normal (active low) and inverted chipselects.
219 */
220
221 /* deselect chip (low or high) */
222 mutex_lock(&bitbang->lock);
223 if (!bitbang->busy) {
224 bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
225 ndelay(cs->nsecs);
226 }
227 mutex_unlock(&bitbang->lock);
228
229 return 0; 216 return 0;
230} 217}
231EXPORT_SYMBOL_GPL(spi_bitbang_setup); 218EXPORT_SYMBOL_GPL(spi_bitbang_setup);
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index 7c88f74f7f47..43d0e79842ac 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -13,7 +13,7 @@
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gpio.h> 16#include <linux/gpio/consumer.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/module.h> 19#include <linux/module.h>
@@ -128,10 +128,6 @@ struct cdns_spi {
128 u32 is_decoded_cs; 128 u32 is_decoded_cs;
129}; 129};
130 130
131struct cdns_spi_device_data {
132 bool gpio_requested;
133};
134
135/* Macros for the SPI controller read/write */ 131/* Macros for the SPI controller read/write */
136static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset) 132static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
137{ 133{
@@ -176,16 +172,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
176/** 172/**
177 * cdns_spi_chipselect - Select or deselect the chip select line 173 * cdns_spi_chipselect - Select or deselect the chip select line
178 * @spi: Pointer to the spi_device structure 174 * @spi: Pointer to the spi_device structure
179 * @is_high: Select(0) or deselect (1) the chip select line 175 * @enable: Select (1) or deselect (0) the chip select line
180 */ 176 */
181static void cdns_spi_chipselect(struct spi_device *spi, bool is_high) 177static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
182{ 178{
183 struct cdns_spi *xspi = spi_master_get_devdata(spi->master); 179 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
184 u32 ctrl_reg; 180 u32 ctrl_reg;
185 181
186 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); 182 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
187 183
188 if (is_high) { 184 if (!enable) {
189 /* Deselect the slave */ 185 /* Deselect the slave */
190 ctrl_reg |= CDNS_SPI_CR_SSCTRL; 186 ctrl_reg |= CDNS_SPI_CR_SSCTRL;
191 } else { 187 } else {
@@ -469,64 +465,6 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master)
469 return 0; 465 return 0;
470} 466}
471 467
472static int cdns_spi_setup(struct spi_device *spi)
473{
474
475 int ret = -EINVAL;
476 struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
477
478 /* this is a pin managed by the controller, leave it alone */
479 if (spi->cs_gpio == -ENOENT)
480 return 0;
481
482 /* this seems to be the first time we're here */
483 if (!cdns_spi_data) {
484 cdns_spi_data = kzalloc(sizeof(*cdns_spi_data), GFP_KERNEL);
485 if (!cdns_spi_data)
486 return -ENOMEM;
487 cdns_spi_data->gpio_requested = false;
488 spi_set_ctldata(spi, cdns_spi_data);
489 }
490
491 /* if we haven't done so, grab the gpio */
492 if (!cdns_spi_data->gpio_requested && gpio_is_valid(spi->cs_gpio)) {
493 ret = gpio_request_one(spi->cs_gpio,
494 (spi->mode & SPI_CS_HIGH) ?
495 GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
496 dev_name(&spi->dev));
497 if (ret)
498 dev_err(&spi->dev, "can't request chipselect gpio %d\n",
499 spi->cs_gpio);
500 else
501 cdns_spi_data->gpio_requested = true;
502 } else {
503 if (gpio_is_valid(spi->cs_gpio)) {
504 int mode = ((spi->mode & SPI_CS_HIGH) ?
505 GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
506
507 ret = gpio_direction_output(spi->cs_gpio, mode);
508 if (ret)
509 dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
510 spi->cs_gpio, ret);
511 }
512 }
513
514 return ret;
515}
516
517static void cdns_spi_cleanup(struct spi_device *spi)
518{
519 struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
520
521 if (cdns_spi_data) {
522 if (cdns_spi_data->gpio_requested)
523 gpio_free(spi->cs_gpio);
524 kfree(cdns_spi_data);
525 spi_set_ctldata(spi, NULL);
526 }
527
528}
529
530/** 468/**
531 * cdns_spi_probe - Probe method for the SPI driver 469 * cdns_spi_probe - Probe method for the SPI driver
532 * @pdev: Pointer to the platform_device structure 470 * @pdev: Pointer to the platform_device structure
@@ -584,11 +522,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
584 goto clk_dis_apb; 522 goto clk_dis_apb;
585 } 523 }
586 524
587 pm_runtime_use_autosuspend(&pdev->dev);
588 pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
589 pm_runtime_set_active(&pdev->dev);
590 pm_runtime_enable(&pdev->dev);
591
592 ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); 525 ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
593 if (ret < 0) 526 if (ret < 0)
594 master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; 527 master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -603,8 +536,10 @@ static int cdns_spi_probe(struct platform_device *pdev)
603 /* SPI controller initializations */ 536 /* SPI controller initializations */
604 cdns_spi_init_hw(xspi); 537 cdns_spi_init_hw(xspi);
605 538
606 pm_runtime_mark_last_busy(&pdev->dev); 539 pm_runtime_set_active(&pdev->dev);
607 pm_runtime_put_autosuspend(&pdev->dev); 540 pm_runtime_enable(&pdev->dev);
541 pm_runtime_use_autosuspend(&pdev->dev);
542 pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
608 543
609 irq = platform_get_irq(pdev, 0); 544 irq = platform_get_irq(pdev, 0);
610 if (irq <= 0) { 545 if (irq <= 0) {
@@ -621,13 +556,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
621 goto clk_dis_all; 556 goto clk_dis_all;
622 } 557 }
623 558
559 master->use_gpio_descriptors = true;
624 master->prepare_transfer_hardware = cdns_prepare_transfer_hardware; 560 master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
625 master->prepare_message = cdns_prepare_message; 561 master->prepare_message = cdns_prepare_message;
626 master->transfer_one = cdns_transfer_one; 562 master->transfer_one = cdns_transfer_one;
627 master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware; 563 master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
628 master->set_cs = cdns_spi_chipselect; 564 master->set_cs = cdns_spi_chipselect;
629 master->setup = cdns_spi_setup;
630 master->cleanup = cdns_spi_cleanup;
631 master->auto_runtime_pm = true; 565 master->auto_runtime_pm = true;
632 master->mode_bits = SPI_CPOL | SPI_CPHA; 566 master->mode_bits = SPI_CPOL | SPI_CPHA;
633 567
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 18193df2eba8..8c03c409fc07 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -11,7 +11,7 @@
11 11
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/clk.h> 13#include <linux/clk.h>
14#include <linux/gpio.h> 14#include <linux/gpio/consumer.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
@@ -36,25 +36,6 @@ struct spi_clps711x_data {
36 int len; 36 int len;
37}; 37};
38 38
39static int spi_clps711x_setup(struct spi_device *spi)
40{
41 if (!spi->controller_state) {
42 int ret;
43
44 ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio,
45 dev_name(&spi->master->dev));
46 if (ret)
47 return ret;
48
49 spi->controller_state = spi;
50 }
51
52 /* We are expect that SPI-device is not selected */
53 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
54
55 return 0;
56}
57
58static int spi_clps711x_prepare_message(struct spi_master *master, 39static int spi_clps711x_prepare_message(struct spi_master *master,
59 struct spi_message *msg) 40 struct spi_message *msg)
60{ 41{
@@ -125,11 +106,11 @@ static int spi_clps711x_probe(struct platform_device *pdev)
125 if (!master) 106 if (!master)
126 return -ENOMEM; 107 return -ENOMEM;
127 108
109 master->use_gpio_descriptors = true;
128 master->bus_num = -1; 110 master->bus_num = -1;
129 master->mode_bits = SPI_CPHA | SPI_CS_HIGH; 111 master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
130 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8); 112 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
131 master->dev.of_node = pdev->dev.of_node; 113 master->dev.of_node = pdev->dev.of_node;
132 master->setup = spi_clps711x_setup;
133 master->prepare_message = spi_clps711x_prepare_message; 114 master->prepare_message = spi_clps711x_prepare_message;
134 master->transfer_one = spi_clps711x_transfer_one; 115 master->transfer_one = spi_clps711x_transfer_one;
135 116
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 56adec83f8fc..eb246ebcfa3a 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/gpio.h> 18#include <linux/gpio/consumer.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
@@ -25,7 +25,6 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/of.h> 26#include <linux/of.h>
27#include <linux/of_device.h> 27#include <linux/of_device.h>
28#include <linux/of_gpio.h>
29#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
30#include <linux/spi/spi_bitbang.h> 29#include <linux/spi/spi_bitbang.h>
31#include <linux/slab.h> 30#include <linux/slab.h>
@@ -222,12 +221,17 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
222 * Board specific chip select logic decides the polarity and cs 221 * Board specific chip select logic decides the polarity and cs
223 * line for the controller 222 * line for the controller
224 */ 223 */
225 if (spi->cs_gpio >= 0) { 224 if (spi->cs_gpiod) {
225 /*
226 * FIXME: is this code ever executed? This host does not
227 * set SPI_MASTER_GPIO_SS so this chipselect callback should
228 * not get called from the SPI core when we are using
229 * GPIOs for chip select.
230 */
226 if (value == BITBANG_CS_ACTIVE) 231 if (value == BITBANG_CS_ACTIVE)
227 gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH); 232 gpiod_set_value(spi->cs_gpiod, 1);
228 else 233 else
229 gpio_set_value(spi->cs_gpio, 234 gpiod_set_value(spi->cs_gpiod, 0);
230 !(spi->mode & SPI_CS_HIGH));
231 } else { 235 } else {
232 if (value == BITBANG_CS_ACTIVE) { 236 if (value == BITBANG_CS_ACTIVE) {
233 if (!(spi->mode & SPI_CS_WORD)) 237 if (!(spi->mode & SPI_CS_WORD))
@@ -418,30 +422,18 @@ static int davinci_spi_of_setup(struct spi_device *spi)
418 */ 422 */
419static int davinci_spi_setup(struct spi_device *spi) 423static int davinci_spi_setup(struct spi_device *spi)
420{ 424{
421 int retval = 0;
422 struct davinci_spi *dspi; 425 struct davinci_spi *dspi;
423 struct spi_master *master = spi->master;
424 struct device_node *np = spi->dev.of_node; 426 struct device_node *np = spi->dev.of_node;
425 bool internal_cs = true; 427 bool internal_cs = true;
426 428
427 dspi = spi_master_get_devdata(spi->master); 429 dspi = spi_master_get_devdata(spi->master);
428 430
429 if (!(spi->mode & SPI_NO_CS)) { 431 if (!(spi->mode & SPI_NO_CS)) {
430 if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) { 432 if (np && spi->cs_gpiod)
431 retval = gpio_direction_output(
432 spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
433 internal_cs = false; 433 internal_cs = false;
434 }
435
436 if (retval) {
437 dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
438 spi->cs_gpio, retval);
439 return retval;
440 }
441 434
442 if (internal_cs) { 435 if (internal_cs)
443 set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); 436 set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
444 }
445 } 437 }
446 438
447 if (spi->mode & SPI_READY) 439 if (spi->mode & SPI_READY)
@@ -962,6 +954,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
962 if (ret) 954 if (ret)
963 goto free_master; 955 goto free_master;
964 956
957 master->use_gpio_descriptors = true;
965 master->dev.of_node = pdev->dev.of_node; 958 master->dev.of_node = pdev->dev.of_node;
966 master->bus_num = pdev->id; 959 master->bus_num = pdev->id;
967 master->num_chipselect = pdata->num_chipselect; 960 master->num_chipselect = pdata->num_chipselect;
@@ -980,27 +973,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
980 if (dspi->version == SPI_VERSION_2) 973 if (dspi->version == SPI_VERSION_2)
981 dspi->bitbang.flags |= SPI_READY; 974 dspi->bitbang.flags |= SPI_READY;
982 975
983 if (pdev->dev.of_node) {
984 int i;
985
986 for (i = 0; i < pdata->num_chipselect; i++) {
987 int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
988 "cs-gpios", i);
989
990 if (cs_gpio == -EPROBE_DEFER) {
991 ret = cs_gpio;
992 goto free_clk;
993 }
994
995 if (gpio_is_valid(cs_gpio)) {
996 ret = devm_gpio_request(&pdev->dev, cs_gpio,
997 dev_name(&pdev->dev));
998 if (ret)
999 goto free_clk;
1000 }
1001 }
1002 }
1003
1004 dspi->bitbang.txrx_bufs = davinci_spi_bufs; 976 dspi->bitbang.txrx_bufs = davinci_spi_bufs;
1005 977
1006 ret = davinci_spi_request_dma(dspi); 978 ret = davinci_spi_request_dma(dspi);
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index d0dd7814e997..4bd59a93d988 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -18,7 +18,6 @@
18#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_gpio.h>
22#include <linux/of_platform.h> 21#include <linux/of_platform.h>
23#include <linux/acpi.h> 22#include <linux/acpi.h>
24#include <linux/property.h> 23#include <linux/property.h>
@@ -185,27 +184,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
185 184
186 dws->num_cs = num_cs; 185 dws->num_cs = num_cs;
187 186
188 if (pdev->dev.of_node) {
189 int i;
190
191 for (i = 0; i < dws->num_cs; i++) {
192 int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
193 "cs-gpios", i);
194
195 if (cs_gpio == -EPROBE_DEFER) {
196 ret = cs_gpio;
197 goto out;
198 }
199
200 if (gpio_is_valid(cs_gpio)) {
201 ret = devm_gpio_request(&pdev->dev, cs_gpio,
202 dev_name(&pdev->dev));
203 if (ret)
204 goto out;
205 }
206 }
207 }
208
209 init_func = device_get_match_data(&pdev->dev); 187 init_func = device_get_match_data(&pdev->dev);
210 if (init_func) { 188 if (init_func) {
211 ret = init_func(pdev, dwsmmio); 189 ret = init_func(pdev, dwsmmio);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 4c9deb434b3a..ac81025f86ab 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -20,7 +20,6 @@
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/spi/spi.h> 22#include <linux/spi/spi.h>
23#include <linux/gpio.h>
24 23
25#include "spi-dw.h" 24#include "spi-dw.h"
26 25
@@ -138,11 +137,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
138 struct dw_spi *dws = spi_controller_get_devdata(spi->controller); 137 struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
139 struct chip_data *chip = spi_get_ctldata(spi); 138 struct chip_data *chip = spi_get_ctldata(spi);
140 139
141 /* Chip select logic is inverted from spi_set_cs() */
142 if (chip && chip->cs_control) 140 if (chip && chip->cs_control)
143 chip->cs_control(!enable); 141 chip->cs_control(enable);
144 142
145 if (!enable) 143 if (enable)
146 dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); 144 dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
147 else if (dws->cs_override) 145 else if (dws->cs_override)
148 dw_writel(dws, DW_SPI_SER, 0); 146 dw_writel(dws, DW_SPI_SER, 0);
@@ -317,7 +315,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
317 /* Default SPI mode is SCPOL = 0, SCPH = 0 */ 315 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
318 cr0 = (transfer->bits_per_word - 1) 316 cr0 = (transfer->bits_per_word - 1)
319 | (chip->type << SPI_FRF_OFFSET) 317 | (chip->type << SPI_FRF_OFFSET)
320 | (spi->mode << SPI_MODE_OFFSET) 318 | ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
319 (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
321 | (chip->tmode << SPI_TMOD_OFFSET); 320 | (chip->tmode << SPI_TMOD_OFFSET);
322 321
323 /* 322 /*
@@ -397,7 +396,6 @@ static int dw_spi_setup(struct spi_device *spi)
397{ 396{
398 struct dw_spi_chip *chip_info = NULL; 397 struct dw_spi_chip *chip_info = NULL;
399 struct chip_data *chip; 398 struct chip_data *chip;
400 int ret;
401 399
402 /* Only alloc on first setup */ 400 /* Only alloc on first setup */
403 chip = spi_get_ctldata(spi); 401 chip = spi_get_ctldata(spi);
@@ -425,13 +423,6 @@ static int dw_spi_setup(struct spi_device *spi)
425 423
426 chip->tmode = SPI_TMOD_TR; 424 chip->tmode = SPI_TMOD_TR;
427 425
428 if (gpio_is_valid(spi->cs_gpio)) {
429 ret = gpio_direction_output(spi->cs_gpio,
430 !(spi->mode & SPI_CS_HIGH));
431 if (ret)
432 return ret;
433 }
434
435 return 0; 426 return 0;
436} 427}
437 428
@@ -496,6 +487,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
496 goto err_free_master; 487 goto err_free_master;
497 } 488 }
498 489
490 master->use_gpio_descriptors = true;
499 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP; 491 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
500 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); 492 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
501 master->bus_num = dws->bus_num; 493 master->bus_num = dws->bus_num;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 7b605f95dbef..53335ccc98f6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -233,6 +233,9 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
233{ 233{
234 u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi); 234 u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
235 235
236 if (spi_controller_is_slave(dspi->master))
237 return data;
238
236 if (dspi->len > 0) 239 if (dspi->len > 0)
237 cmd |= SPI_PUSHR_CMD_CONT; 240 cmd |= SPI_PUSHR_CMD_CONT;
238 return cmd << 16 | data; 241 return cmd << 16 | data;
@@ -329,6 +332,11 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
329 dma_async_issue_pending(dma->chan_rx); 332 dma_async_issue_pending(dma->chan_rx);
330 dma_async_issue_pending(dma->chan_tx); 333 dma_async_issue_pending(dma->chan_tx);
331 334
335 if (spi_controller_is_slave(dspi->master)) {
336 wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
337 return 0;
338 }
339
332 time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete, 340 time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
333 DMA_COMPLETION_TIMEOUT); 341 DMA_COMPLETION_TIMEOUT);
334 if (time_left == 0) { 342 if (time_left == 0) {
@@ -798,14 +806,18 @@ static int dspi_setup(struct spi_device *spi)
798 ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate); 806 ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
799 807
800 chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0) 808 chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
801 | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0) 809 | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0);
802 | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0) 810
803 | SPI_CTAR_PCSSCK(pcssck) 811 if (!spi_controller_is_slave(dspi->master)) {
804 | SPI_CTAR_CSSCK(cssck) 812 chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode &
805 | SPI_CTAR_PASC(pasc) 813 SPI_LSB_FIRST ? 1 : 0)
806 | SPI_CTAR_ASC(asc) 814 | SPI_CTAR_PCSSCK(pcssck)
807 | SPI_CTAR_PBR(pbr) 815 | SPI_CTAR_CSSCK(cssck)
808 | SPI_CTAR_BR(br); 816 | SPI_CTAR_PASC(pasc)
817 | SPI_CTAR_ASC(asc)
818 | SPI_CTAR_PBR(pbr)
819 | SPI_CTAR_BR(br);
820 }
809 821
810 spi_set_ctldata(spi, chip); 822 spi_set_ctldata(spi, chip);
811 823
@@ -970,8 +982,13 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
970 982
971static void dspi_init(struct fsl_dspi *dspi) 983static void dspi_init(struct fsl_dspi *dspi)
972{ 984{
973 regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS | 985 unsigned int mcr = SPI_MCR_PCSIS |
974 (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0)); 986 (dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0);
987
988 if (!spi_controller_is_slave(dspi->master))
989 mcr |= SPI_MCR_MASTER;
990
991 regmap_write(dspi->regmap, SPI_MCR, mcr);
975 regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); 992 regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
976 if (dspi->devtype_data->xspi_mode) 993 if (dspi->devtype_data->xspi_mode)
977 regmap_write(dspi->regmap, SPI_CTARE(0), 994 regmap_write(dspi->regmap, SPI_CTARE(0),
@@ -1027,6 +1044,9 @@ static int dspi_probe(struct platform_device *pdev)
1027 } 1044 }
1028 master->bus_num = bus_num; 1045 master->bus_num = bus_num;
1029 1046
1047 if (of_property_read_bool(np, "spi-slave"))
1048 master->slave = true;
1049
1030 dspi->devtype_data = of_device_get_match_data(&pdev->dev); 1050 dspi->devtype_data = of_device_get_match_data(&pdev->dev);
1031 if (!dspi->devtype_data) { 1051 if (!dspi->devtype_data) {
1032 dev_err(&pdev->dev, "can't get devtype_data\n"); 1052 dev_err(&pdev->dev, "can't get devtype_data\n");
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
new file mode 100644
index 000000000000..6a713f78a62e
--- /dev/null
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -0,0 +1,966 @@
1// SPDX-License-Identifier: GPL-2.0+
2
3/*
4 * Freescale QuadSPI driver.
5 *
6 * Copyright (C) 2013 Freescale Semiconductor, Inc.
7 * Copyright (C) 2018 Bootlin
8 * Copyright (C) 2018 exceet electronics GmbH
9 * Copyright (C) 2018 Kontron Electronics GmbH
10 *
11 * Transition to SPI MEM interface:
12 * Authors:
13 * Boris Brezillon <bbrezillon@kernel.org>
14 * Frieder Schrempf <frieder.schrempf@kontron.de>
15 * Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
16 * Suresh Gupta <suresh.gupta@nxp.com>
17 *
18 * Based on the original fsl-quadspi.c spi-nor driver:
19 * Author: Freescale Semiconductor, Inc.
20 *
21 */
22
23#include <linux/bitops.h>
24#include <linux/clk.h>
25#include <linux/completion.h>
26#include <linux/delay.h>
27#include <linux/err.h>
28#include <linux/errno.h>
29#include <linux/interrupt.h>
30#include <linux/io.h>
31#include <linux/iopoll.h>
32#include <linux/jiffies.h>
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/mutex.h>
36#include <linux/of.h>
37#include <linux/of_device.h>
38#include <linux/platform_device.h>
39#include <linux/pm_qos.h>
40#include <linux/sizes.h>
41
42#include <linux/spi/spi.h>
43#include <linux/spi/spi-mem.h>
44
45/*
46 * The driver only uses one single LUT entry, that is updated on
47 * each call of exec_op(). Index 0 is preset at boot with a basic
48 * read operation, so let's use the last entry (15).
49 */
50#define SEQID_LUT 15
51
52/* Registers used by the driver */
53#define QUADSPI_MCR 0x00
54#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16)
55#define QUADSPI_MCR_MDIS_MASK BIT(14)
56#define QUADSPI_MCR_CLR_TXF_MASK BIT(11)
57#define QUADSPI_MCR_CLR_RXF_MASK BIT(10)
58#define QUADSPI_MCR_DDR_EN_MASK BIT(7)
59#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2)
60#define QUADSPI_MCR_SWRSTHD_MASK BIT(1)
61#define QUADSPI_MCR_SWRSTSD_MASK BIT(0)
62
63#define QUADSPI_IPCR 0x08
64#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
65
66#define QUADSPI_BUF3CR 0x1c
67#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
68#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
69#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8)
70
71#define QUADSPI_BFGENCR 0x20
72#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12)
73
74#define QUADSPI_BUF0IND 0x30
75#define QUADSPI_BUF1IND 0x34
76#define QUADSPI_BUF2IND 0x38
77#define QUADSPI_SFAR 0x100
78
79#define QUADSPI_SMPR 0x108
80#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16)
81#define QUADSPI_SMPR_FSDLY_MASK BIT(6)
82#define QUADSPI_SMPR_FSPHS_MASK BIT(5)
83#define QUADSPI_SMPR_HSENA_MASK BIT(0)
84
85#define QUADSPI_RBCT 0x110
86#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0)
87#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8)
88
89#define QUADSPI_TBDR 0x154
90
91#define QUADSPI_SR 0x15c
92#define QUADSPI_SR_IP_ACC_MASK BIT(1)
93#define QUADSPI_SR_AHB_ACC_MASK BIT(2)
94
95#define QUADSPI_FR 0x160
96#define QUADSPI_FR_TFF_MASK BIT(0)
97
98#define QUADSPI_SPTRCLR 0x16c
99#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
100#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
101
102#define QUADSPI_SFA1AD 0x180
103#define QUADSPI_SFA2AD 0x184
104#define QUADSPI_SFB1AD 0x188
105#define QUADSPI_SFB2AD 0x18c
106#define QUADSPI_RBDR(x) (0x200 + ((x) * 4))
107
108#define QUADSPI_LUTKEY 0x300
109#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
110
111#define QUADSPI_LCKCR 0x304
112#define QUADSPI_LCKER_LOCK BIT(0)
113#define QUADSPI_LCKER_UNLOCK BIT(1)
114
115#define QUADSPI_RSER 0x164
116#define QUADSPI_RSER_TFIE BIT(0)
117
118#define QUADSPI_LUT_BASE 0x310
119#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
120#define QUADSPI_LUT_REG(idx) \
121 (QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
122
123/* Instruction set for the LUT register */
124#define LUT_STOP 0
125#define LUT_CMD 1
126#define LUT_ADDR 2
127#define LUT_DUMMY 3
128#define LUT_MODE 4
129#define LUT_MODE2 5
130#define LUT_MODE4 6
131#define LUT_FSL_READ 7
132#define LUT_FSL_WRITE 8
133#define LUT_JMP_ON_CS 9
134#define LUT_ADDR_DDR 10
135#define LUT_MODE_DDR 11
136#define LUT_MODE2_DDR 12
137#define LUT_MODE4_DDR 13
138#define LUT_FSL_READ_DDR 14
139#define LUT_FSL_WRITE_DDR 15
140#define LUT_DATA_LEARN 16
141
142/*
143 * The PAD definitions for LUT register.
144 *
145 * The pad stands for the number of IO lines [0:3].
146 * For example, the quad read needs four IO lines,
147 * so you should use LUT_PAD(4).
148 */
149#define LUT_PAD(x) (fls(x) - 1)
150
151/*
152 * Macro for constructing the LUT entries with the following
153 * register layout:
154 *
155 * ---------------------------------------------------
156 * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
157 * ---------------------------------------------------
158 */
159#define LUT_DEF(idx, ins, pad, opr) \
160 ((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
161
162/* Controller needs driver to swap endianness */
163#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0)
164
165/* Controller needs 4x internal clock */
166#define QUADSPI_QUIRK_4X_INT_CLK BIT(1)
167
168/*
169 * TKT253890, the controller needs the driver to fill the txfifo with
170 * 16 bytes at least to trigger a data transfer, even though the extra
171 * data won't be transferred.
172 */
173#define QUADSPI_QUIRK_TKT253890 BIT(2)
174
175/* TKT245618, the controller cannot wake up from wait mode */
176#define QUADSPI_QUIRK_TKT245618 BIT(3)
177
178/*
179 * Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
180 * internally. No need to add it when setting SFXXAD and SFAR registers
181 */
182#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
183
184struct fsl_qspi_devtype_data {
185 unsigned int rxfifo;
186 unsigned int txfifo;
187 unsigned int ahb_buf_size;
188 unsigned int quirks;
189 bool little_endian;
190};
191
192static const struct fsl_qspi_devtype_data vybrid_data = {
193 .rxfifo = SZ_128,
194 .txfifo = SZ_64,
195 .ahb_buf_size = SZ_1K,
196 .quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
197 .little_endian = true,
198};
199
200static const struct fsl_qspi_devtype_data imx6sx_data = {
201 .rxfifo = SZ_128,
202 .txfifo = SZ_512,
203 .ahb_buf_size = SZ_1K,
204 .quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
205 .little_endian = true,
206};
207
208static const struct fsl_qspi_devtype_data imx7d_data = {
209 .rxfifo = SZ_512,
210 .txfifo = SZ_512,
211 .ahb_buf_size = SZ_1K,
212 .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
213 .little_endian = true,
214};
215
216static const struct fsl_qspi_devtype_data imx6ul_data = {
217 .rxfifo = SZ_128,
218 .txfifo = SZ_512,
219 .ahb_buf_size = SZ_1K,
220 .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
221 .little_endian = true,
222};
223
224static const struct fsl_qspi_devtype_data ls1021a_data = {
225 .rxfifo = SZ_128,
226 .txfifo = SZ_64,
227 .ahb_buf_size = SZ_1K,
228 .quirks = 0,
229 .little_endian = false,
230};
231
232static const struct fsl_qspi_devtype_data ls2080a_data = {
233 .rxfifo = SZ_128,
234 .txfifo = SZ_64,
235 .ahb_buf_size = SZ_1K,
236 .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
237 .little_endian = true,
238};
239
240struct fsl_qspi {
241 void __iomem *iobase;
242 void __iomem *ahb_addr;
243 u32 memmap_phy;
244 struct clk *clk, *clk_en;
245 struct device *dev;
246 struct completion c;
247 const struct fsl_qspi_devtype_data *devtype_data;
248 struct mutex lock;
249 struct pm_qos_request pm_qos_req;
250 int selected;
251};
252
253static inline int needs_swap_endian(struct fsl_qspi *q)
254{
255 return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
256}
257
258static inline int needs_4x_clock(struct fsl_qspi *q)
259{
260 return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
261}
262
263static inline int needs_fill_txfifo(struct fsl_qspi *q)
264{
265 return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
266}
267
268static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
269{
270 return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
271}
272
273static inline int needs_amba_base_offset(struct fsl_qspi *q)
274{
275 return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
276}
277
278/*
279 * An IC bug makes it necessary to rearrange the 32-bit data.
280 * Later chips, such as IMX6SLX, have fixed this bug.
281 */
282static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
283{
284 return needs_swap_endian(q) ? __swab32(a) : a;
285}
286
287/*
288 * R/W functions for big- or little-endian registers:
289 * The QSPI controller's endianness is independent of
290 * the CPU core's endianness. So far, although the CPU
291 * core is little-endian the QSPI controller can use
292 * big-endian or little-endian.
293 */
294static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
295{
296 if (q->devtype_data->little_endian)
297 iowrite32(val, addr);
298 else
299 iowrite32be(val, addr);
300}
301
302static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
303{
304 if (q->devtype_data->little_endian)
305 return ioread32(addr);
306
307 return ioread32be(addr);
308}
309
310static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
311{
312 struct fsl_qspi *q = dev_id;
313 u32 reg;
314
315 /* clear interrupt */
316 reg = qspi_readl(q, q->iobase + QUADSPI_FR);
317 qspi_writel(q, reg, q->iobase + QUADSPI_FR);
318
319 if (reg & QUADSPI_FR_TFF_MASK)
320 complete(&q->c);
321
322 dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
323 return IRQ_HANDLED;
324}
325
326static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
327{
328 switch (width) {
329 case 1:
330 case 2:
331 case 4:
332 return 0;
333 }
334
335 return -ENOTSUPP;
336}
337
338static bool fsl_qspi_supports_op(struct spi_mem *mem,
339 const struct spi_mem_op *op)
340{
341 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
342 int ret;
343
344 ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
345
346 if (op->addr.nbytes)
347 ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
348
349 if (op->dummy.nbytes)
350 ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
351
352 if (op->data.nbytes)
353 ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
354
355 if (ret)
356 return false;
357
358 /*
359 * The number of instructions needed for the op, needs
360 * to fit into a single LUT entry.
361 */
362 if (op->addr.nbytes +
363 (op->dummy.nbytes ? 1:0) +
364 (op->data.nbytes ? 1:0) > 6)
365 return false;
366
367 /* Max 64 dummy clock cycles supported */
368 if (op->dummy.nbytes &&
369 (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
370 return false;
371
372 /* Max data length, check controller limits and alignment */
373 if (op->data.dir == SPI_MEM_DATA_IN &&
374 (op->data.nbytes > q->devtype_data->ahb_buf_size ||
375 (op->data.nbytes > q->devtype_data->rxfifo - 4 &&
376 !IS_ALIGNED(op->data.nbytes, 8))))
377 return false;
378
379 if (op->data.dir == SPI_MEM_DATA_OUT &&
380 op->data.nbytes > q->devtype_data->txfifo)
381 return false;
382
383 return true;
384}
385
386static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
387 const struct spi_mem_op *op)
388{
389 void __iomem *base = q->iobase;
390 u32 lutval[4] = {};
391 int lutidx = 1, i;
392
393 lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
394 op->cmd.opcode);
395
396 /*
397 * For some unknown reason, using LUT_ADDR doesn't work in some
398 * cases (at least with only one byte long addresses), so
399 * let's use LUT_MODE to write the address bytes one by one
400 */
401 for (i = 0; i < op->addr.nbytes; i++) {
402 u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
403
404 lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
405 LUT_PAD(op->addr.buswidth),
406 addrbyte);
407 lutidx++;
408 }
409
410 if (op->dummy.nbytes) {
411 lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
412 LUT_PAD(op->dummy.buswidth),
413 op->dummy.nbytes * 8 /
414 op->dummy.buswidth);
415 lutidx++;
416 }
417
418 if (op->data.nbytes) {
419 lutval[lutidx / 2] |= LUT_DEF(lutidx,
420 op->data.dir == SPI_MEM_DATA_IN ?
421 LUT_FSL_READ : LUT_FSL_WRITE,
422 LUT_PAD(op->data.buswidth),
423 0);
424 lutidx++;
425 }
426
427 lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
428
429 /* unlock LUT */
430 qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
431 qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
432
433 /* fill LUT */
434 for (i = 0; i < ARRAY_SIZE(lutval); i++)
435 qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
436
437 /* lock LUT */
438 qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
439 qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
440}
441
442static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
443{
444 int ret;
445
446 ret = clk_prepare_enable(q->clk_en);
447 if (ret)
448 return ret;
449
450 ret = clk_prepare_enable(q->clk);
451 if (ret) {
452 clk_disable_unprepare(q->clk_en);
453 return ret;
454 }
455
456 if (needs_wakeup_wait_mode(q))
457 pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
458
459 return 0;
460}
461
462static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
463{
464 if (needs_wakeup_wait_mode(q))
465 pm_qos_remove_request(&q->pm_qos_req);
466
467 clk_disable_unprepare(q->clk);
468 clk_disable_unprepare(q->clk_en);
469}
470
471/*
472 * If we have changed the content of the flash by writing or erasing, or if we
473 * read from flash with a different offset into the page buffer, we need to
474 * invalidate the AHB buffer. If we do not do so, we may read out the wrong
475 * data. The spec tells us reset the AHB domain and Serial Flash domain at
476 * the same time.
477 */
478static void fsl_qspi_invalidate(struct fsl_qspi *q)
479{
480 u32 reg;
481
482 reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
483 reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
484 qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
485
486 /*
487 * The minimum delay : 1 AHB + 2 SFCK clocks.
488 * Delay 1 us is enough.
489 */
490 udelay(1);
491
492 reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
493 qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
494}
495
496static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
497{
498 unsigned long rate = spi->max_speed_hz;
499 int ret;
500
501 if (q->selected == spi->chip_select)
502 return;
503
504 if (needs_4x_clock(q))
505 rate *= 4;
506
507 fsl_qspi_clk_disable_unprep(q);
508
509 ret = clk_set_rate(q->clk, rate);
510 if (ret)
511 return;
512
513 ret = fsl_qspi_clk_prep_enable(q);
514 if (ret)
515 return;
516
517 q->selected = spi->chip_select;
518
519 fsl_qspi_invalidate(q);
520}
521
522static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
523{
524 memcpy_fromio(op->data.buf.in,
525 q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
526 op->data.nbytes);
527}
528
529static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
530 const struct spi_mem_op *op)
531{
532 void __iomem *base = q->iobase;
533 int i;
534 u32 val;
535
536 for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
537 memcpy(&val, op->data.buf.out + i, 4);
538 val = fsl_qspi_endian_xchg(q, val);
539 qspi_writel(q, val, base + QUADSPI_TBDR);
540 }
541
542 if (i < op->data.nbytes) {
543 memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
544 val = fsl_qspi_endian_xchg(q, val);
545 qspi_writel(q, val, base + QUADSPI_TBDR);
546 }
547
548 if (needs_fill_txfifo(q)) {
549 for (i = op->data.nbytes; i < 16; i += 4)
550 qspi_writel(q, 0, base + QUADSPI_TBDR);
551 }
552}
553
554static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
555 const struct spi_mem_op *op)
556{
557 void __iomem *base = q->iobase;
558 int i;
559 u8 *buf = op->data.buf.in;
560 u32 val;
561
562 for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
563 val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
564 val = fsl_qspi_endian_xchg(q, val);
565 memcpy(buf + i, &val, 4);
566 }
567
568 if (i < op->data.nbytes) {
569 val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
570 val = fsl_qspi_endian_xchg(q, val);
571 memcpy(buf + i, &val, op->data.nbytes - i);
572 }
573}
574
575static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
576{
577 void __iomem *base = q->iobase;
578 int err = 0;
579
580 init_completion(&q->c);
581
582 /*
583 * Always start the sequence at the same index since we update
584 * the LUT at each exec_op() call. And also specify the DATA
585 * length, since it's has not been specified in the LUT.
586 */
587 qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
588 base + QUADSPI_IPCR);
589
590 /* Wait for the interrupt. */
591 if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
592 err = -ETIMEDOUT;
593
594 if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
595 fsl_qspi_read_rxfifo(q, op);
596
597 return err;
598}
599
600static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
601 u32 mask, u32 delay_us, u32 timeout_us)
602{
603 u32 reg;
604
605 if (!q->devtype_data->little_endian)
606 mask = (u32)cpu_to_be32(mask);
607
608 return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
609 timeout_us);
610}
611
612static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
613{
614 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
615 void __iomem *base = q->iobase;
616 u32 addr_offset = 0;
617 int err = 0;
618
619 mutex_lock(&q->lock);
620
621 /* wait for the controller being ready */
622 fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
623 QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
624
625 fsl_qspi_select_mem(q, mem->spi);
626
627 if (needs_amba_base_offset(q))
628 addr_offset = q->memmap_phy;
629
630 qspi_writel(q,
631 q->selected * q->devtype_data->ahb_buf_size + addr_offset,
632 base + QUADSPI_SFAR);
633
634 qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
635 QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
636 base + QUADSPI_MCR);
637
638 qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
639 base + QUADSPI_SPTRCLR);
640
641 fsl_qspi_prepare_lut(q, op);
642
643 /*
644 * If we have large chunks of data, we read them through the AHB bus
645 * by accessing the mapped memory. In all other cases we use
646 * IP commands to access the flash.
647 */
648 if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
649 op->data.dir == SPI_MEM_DATA_IN) {
650 fsl_qspi_read_ahb(q, op);
651 } else {
652 qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
653 QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
654
655 if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
656 fsl_qspi_fill_txfifo(q, op);
657
658 err = fsl_qspi_do_op(q, op);
659 }
660
661 /* Invalidate the data in the AHB buffer. */
662 fsl_qspi_invalidate(q);
663
664 mutex_unlock(&q->lock);
665
666 return err;
667}
668
669static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
670{
671 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
672
673 if (op->data.dir == SPI_MEM_DATA_OUT) {
674 if (op->data.nbytes > q->devtype_data->txfifo)
675 op->data.nbytes = q->devtype_data->txfifo;
676 } else {
677 if (op->data.nbytes > q->devtype_data->ahb_buf_size)
678 op->data.nbytes = q->devtype_data->ahb_buf_size;
679 else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
680 op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
681 }
682
683 return 0;
684}
685
686static int fsl_qspi_default_setup(struct fsl_qspi *q)
687{
688 void __iomem *base = q->iobase;
689 u32 reg, addr_offset = 0;
690 int ret;
691
692 /* disable and unprepare clock to avoid glitch pass to controller */
693 fsl_qspi_clk_disable_unprep(q);
694
695 /* the default frequency, we will change it later if necessary. */
696 ret = clk_set_rate(q->clk, 66000000);
697 if (ret)
698 return ret;
699
700 ret = fsl_qspi_clk_prep_enable(q);
701 if (ret)
702 return ret;
703
704 /* Reset the module */
705 qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
706 base + QUADSPI_MCR);
707 udelay(1);
708
709 /* Disable the module */
710 qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
711 base + QUADSPI_MCR);
712
713 reg = qspi_readl(q, base + QUADSPI_SMPR);
714 qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
715 | QUADSPI_SMPR_FSPHS_MASK
716 | QUADSPI_SMPR_HSENA_MASK
717 | QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
718
719 /* We only use the buffer3 for AHB read */
720 qspi_writel(q, 0, base + QUADSPI_BUF0IND);
721 qspi_writel(q, 0, base + QUADSPI_BUF1IND);
722 qspi_writel(q, 0, base + QUADSPI_BUF2IND);
723
724 qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
725 q->iobase + QUADSPI_BFGENCR);
726 qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
727 qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
728 QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
729 base + QUADSPI_BUF3CR);
730
731 if (needs_amba_base_offset(q))
732 addr_offset = q->memmap_phy;
733
734 /*
735 * In HW there can be a maximum of four chips on two buses with
736 * two chip selects on each bus. We use four chip selects in SW
737 * to differentiate between the four chips.
738 * We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
739 * SFB2AD accordingly.
740 */
741 qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
742 base + QUADSPI_SFA1AD);
743 qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
744 base + QUADSPI_SFA2AD);
745 qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
746 base + QUADSPI_SFB1AD);
747 qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
748 base + QUADSPI_SFB2AD);
749
750 q->selected = -1;
751
752 /* Enable the module */
753 qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
754 base + QUADSPI_MCR);
755
756 /* clear all interrupt status */
757 qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
758
759 /* enable the interrupt */
760 qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
761
762 return 0;
763}
764
765static const char *fsl_qspi_get_name(struct spi_mem *mem)
766{
767 struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
768 struct device *dev = &mem->spi->dev;
769 const char *name;
770
771 /*
772 * In order to keep mtdparts compatible with the old MTD driver at
773 * mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
774 * platform_device of the controller.
775 */
776 if (of_get_available_child_count(q->dev->of_node) == 1)
777 return dev_name(q->dev);
778
779 name = devm_kasprintf(dev, GFP_KERNEL,
780 "%s-%d", dev_name(q->dev),
781 mem->spi->chip_select);
782
783 if (!name) {
784 dev_err(dev, "failed to get memory for custom flash name\n");
785 return ERR_PTR(-ENOMEM);
786 }
787
788 return name;
789}
790
791static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
792 .adjust_op_size = fsl_qspi_adjust_op_size,
793 .supports_op = fsl_qspi_supports_op,
794 .exec_op = fsl_qspi_exec_op,
795 .get_name = fsl_qspi_get_name,
796};
797
798static int fsl_qspi_probe(struct platform_device *pdev)
799{
800 struct spi_controller *ctlr;
801 struct device *dev = &pdev->dev;
802 struct device_node *np = dev->of_node;
803 struct resource *res;
804 struct fsl_qspi *q;
805 int ret;
806
807 ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
808 if (!ctlr)
809 return -ENOMEM;
810
811 ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
812 SPI_TX_DUAL | SPI_TX_QUAD;
813
814 q = spi_controller_get_devdata(ctlr);
815 q->dev = dev;
816 q->devtype_data = of_device_get_match_data(dev);
817 if (!q->devtype_data) {
818 ret = -ENODEV;
819 goto err_put_ctrl;
820 }
821
822 platform_set_drvdata(pdev, q);
823
824 /* find the resources */
825 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
826 q->iobase = devm_ioremap_resource(dev, res);
827 if (IS_ERR(q->iobase)) {
828 ret = PTR_ERR(q->iobase);
829 goto err_put_ctrl;
830 }
831
832 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
833 "QuadSPI-memory");
834 q->ahb_addr = devm_ioremap_resource(dev, res);
835 if (IS_ERR(q->ahb_addr)) {
836 ret = PTR_ERR(q->ahb_addr);
837 goto err_put_ctrl;
838 }
839
840 q->memmap_phy = res->start;
841
842 /* find the clocks */
843 q->clk_en = devm_clk_get(dev, "qspi_en");
844 if (IS_ERR(q->clk_en)) {
845 ret = PTR_ERR(q->clk_en);
846 goto err_put_ctrl;
847 }
848
849 q->clk = devm_clk_get(dev, "qspi");
850 if (IS_ERR(q->clk)) {
851 ret = PTR_ERR(q->clk);
852 goto err_put_ctrl;
853 }
854
855 ret = fsl_qspi_clk_prep_enable(q);
856 if (ret) {
857 dev_err(dev, "can not enable the clock\n");
858 goto err_put_ctrl;
859 }
860
861 /* find the irq */
862 ret = platform_get_irq(pdev, 0);
863 if (ret < 0) {
864 dev_err(dev, "failed to get the irq: %d\n", ret);
865 goto err_disable_clk;
866 }
867
868 ret = devm_request_irq(dev, ret,
869 fsl_qspi_irq_handler, 0, pdev->name, q);
870 if (ret) {
871 dev_err(dev, "failed to request irq: %d\n", ret);
872 goto err_disable_clk;
873 }
874
875 mutex_init(&q->lock);
876
877 ctlr->bus_num = -1;
878 ctlr->num_chipselect = 4;
879 ctlr->mem_ops = &fsl_qspi_mem_ops;
880
881 fsl_qspi_default_setup(q);
882
883 ctlr->dev.of_node = np;
884
885 ret = spi_register_controller(ctlr);
886 if (ret)
887 goto err_destroy_mutex;
888
889 return 0;
890
891err_destroy_mutex:
892 mutex_destroy(&q->lock);
893
894err_disable_clk:
895 fsl_qspi_clk_disable_unprep(q);
896
897err_put_ctrl:
898 spi_controller_put(ctlr);
899
900 dev_err(dev, "Freescale QuadSPI probe failed\n");
901 return ret;
902}
903
904static int fsl_qspi_remove(struct platform_device *pdev)
905{
906 struct fsl_qspi *q = platform_get_drvdata(pdev);
907
908 /* disable the hardware */
909 qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
910 qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
911
912 fsl_qspi_clk_disable_unprep(q);
913
914 mutex_destroy(&q->lock);
915
916 return 0;
917}
918
919static int fsl_qspi_suspend(struct device *dev)
920{
921 return 0;
922}
923
924static int fsl_qspi_resume(struct device *dev)
925{
926 struct fsl_qspi *q = dev_get_drvdata(dev);
927
928 fsl_qspi_default_setup(q);
929
930 return 0;
931}
932
933static const struct of_device_id fsl_qspi_dt_ids[] = {
934 { .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
935 { .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
936 { .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
937 { .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
938 { .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
939 { .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
940 { /* sentinel */ }
941};
942MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
943
944static const struct dev_pm_ops fsl_qspi_pm_ops = {
945 .suspend = fsl_qspi_suspend,
946 .resume = fsl_qspi_resume,
947};
948
949static struct platform_driver fsl_qspi_driver = {
950 .driver = {
951 .name = "fsl-quadspi",
952 .of_match_table = fsl_qspi_dt_ids,
953 .pm = &fsl_qspi_pm_ops,
954 },
955 .probe = fsl_qspi_probe,
956 .remove = fsl_qspi_remove,
957};
958module_platform_driver(fsl_qspi_driver);
959
960MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
961MODULE_AUTHOR("Freescale Semiconductor Inc.");
962MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
963MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
964MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>");
965MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>");
966MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index fdb7cb88fb56..5f0b0d5bfef4 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -89,9 +89,6 @@ struct spi_geni_master {
89 int irq; 89 int irq;
90}; 90};
91 91
92static void handle_fifo_timeout(struct spi_master *spi,
93 struct spi_message *msg);
94
95static int get_spi_clk_cfg(unsigned int speed_hz, 92static int get_spi_clk_cfg(unsigned int speed_hz,
96 struct spi_geni_master *mas, 93 struct spi_geni_master *mas,
97 unsigned int *clk_idx, 94 unsigned int *clk_idx,
@@ -122,6 +119,32 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
122 return ret; 119 return ret;
123} 120}
124 121
122static void handle_fifo_timeout(struct spi_master *spi,
123 struct spi_message *msg)
124{
125 struct spi_geni_master *mas = spi_master_get_devdata(spi);
126 unsigned long time_left, flags;
127 struct geni_se *se = &mas->se;
128
129 spin_lock_irqsave(&mas->lock, flags);
130 reinit_completion(&mas->xfer_done);
131 mas->cur_mcmd = CMD_CANCEL;
132 geni_se_cancel_m_cmd(se);
133 writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
134 spin_unlock_irqrestore(&mas->lock, flags);
135 time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
136 if (time_left)
137 return;
138
139 spin_lock_irqsave(&mas->lock, flags);
140 reinit_completion(&mas->xfer_done);
141 geni_se_abort_m_cmd(se);
142 spin_unlock_irqrestore(&mas->lock, flags);
143 time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
144 if (!time_left)
145 dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
146}
147
125static void spi_geni_set_cs(struct spi_device *slv, bool set_flag) 148static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
126{ 149{
127 struct spi_geni_master *mas = spi_master_get_devdata(slv->master); 150 struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
@@ -233,7 +256,6 @@ static int spi_geni_prepare_message(struct spi_master *spi,
233 struct geni_se *se = &mas->se; 256 struct geni_se *se = &mas->se;
234 257
235 geni_se_select_mode(se, GENI_SE_FIFO); 258 geni_se_select_mode(se, GENI_SE_FIFO);
236 reinit_completion(&mas->xfer_done);
237 ret = setup_fifo_params(spi_msg->spi, spi); 259 ret = setup_fifo_params(spi_msg->spi, spi);
238 if (ret) 260 if (ret)
239 dev_err(mas->dev, "Couldn't select mode %d\n", ret); 261 dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -357,32 +379,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
357 writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG); 379 writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
358} 380}
359 381
360static void handle_fifo_timeout(struct spi_master *spi,
361 struct spi_message *msg)
362{
363 struct spi_geni_master *mas = spi_master_get_devdata(spi);
364 unsigned long time_left, flags;
365 struct geni_se *se = &mas->se;
366
367 spin_lock_irqsave(&mas->lock, flags);
368 reinit_completion(&mas->xfer_done);
369 mas->cur_mcmd = CMD_CANCEL;
370 geni_se_cancel_m_cmd(se);
371 writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
372 spin_unlock_irqrestore(&mas->lock, flags);
373 time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
374 if (time_left)
375 return;
376
377 spin_lock_irqsave(&mas->lock, flags);
378 reinit_completion(&mas->xfer_done);
379 geni_se_abort_m_cmd(se);
380 spin_unlock_irqrestore(&mas->lock, flags);
381 time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
382 if (!time_left)
383 dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
384}
385
386static int spi_geni_transfer_one(struct spi_master *spi, 382static int spi_geni_transfer_one(struct spi_master *spi,
387 struct spi_device *slv, 383 struct spi_device *slv,
388 struct spi_transfer *xfer) 384 struct spi_transfer *xfer)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 9487c9cd68bd..a4d8d19ecff9 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -552,6 +552,75 @@ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
552} 552}
553EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy); 553EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
554 554
555static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
556{
557 struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;
558
559 spi_mem_dirmap_destroy(desc);
560}
561
562/**
563 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
564 * it to a device
565 * @dev: device the dirmap desc will be attached to
566 * @mem: SPI mem device this direct mapping should be created for
567 * @info: direct mapping information
568 *
569 * devm_ variant of the spi_mem_dirmap_create() function. See
570 * spi_mem_dirmap_create() for more details.
571 *
572 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
573 */
574struct spi_mem_dirmap_desc *
575devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
576 const struct spi_mem_dirmap_info *info)
577{
578 struct spi_mem_dirmap_desc **ptr, *desc;
579
580 ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
581 GFP_KERNEL);
582 if (!ptr)
583 return ERR_PTR(-ENOMEM);
584
585 desc = spi_mem_dirmap_create(mem, info);
586 if (IS_ERR(desc)) {
587 devres_free(ptr);
588 } else {
589 *ptr = desc;
590 devres_add(dev, ptr);
591 }
592
593 return desc;
594}
595EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
596
597static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
598{
599 struct spi_mem_dirmap_desc **ptr = res;
600
601 if (WARN_ON(!ptr || !*ptr))
602 return 0;
603
604 return *ptr == data;
605}
606
607/**
608 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
609 * to a device
610 * @dev: device the dirmap desc is attached to
611 * @desc: the direct mapping descriptor to destroy
612 *
613 * devm_ variant of the spi_mem_dirmap_destroy() function. See
614 * spi_mem_dirmap_destroy() for more details.
615 */
616void devm_spi_mem_dirmap_destroy(struct device *dev,
617 struct spi_mem_dirmap_desc *desc)
618{
619 devres_release(dev, devm_spi_mem_dirmap_release,
620 devm_spi_mem_dirmap_match, desc);
621}
622EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
623
555/** 624/**
556 * spi_mem_dirmap_dirmap_read() - Read data through a direct mapping 625 * spi_mem_dirmap_dirmap_read() - Read data through a direct mapping
557 * @desc: direct mapping descriptor 626 * @desc: direct mapping descriptor
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 6ac95a2a21ce..7bf53cfc25d6 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -39,6 +39,7 @@
39#include <linux/stmp_device.h> 39#include <linux/stmp_device.h>
40#include <linux/spi/spi.h> 40#include <linux/spi/spi.h>
41#include <linux/spi/mxs-spi.h> 41#include <linux/spi/mxs-spi.h>
42#include <trace/events/spi.h>
42 43
43#define DRIVER_NAME "mxs-spi" 44#define DRIVER_NAME "mxs-spi"
44 45
@@ -374,6 +375,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
374 375
375 list_for_each_entry(t, &m->transfers, transfer_list) { 376 list_for_each_entry(t, &m->transfers, transfer_list) {
376 377
378 trace_spi_transfer_start(m, t);
379
377 status = mxs_spi_setup_transfer(m->spi, t); 380 status = mxs_spi_setup_transfer(m->spi, t);
378 if (status) 381 if (status)
379 break; 382 break;
@@ -419,6 +422,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
419 flag); 422 flag);
420 } 423 }
421 424
425 trace_spi_transfer_stop(m, t);
426
422 if (status) { 427 if (status) {
423 stmp_reset_block(ssp->base); 428 stmp_reset_block(ssp->base);
424 break; 429 break;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
new file mode 100644
index 000000000000..8894f98cc99c
--- /dev/null
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -0,0 +1,1106 @@
1// SPDX-License-Identifier: GPL-2.0+
2
3/*
4 * NXP FlexSPI(FSPI) controller driver.
5 *
6 * Copyright 2019 NXP.
7 *
8 * FlexSPI is a flexsible SPI host controller which supports two SPI
9 * channels and up to 4 external devices. Each channel supports
10 * Single/Dual/Quad/Octal mode data transfer (1/2/4/8 bidirectional
11 * data lines).
12 *
13 * FlexSPI controller is driven by the LUT(Look-up Table) registers
14 * LUT registers are a look-up-table for sequences of instructions.
15 * A valid sequence consists of four LUT registers.
16 * Maximum 32 LUT sequences can be programmed simultaneously.
17 *
18 * LUTs are being created at run-time based on the commands passed
19 * from the spi-mem framework, thus using single LUT index.
20 *
21 * Software triggered Flash read/write access by IP Bus.
22 *
23 * Memory mapped read access by AHB Bus.
24 *
25 * Based on SPI MEM interface and spi-fsl-qspi.c driver.
26 *
27 * Author:
28 * Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>
29 * Boris Brezillon <bbrezillon@kernel.org>
30 * Frieder Schrempf <frieder.schrempf@kontron.de>
31 */
32
33#include <linux/bitops.h>
34#include <linux/clk.h>
35#include <linux/completion.h>
36#include <linux/delay.h>
37#include <linux/err.h>
38#include <linux/errno.h>
39#include <linux/interrupt.h>
40#include <linux/io.h>
41#include <linux/iopoll.h>
42#include <linux/jiffies.h>
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/mutex.h>
46#include <linux/of.h>
47#include <linux/of_device.h>
48#include <linux/platform_device.h>
49#include <linux/pm_qos.h>
50#include <linux/sizes.h>
51
52#include <linux/spi/spi.h>
53#include <linux/spi/spi-mem.h>
54
55/*
56 * The driver only uses one single LUT entry, that is updated on
57 * each call of exec_op(). Index 0 is preset at boot with a basic
58 * read operation, so let's use the last entry (31).
59 */
60#define SEQID_LUT 31
61
62/* Registers used by the driver */
63#define FSPI_MCR0 0x00
64#define FSPI_MCR0_AHB_TIMEOUT(x) ((x) << 24)
65#define FSPI_MCR0_IP_TIMEOUT(x) ((x) << 16)
66#define FSPI_MCR0_LEARN_EN BIT(15)
67#define FSPI_MCR0_SCRFRUN_EN BIT(14)
68#define FSPI_MCR0_OCTCOMB_EN BIT(13)
69#define FSPI_MCR0_DOZE_EN BIT(12)
70#define FSPI_MCR0_HSEN BIT(11)
71#define FSPI_MCR0_SERCLKDIV BIT(8)
72#define FSPI_MCR0_ATDF_EN BIT(7)
73#define FSPI_MCR0_ARDF_EN BIT(6)
74#define FSPI_MCR0_RXCLKSRC(x) ((x) << 4)
75#define FSPI_MCR0_END_CFG(x) ((x) << 2)
76#define FSPI_MCR0_MDIS BIT(1)
77#define FSPI_MCR0_SWRST BIT(0)
78
79#define FSPI_MCR1 0x04
80#define FSPI_MCR1_SEQ_TIMEOUT(x) ((x) << 16)
81#define FSPI_MCR1_AHB_TIMEOUT(x) (x)
82
83#define FSPI_MCR2 0x08
84#define FSPI_MCR2_IDLE_WAIT(x) ((x) << 24)
85#define FSPI_MCR2_SAMEDEVICEEN BIT(15)
86#define FSPI_MCR2_CLRLRPHS BIT(14)
87#define FSPI_MCR2_ABRDATSZ BIT(8)
88#define FSPI_MCR2_ABRLEARN BIT(7)
89#define FSPI_MCR2_ABR_READ BIT(6)
90#define FSPI_MCR2_ABRWRITE BIT(5)
91#define FSPI_MCR2_ABRDUMMY BIT(4)
92#define FSPI_MCR2_ABR_MODE BIT(3)
93#define FSPI_MCR2_ABRCADDR BIT(2)
94#define FSPI_MCR2_ABRRADDR BIT(1)
95#define FSPI_MCR2_ABR_CMD BIT(0)
96
97#define FSPI_AHBCR 0x0c
98#define FSPI_AHBCR_RDADDROPT BIT(6)
99#define FSPI_AHBCR_PREF_EN BIT(5)
100#define FSPI_AHBCR_BUFF_EN BIT(4)
101#define FSPI_AHBCR_CACH_EN BIT(3)
102#define FSPI_AHBCR_CLRTXBUF BIT(2)
103#define FSPI_AHBCR_CLRRXBUF BIT(1)
104#define FSPI_AHBCR_PAR_EN BIT(0)
105
106#define FSPI_INTEN 0x10
107#define FSPI_INTEN_SCLKSBWR BIT(9)
108#define FSPI_INTEN_SCLKSBRD BIT(8)
109#define FSPI_INTEN_DATALRNFL BIT(7)
110#define FSPI_INTEN_IPTXWE BIT(6)
111#define FSPI_INTEN_IPRXWA BIT(5)
112#define FSPI_INTEN_AHBCMDERR BIT(4)
113#define FSPI_INTEN_IPCMDERR BIT(3)
114#define FSPI_INTEN_AHBCMDGE BIT(2)
115#define FSPI_INTEN_IPCMDGE BIT(1)
116#define FSPI_INTEN_IPCMDDONE BIT(0)
117
118#define FSPI_INTR 0x14
119#define FSPI_INTR_SCLKSBWR BIT(9)
120#define FSPI_INTR_SCLKSBRD BIT(8)
121#define FSPI_INTR_DATALRNFL BIT(7)
122#define FSPI_INTR_IPTXWE BIT(6)
123#define FSPI_INTR_IPRXWA BIT(5)
124#define FSPI_INTR_AHBCMDERR BIT(4)
125#define FSPI_INTR_IPCMDERR BIT(3)
126#define FSPI_INTR_AHBCMDGE BIT(2)
127#define FSPI_INTR_IPCMDGE BIT(1)
128#define FSPI_INTR_IPCMDDONE BIT(0)
129
130#define FSPI_LUTKEY 0x18
131#define FSPI_LUTKEY_VALUE 0x5AF05AF0
132
133#define FSPI_LCKCR 0x1C
134
135#define FSPI_LCKER_LOCK 0x1
136#define FSPI_LCKER_UNLOCK 0x2
137
138#define FSPI_BUFXCR_INVALID_MSTRID 0xE
139#define FSPI_AHBRX_BUF0CR0 0x20
140#define FSPI_AHBRX_BUF1CR0 0x24
141#define FSPI_AHBRX_BUF2CR0 0x28
142#define FSPI_AHBRX_BUF3CR0 0x2C
143#define FSPI_AHBRX_BUF4CR0 0x30
144#define FSPI_AHBRX_BUF5CR0 0x34
145#define FSPI_AHBRX_BUF6CR0 0x38
146#define FSPI_AHBRX_BUF7CR0 0x3C
147#define FSPI_AHBRXBUF0CR7_PREF BIT(31)
148
149#define FSPI_AHBRX_BUF0CR1 0x40
150#define FSPI_AHBRX_BUF1CR1 0x44
151#define FSPI_AHBRX_BUF2CR1 0x48
152#define FSPI_AHBRX_BUF3CR1 0x4C
153#define FSPI_AHBRX_BUF4CR1 0x50
154#define FSPI_AHBRX_BUF5CR1 0x54
155#define FSPI_AHBRX_BUF6CR1 0x58
156#define FSPI_AHBRX_BUF7CR1 0x5C
157
158#define FSPI_FLSHA1CR0 0x60
159#define FSPI_FLSHA2CR0 0x64
160#define FSPI_FLSHB1CR0 0x68
161#define FSPI_FLSHB2CR0 0x6C
162#define FSPI_FLSHXCR0_SZ_KB 10
163#define FSPI_FLSHXCR0_SZ(x) ((x) >> FSPI_FLSHXCR0_SZ_KB)
164
165#define FSPI_FLSHA1CR1 0x70
166#define FSPI_FLSHA2CR1 0x74
167#define FSPI_FLSHB1CR1 0x78
168#define FSPI_FLSHB2CR1 0x7C
169#define FSPI_FLSHXCR1_CSINTR(x) ((x) << 16)
170#define FSPI_FLSHXCR1_CAS(x) ((x) << 11)
171#define FSPI_FLSHXCR1_WA BIT(10)
172#define FSPI_FLSHXCR1_TCSH(x) ((x) << 5)
173#define FSPI_FLSHXCR1_TCSS(x) (x)
174
175#define FSPI_FLSHA1CR2 0x80
176#define FSPI_FLSHA2CR2 0x84
177#define FSPI_FLSHB1CR2 0x88
178#define FSPI_FLSHB2CR2 0x8C
179#define FSPI_FLSHXCR2_CLRINSP BIT(24)
180#define FSPI_FLSHXCR2_AWRWAIT BIT(16)
181#define FSPI_FLSHXCR2_AWRSEQN_SHIFT 13
182#define FSPI_FLSHXCR2_AWRSEQI_SHIFT 8
183#define FSPI_FLSHXCR2_ARDSEQN_SHIFT 5
184#define FSPI_FLSHXCR2_ARDSEQI_SHIFT 0
185
186#define FSPI_IPCR0 0xA0
187
188#define FSPI_IPCR1 0xA4
189#define FSPI_IPCR1_IPAREN BIT(31)
190#define FSPI_IPCR1_SEQNUM_SHIFT 24
191#define FSPI_IPCR1_SEQID_SHIFT 16
192#define FSPI_IPCR1_IDATSZ(x) (x)
193
194#define FSPI_IPCMD 0xB0
195#define FSPI_IPCMD_TRG BIT(0)
196
197#define FSPI_DLPR 0xB4
198
199#define FSPI_IPRXFCR 0xB8
200#define FSPI_IPRXFCR_CLR BIT(0)
201#define FSPI_IPRXFCR_DMA_EN BIT(1)
202#define FSPI_IPRXFCR_WMRK(x) ((x) << 2)
203
204#define FSPI_IPTXFCR 0xBC
205#define FSPI_IPTXFCR_CLR BIT(0)
206#define FSPI_IPTXFCR_DMA_EN BIT(1)
207#define FSPI_IPTXFCR_WMRK(x) ((x) << 2)
208
209#define FSPI_DLLACR 0xC0
210#define FSPI_DLLACR_OVRDEN BIT(8)
211
212#define FSPI_DLLBCR 0xC4
213#define FSPI_DLLBCR_OVRDEN BIT(8)
214
215#define FSPI_STS0 0xE0
216#define FSPI_STS0_DLPHB(x) ((x) << 8)
217#define FSPI_STS0_DLPHA(x) ((x) << 4)
218#define FSPI_STS0_CMD_SRC(x) ((x) << 2)
219#define FSPI_STS0_ARB_IDLE BIT(1)
220#define FSPI_STS0_SEQ_IDLE BIT(0)
221
222#define FSPI_STS1 0xE4
223#define FSPI_STS1_IP_ERRCD(x) ((x) << 24)
224#define FSPI_STS1_IP_ERRID(x) ((x) << 16)
225#define FSPI_STS1_AHB_ERRCD(x) ((x) << 8)
226#define FSPI_STS1_AHB_ERRID(x) (x)
227
228#define FSPI_AHBSPNST 0xEC
229#define FSPI_AHBSPNST_DATLFT(x) ((x) << 16)
230#define FSPI_AHBSPNST_BUFID(x) ((x) << 1)
231#define FSPI_AHBSPNST_ACTIVE BIT(0)
232
233#define FSPI_IPRXFSTS 0xF0
234#define FSPI_IPRXFSTS_RDCNTR(x) ((x) << 16)
235#define FSPI_IPRXFSTS_FILL(x) (x)
236
237#define FSPI_IPTXFSTS 0xF4
238#define FSPI_IPTXFSTS_WRCNTR(x) ((x) << 16)
239#define FSPI_IPTXFSTS_FILL(x) (x)
240
241#define FSPI_RFDR 0x100
242#define FSPI_TFDR 0x180
243
244#define FSPI_LUT_BASE 0x200
245#define FSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
246#define FSPI_LUT_REG(idx) \
247 (FSPI_LUT_BASE + FSPI_LUT_OFFSET + (idx) * 4)
248
249/* register map end */
250
251/* Instruction set for the LUT register. */
252#define LUT_STOP 0x00
253#define LUT_CMD 0x01
254#define LUT_ADDR 0x02
255#define LUT_CADDR_SDR 0x03
256#define LUT_MODE 0x04
257#define LUT_MODE2 0x05
258#define LUT_MODE4 0x06
259#define LUT_MODE8 0x07
260#define LUT_NXP_WRITE 0x08
261#define LUT_NXP_READ 0x09
262#define LUT_LEARN_SDR 0x0A
263#define LUT_DATSZ_SDR 0x0B
264#define LUT_DUMMY 0x0C
265#define LUT_DUMMY_RWDS_SDR 0x0D
266#define LUT_JMP_ON_CS 0x1F
267#define LUT_CMD_DDR 0x21
268#define LUT_ADDR_DDR 0x22
269#define LUT_CADDR_DDR 0x23
270#define LUT_MODE_DDR 0x24
271#define LUT_MODE2_DDR 0x25
272#define LUT_MODE4_DDR 0x26
273#define LUT_MODE8_DDR 0x27
274#define LUT_WRITE_DDR 0x28
275#define LUT_READ_DDR 0x29
276#define LUT_LEARN_DDR 0x2A
277#define LUT_DATSZ_DDR 0x2B
278#define LUT_DUMMY_DDR 0x2C
279#define LUT_DUMMY_RWDS_DDR 0x2D
280
281/*
282 * Calculate number of required PAD bits for LUT register.
283 *
284 * The pad stands for the number of IO lines [0:7].
285 * For example, the octal read needs eight IO lines,
286 * so you should use LUT_PAD(8). This macro
287 * returns 3 i.e. use eight (2^3) IP lines for read.
288 */
289#define LUT_PAD(x) (fls(x) - 1)
290
291/*
292 * Macro for constructing the LUT entries with the following
293 * register layout:
294 *
295 * ---------------------------------------------------
296 * | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
297 * ---------------------------------------------------
298 */
299#define PAD_SHIFT 8
300#define INSTR_SHIFT 10
301#define OPRND_SHIFT 16
302
303/* Macros for constructing the LUT register. */
304#define LUT_DEF(idx, ins, pad, opr) \
305 ((((ins) << INSTR_SHIFT) | ((pad) << PAD_SHIFT) | \
306 (opr)) << (((idx) % 2) * OPRND_SHIFT))
307
308#define POLL_TOUT 5000
309#define NXP_FSPI_MAX_CHIPSELECT 4
310
311struct nxp_fspi_devtype_data {
312 unsigned int rxfifo;
313 unsigned int txfifo;
314 unsigned int ahb_buf_size;
315 unsigned int quirks;
316 bool little_endian;
317};
318
319static const struct nxp_fspi_devtype_data lx2160a_data = {
320 .rxfifo = SZ_512, /* (64 * 64 bits) */
321 .txfifo = SZ_1K, /* (128 * 64 bits) */
322 .ahb_buf_size = SZ_2K, /* (256 * 64 bits) */
323 .quirks = 0,
324 .little_endian = true, /* little-endian */
325};
326
327struct nxp_fspi {
328 void __iomem *iobase;
329 void __iomem *ahb_addr;
330 u32 memmap_phy;
331 u32 memmap_phy_size;
332 struct clk *clk, *clk_en;
333 struct device *dev;
334 struct completion c;
335 const struct nxp_fspi_devtype_data *devtype_data;
336 struct mutex lock;
337 struct pm_qos_request pm_qos_req;
338 int selected;
339};
340
341/*
342 * R/W functions for big- or little-endian registers:
343 * The FSPI controller's endianness is independent of
344 * the CPU core's endianness. So far, although the CPU
345 * core is little-endian the FSPI controller can use
346 * big-endian or little-endian.
347 */
348static void fspi_writel(struct nxp_fspi *f, u32 val, void __iomem *addr)
349{
350 if (f->devtype_data->little_endian)
351 iowrite32(val, addr);
352 else
353 iowrite32be(val, addr);
354}
355
356static u32 fspi_readl(struct nxp_fspi *f, void __iomem *addr)
357{
358 if (f->devtype_data->little_endian)
359 return ioread32(addr);
360 else
361 return ioread32be(addr);
362}
363
364static irqreturn_t nxp_fspi_irq_handler(int irq, void *dev_id)
365{
366 struct nxp_fspi *f = dev_id;
367 u32 reg;
368
369 /* clear interrupt */
370 reg = fspi_readl(f, f->iobase + FSPI_INTR);
371 fspi_writel(f, FSPI_INTR_IPCMDDONE, f->iobase + FSPI_INTR);
372
373 if (reg & FSPI_INTR_IPCMDDONE)
374 complete(&f->c);
375
376 return IRQ_HANDLED;
377}
378
379static int nxp_fspi_check_buswidth(struct nxp_fspi *f, u8 width)
380{
381 switch (width) {
382 case 1:
383 case 2:
384 case 4:
385 case 8:
386 return 0;
387 }
388
389 return -ENOTSUPP;
390}
391
392static bool nxp_fspi_supports_op(struct spi_mem *mem,
393 const struct spi_mem_op *op)
394{
395 struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
396 int ret;
397
398 ret = nxp_fspi_check_buswidth(f, op->cmd.buswidth);
399
400 if (op->addr.nbytes)
401 ret |= nxp_fspi_check_buswidth(f, op->addr.buswidth);
402
403 if (op->dummy.nbytes)
404 ret |= nxp_fspi_check_buswidth(f, op->dummy.buswidth);
405
406 if (op->data.nbytes)
407 ret |= nxp_fspi_check_buswidth(f, op->data.buswidth);
408
409 if (ret)
410 return false;
411
412 /*
413 * The number of address bytes should be equal to or less than 4 bytes.
414 */
415 if (op->addr.nbytes > 4)
416 return false;
417
418 /*
419 * If requested address value is greater than controller assigned
420 * memory mapped space, return error as it didn't fit in the range
421 * of assigned address space.
422 */
423 if (op->addr.val >= f->memmap_phy_size)
424 return false;
425
426 /* Max 64 dummy clock cycles supported */
427 if (op->dummy.buswidth &&
428 (op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
429 return false;
430
431 /* Max data length, check controller limits and alignment */
432 if (op->data.dir == SPI_MEM_DATA_IN &&
433 (op->data.nbytes > f->devtype_data->ahb_buf_size ||
434 (op->data.nbytes > f->devtype_data->rxfifo - 4 &&
435 !IS_ALIGNED(op->data.nbytes, 8))))
436 return false;
437
438 if (op->data.dir == SPI_MEM_DATA_OUT &&
439 op->data.nbytes > f->devtype_data->txfifo)
440 return false;
441
442 return true;
443}
444
445/* Instead of busy looping invoke readl_poll_timeout functionality. */
446static int fspi_readl_poll_tout(struct nxp_fspi *f, void __iomem *base,
447 u32 mask, u32 delay_us,
448 u32 timeout_us, bool c)
449{
450 u32 reg;
451
452 if (!f->devtype_data->little_endian)
453 mask = (u32)cpu_to_be32(mask);
454
455 if (c)
456 return readl_poll_timeout(base, reg, (reg & mask),
457 delay_us, timeout_us);
458 else
459 return readl_poll_timeout(base, reg, !(reg & mask),
460 delay_us, timeout_us);
461}
462
463/*
464 * If the slave device content being changed by Write/Erase, need to
465 * invalidate the AHB buffer. This can be achieved by doing the reset
466 * of controller after setting MCR0[SWRESET] bit.
467 */
468static inline void nxp_fspi_invalid(struct nxp_fspi *f)
469{
470 u32 reg;
471 int ret;
472
473 reg = fspi_readl(f, f->iobase + FSPI_MCR0);
474 fspi_writel(f, reg | FSPI_MCR0_SWRST, f->iobase + FSPI_MCR0);
475
476 /* w1c register, wait unit clear */
477 ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
478 FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
479 WARN_ON(ret);
480}
481
482static void nxp_fspi_prepare_lut(struct nxp_fspi *f,
483 const struct spi_mem_op *op)
484{
485 void __iomem *base = f->iobase;
486 u32 lutval[4] = {};
487 int lutidx = 1, i;
488
489 /* cmd */
490 lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
491 op->cmd.opcode);
492
493 /* addr bytes */
494 if (op->addr.nbytes) {
495 lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_ADDR,
496 LUT_PAD(op->addr.buswidth),
497 op->addr.nbytes * 8);
498 lutidx++;
499 }
500
501 /* dummy bytes, if needed */
502 if (op->dummy.nbytes) {
503 lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
504 /*
505 * Due to FlexSPI controller limitation number of PAD for dummy
506 * buswidth needs to be programmed as equal to data buswidth.
507 */
508 LUT_PAD(op->data.buswidth),
509 op->dummy.nbytes * 8 /
510 op->dummy.buswidth);
511 lutidx++;
512 }
513
514 /* read/write data bytes */
515 if (op->data.nbytes) {
516 lutval[lutidx / 2] |= LUT_DEF(lutidx,
517 op->data.dir == SPI_MEM_DATA_IN ?
518 LUT_NXP_READ : LUT_NXP_WRITE,
519 LUT_PAD(op->data.buswidth),
520 0);
521 lutidx++;
522 }
523
524 /* stop condition. */
525 lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
526
527 /* unlock LUT */
528 fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
529 fspi_writel(f, FSPI_LCKER_UNLOCK, f->iobase + FSPI_LCKCR);
530
531 /* fill LUT */
532 for (i = 0; i < ARRAY_SIZE(lutval); i++)
533 fspi_writel(f, lutval[i], base + FSPI_LUT_REG(i));
534
535 dev_dbg(f->dev, "CMD[%x] lutval[0:%x \t 1:%x \t 2:%x \t 3:%x]\n",
536 op->cmd.opcode, lutval[0], lutval[1], lutval[2], lutval[3]);
537
538 /* lock LUT */
539 fspi_writel(f, FSPI_LUTKEY_VALUE, f->iobase + FSPI_LUTKEY);
540 fspi_writel(f, FSPI_LCKER_LOCK, f->iobase + FSPI_LCKCR);
541}
542
543static int nxp_fspi_clk_prep_enable(struct nxp_fspi *f)
544{
545 int ret;
546
547 ret = clk_prepare_enable(f->clk_en);
548 if (ret)
549 return ret;
550
551 ret = clk_prepare_enable(f->clk);
552 if (ret) {
553 clk_disable_unprepare(f->clk_en);
554 return ret;
555 }
556
557 return 0;
558}
559
560static void nxp_fspi_clk_disable_unprep(struct nxp_fspi *f)
561{
562 clk_disable_unprepare(f->clk);
563 clk_disable_unprepare(f->clk_en);
564}
565
566/*
567 * In FlexSPI controller, flash access is based on value of FSPI_FLSHXXCR0
568 * register and start base address of the slave device.
569 *
570 * (Higher address)
571 * -------- <-- FLSHB2CR0
572 * | B2 |
573 * | |
574 * B2 start address --> -------- <-- FLSHB1CR0
575 * | B1 |
576 * | |
577 * B1 start address --> -------- <-- FLSHA2CR0
578 * | A2 |
579 * | |
580 * A2 start address --> -------- <-- FLSHA1CR0
581 * | A1 |
582 * | |
583 * A1 start address --> -------- (Lower address)
584 *
585 *
586 * Start base address defines the starting address range for given CS and
587 * FSPI_FLSHXXCR0 defines the size of the slave device connected at given CS.
588 *
589 * But, different targets are having different combinations of number of CS,
590 * some targets only have single CS or two CS covering controller's full
591 * memory mapped space area.
592 * Thus, implementation is being done as independent of the size and number
593 * of the connected slave device.
594 * Assign controller memory mapped space size as the size to the connected
595 * slave device.
596 * Mark FLSHxxCR0 as zero initially and then assign value only to the selected
597 * chip-select Flash configuration register.
598 *
599 * For e.g. to access CS2 (B1), FLSHB1CR0 register would be equal to the
600 * memory mapped size of the controller.
601 * Value for rest of the CS FLSHxxCR0 register would be zero.
602 *
603 */
604static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi)
605{
606 unsigned long rate = spi->max_speed_hz;
607 int ret;
608 uint64_t size_kb;
609
610 /*
611 * Return, if previously selected slave device is same as current
612 * requested slave device.
613 */
614 if (f->selected == spi->chip_select)
615 return;
616
617 /* Reset FLSHxxCR0 registers */
618 fspi_writel(f, 0, f->iobase + FSPI_FLSHA1CR0);
619 fspi_writel(f, 0, f->iobase + FSPI_FLSHA2CR0);
620 fspi_writel(f, 0, f->iobase + FSPI_FLSHB1CR0);
621 fspi_writel(f, 0, f->iobase + FSPI_FLSHB2CR0);
622
623 /* Assign controller memory mapped space as size, KBytes, of flash. */
624 size_kb = FSPI_FLSHXCR0_SZ(f->memmap_phy_size);
625
626 fspi_writel(f, size_kb, f->iobase + FSPI_FLSHA1CR0 +
627 4 * spi->chip_select);
628
629 dev_dbg(f->dev, "Slave device [CS:%x] selected\n", spi->chip_select);
630
631 nxp_fspi_clk_disable_unprep(f);
632
633 ret = clk_set_rate(f->clk, rate);
634 if (ret)
635 return;
636
637 ret = nxp_fspi_clk_prep_enable(f);
638 if (ret)
639 return;
640
641 f->selected = spi->chip_select;
642}
643
644static void nxp_fspi_read_ahb(struct nxp_fspi *f, const struct spi_mem_op *op)
645{
646 u32 len = op->data.nbytes;
647
648 /* Read out the data directly from the AHB buffer. */
649 memcpy_fromio(op->data.buf.in, (f->ahb_addr + op->addr.val), len);
650}
651
652static void nxp_fspi_fill_txfifo(struct nxp_fspi *f,
653 const struct spi_mem_op *op)
654{
655 void __iomem *base = f->iobase;
656 int i, ret;
657 u8 *buf = (u8 *) op->data.buf.out;
658
659 /* clear the TX FIFO. */
660 fspi_writel(f, FSPI_IPTXFCR_CLR, base + FSPI_IPTXFCR);
661
662 /*
663 * Default value of water mark level is 8 bytes, hence in single
664 * write request controller can write max 8 bytes of data.
665 */
666
667 for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 8); i += 8) {
668 /* Wait for TXFIFO empty */
669 ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
670 FSPI_INTR_IPTXWE, 0,
671 POLL_TOUT, true);
672 WARN_ON(ret);
673
674 fspi_writel(f, *(u32 *) (buf + i), base + FSPI_TFDR);
675 fspi_writel(f, *(u32 *) (buf + i + 4), base + FSPI_TFDR + 4);
676 fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
677 }
678
679 if (i < op->data.nbytes) {
680 u32 data = 0;
681 int j;
682 /* Wait for TXFIFO empty */
683 ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
684 FSPI_INTR_IPTXWE, 0,
685 POLL_TOUT, true);
686 WARN_ON(ret);
687
688 for (j = 0; j < ALIGN(op->data.nbytes - i, 4); j += 4) {
689 memcpy(&data, buf + i + j, 4);
690 fspi_writel(f, data, base + FSPI_TFDR + j);
691 }
692 fspi_writel(f, FSPI_INTR_IPTXWE, base + FSPI_INTR);
693 }
694}
695
696static void nxp_fspi_read_rxfifo(struct nxp_fspi *f,
697 const struct spi_mem_op *op)
698{
699 void __iomem *base = f->iobase;
700 int i, ret;
701 int len = op->data.nbytes;
702 u8 *buf = (u8 *) op->data.buf.in;
703
704 /*
705 * Default value of water mark level is 8 bytes, hence in single
706 * read request controller can read max 8 bytes of data.
707 */
708 for (i = 0; i < ALIGN_DOWN(len, 8); i += 8) {
709 /* Wait for RXFIFO available */
710 ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
711 FSPI_INTR_IPRXWA, 0,
712 POLL_TOUT, true);
713 WARN_ON(ret);
714
715 *(u32 *)(buf + i) = fspi_readl(f, base + FSPI_RFDR);
716 *(u32 *)(buf + i + 4) = fspi_readl(f, base + FSPI_RFDR + 4);
717 /* move the FIFO pointer */
718 fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
719 }
720
721 if (i < len) {
722 u32 tmp;
723 int size, j;
724
725 buf = op->data.buf.in + i;
726 /* Wait for RXFIFO available */
727 ret = fspi_readl_poll_tout(f, f->iobase + FSPI_INTR,
728 FSPI_INTR_IPRXWA, 0,
729 POLL_TOUT, true);
730 WARN_ON(ret);
731
732 len = op->data.nbytes - i;
733 for (j = 0; j < op->data.nbytes - i; j += 4) {
734 tmp = fspi_readl(f, base + FSPI_RFDR + j);
735 size = min(len, 4);
736 memcpy(buf + j, &tmp, size);
737 len -= size;
738 }
739 }
740
741 /* invalid the RXFIFO */
742 fspi_writel(f, FSPI_IPRXFCR_CLR, base + FSPI_IPRXFCR);
743 /* move the FIFO pointer */
744 fspi_writel(f, FSPI_INTR_IPRXWA, base + FSPI_INTR);
745}
746
747static int nxp_fspi_do_op(struct nxp_fspi *f, const struct spi_mem_op *op)
748{
749 void __iomem *base = f->iobase;
750 int seqnum = 0;
751 int err = 0;
752 u32 reg;
753
754 reg = fspi_readl(f, base + FSPI_IPRXFCR);
755 /* invalid RXFIFO first */
756 reg &= ~FSPI_IPRXFCR_DMA_EN;
757 reg = reg | FSPI_IPRXFCR_CLR;
758 fspi_writel(f, reg, base + FSPI_IPRXFCR);
759
760 init_completion(&f->c);
761
762 fspi_writel(f, op->addr.val, base + FSPI_IPCR0);
763 /*
764 * Always start the sequence at the same index since we update
765 * the LUT at each exec_op() call. And also specify the DATA
766 * length, since it's has not been specified in the LUT.
767 */
768 fspi_writel(f, op->data.nbytes |
769 (SEQID_LUT << FSPI_IPCR1_SEQID_SHIFT) |
770 (seqnum << FSPI_IPCR1_SEQNUM_SHIFT),
771 base + FSPI_IPCR1);
772
773 /* Trigger the LUT now. */
774 fspi_writel(f, FSPI_IPCMD_TRG, base + FSPI_IPCMD);
775
776 /* Wait for the interrupt. */
777 if (!wait_for_completion_timeout(&f->c, msecs_to_jiffies(1000)))
778 err = -ETIMEDOUT;
779
780 /* Invoke IP data read, if request is of data read. */
781 if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
782 nxp_fspi_read_rxfifo(f, op);
783
784 return err;
785}
786
787static int nxp_fspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
788{
789 struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
790 int err = 0;
791
792 mutex_lock(&f->lock);
793
794 /* Wait for controller being ready. */
795 err = fspi_readl_poll_tout(f, f->iobase + FSPI_STS0,
796 FSPI_STS0_ARB_IDLE, 1, POLL_TOUT, true);
797 WARN_ON(err);
798
799 nxp_fspi_select_mem(f, mem->spi);
800
801 nxp_fspi_prepare_lut(f, op);
802 /*
803 * If we have large chunks of data, we read them through the AHB bus
804 * by accessing the mapped memory. In all other cases we use
805 * IP commands to access the flash.
806 */
807 if (op->data.nbytes > (f->devtype_data->rxfifo - 4) &&
808 op->data.dir == SPI_MEM_DATA_IN) {
809 nxp_fspi_read_ahb(f, op);
810 } else {
811 if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
812 nxp_fspi_fill_txfifo(f, op);
813
814 err = nxp_fspi_do_op(f, op);
815 }
816
817 /* Invalidate the data in the AHB buffer. */
818 nxp_fspi_invalid(f);
819
820 mutex_unlock(&f->lock);
821
822 return err;
823}
824
825static int nxp_fspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
826{
827 struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
828
829 if (op->data.dir == SPI_MEM_DATA_OUT) {
830 if (op->data.nbytes > f->devtype_data->txfifo)
831 op->data.nbytes = f->devtype_data->txfifo;
832 } else {
833 if (op->data.nbytes > f->devtype_data->ahb_buf_size)
834 op->data.nbytes = f->devtype_data->ahb_buf_size;
835 else if (op->data.nbytes > (f->devtype_data->rxfifo - 4))
836 op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
837 }
838
839 return 0;
840}
841
/*
 * Bring the controller into a known-good default state: reset the IP,
 * program conservative AHB/IP timeouts, clear the AHB read buffers,
 * point every chip-select at the single LUT sequence and enable the
 * IP-command-done interrupt.  Called from probe and again on resume.
 *
 * Returns 0 on success or a negative errno from the clock API.
 */
static int nxp_fspi_default_setup(struct nxp_fspi *f)
{
	void __iomem *base = f->iobase;
	int ret, i;
	u32 reg;

	/* disable and unprepare clock to avoid glitch pass to controller */
	nxp_fspi_clk_disable_unprep(f);

	/* the default frequency, we will change it later if necessary. */
	ret = clk_set_rate(f->clk, 20000000);
	if (ret)
		return ret;

	ret = nxp_fspi_clk_prep_enable(f);
	if (ret)
		return ret;

	/* Reset the module */
	/* w1c register, wait unit clear */
	ret = fspi_readl_poll_tout(f, f->iobase + FSPI_MCR0,
				   FSPI_MCR0_SWRST, 0, POLL_TOUT, false);
	/* Reset timing out is unexpected but not fatal; warn and carry on. */
	WARN_ON(ret);

	/* Disable the module */
	fspi_writel(f, FSPI_MCR0_MDIS, base + FSPI_MCR0);

	/* Reset the DLL register to default value */
	fspi_writel(f, FSPI_DLLACR_OVRDEN, base + FSPI_DLLACR);
	fspi_writel(f, FSPI_DLLBCR_OVRDEN, base + FSPI_DLLBCR);

	/* enable module */
	fspi_writel(f, FSPI_MCR0_AHB_TIMEOUT(0xFF) | FSPI_MCR0_IP_TIMEOUT(0xFF),
		    base + FSPI_MCR0);

	/*
	 * Disable same device enable bit and configure all slave devices
	 * independently.
	 */
	reg = fspi_readl(f, f->iobase + FSPI_MCR2);
	reg = reg & ~(FSPI_MCR2_SAMEDEVICEEN);
	fspi_writel(f, reg, base + FSPI_MCR2);

	/*
	 * AHB configuration: clear access buffers 0~6 here; buffer 7 is
	 * configured separately below with prefetch enabled.
	 */
	for (i = 0; i < 7; i++)
		fspi_writel(f, 0, base + FSPI_AHBRX_BUF0CR0 + 4 * i);

	/*
	 * Set ADATSZ with the maximum AHB buffer size to improve the read
	 * performance.
	 */
	fspi_writel(f, (f->devtype_data->ahb_buf_size / 8 |
		  FSPI_AHBRXBUF0CR7_PREF), base + FSPI_AHBRX_BUF7CR0);

	/* prefetch and no start address alignment limitation */
	fspi_writel(f, FSPI_AHBCR_PREF_EN | FSPI_AHBCR_RDADDROPT,
		    base + FSPI_AHBCR);

	/* AHB Read - Set lut sequence ID for all CS. */
	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA1CR2);
	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHA2CR2);
	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB1CR2);
	fspi_writel(f, SEQID_LUT, base + FSPI_FLSHB2CR2);

	/* No chip-select configured yet; forces a reconfigure on first use. */
	f->selected = -1;

	/* enable the interrupt */
	fspi_writel(f, FSPI_INTEN_IPCMDDONE, base + FSPI_INTEN);

	return 0;
}
913
914static const char *nxp_fspi_get_name(struct spi_mem *mem)
915{
916 struct nxp_fspi *f = spi_controller_get_devdata(mem->spi->master);
917 struct device *dev = &mem->spi->dev;
918 const char *name;
919
920 // Set custom name derived from the platform_device of the controller.
921 if (of_get_available_child_count(f->dev->of_node) == 1)
922 return dev_name(f->dev);
923
924 name = devm_kasprintf(dev, GFP_KERNEL,
925 "%s-%d", dev_name(f->dev),
926 mem->spi->chip_select);
927
928 if (!name) {
929 dev_err(dev, "failed to get memory for custom flash name\n");
930 return ERR_PTR(-ENOMEM);
931 }
932
933 return name;
934}
935
/* spi-mem operations handed to the SPI core via ctlr->mem_ops. */
static const struct spi_controller_mem_ops nxp_fspi_mem_ops = {
	.adjust_op_size = nxp_fspi_adjust_op_size,
	.supports_op = nxp_fspi_supports_op,
	.exec_op = nxp_fspi_exec_op,
	.get_name = nxp_fspi_get_name,
};
942
943static int nxp_fspi_probe(struct platform_device *pdev)
944{
945 struct spi_controller *ctlr;
946 struct device *dev = &pdev->dev;
947 struct device_node *np = dev->of_node;
948 struct resource *res;
949 struct nxp_fspi *f;
950 int ret;
951
952 ctlr = spi_alloc_master(&pdev->dev, sizeof(*f));
953 if (!ctlr)
954 return -ENOMEM;
955
956 ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL |
957 SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL;
958
959 f = spi_controller_get_devdata(ctlr);
960 f->dev = dev;
961 f->devtype_data = of_device_get_match_data(dev);
962 if (!f->devtype_data) {
963 ret = -ENODEV;
964 goto err_put_ctrl;
965 }
966
967 platform_set_drvdata(pdev, f);
968
969 /* find the resources - configuration register address space */
970 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_base");
971 f->iobase = devm_ioremap_resource(dev, res);
972 if (IS_ERR(f->iobase)) {
973 ret = PTR_ERR(f->iobase);
974 goto err_put_ctrl;
975 }
976
977 /* find the resources - controller memory mapped space */
978 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
979 f->ahb_addr = devm_ioremap_resource(dev, res);
980 if (IS_ERR(f->ahb_addr)) {
981 ret = PTR_ERR(f->ahb_addr);
982 goto err_put_ctrl;
983 }
984
985 /* assign memory mapped starting address and mapped size. */
986 f->memmap_phy = res->start;
987 f->memmap_phy_size = resource_size(res);
988
989 /* find the clocks */
990 f->clk_en = devm_clk_get(dev, "fspi_en");
991 if (IS_ERR(f->clk_en)) {
992 ret = PTR_ERR(f->clk_en);
993 goto err_put_ctrl;
994 }
995
996 f->clk = devm_clk_get(dev, "fspi");
997 if (IS_ERR(f->clk)) {
998 ret = PTR_ERR(f->clk);
999 goto err_put_ctrl;
1000 }
1001
1002 ret = nxp_fspi_clk_prep_enable(f);
1003 if (ret) {
1004 dev_err(dev, "can not enable the clock\n");
1005 goto err_put_ctrl;
1006 }
1007
1008 /* find the irq */
1009 ret = platform_get_irq(pdev, 0);
1010 if (ret < 0) {
1011 dev_err(dev, "failed to get the irq: %d\n", ret);
1012 goto err_disable_clk;
1013 }
1014
1015 ret = devm_request_irq(dev, ret,
1016 nxp_fspi_irq_handler, 0, pdev->name, f);
1017 if (ret) {
1018 dev_err(dev, "failed to request irq: %d\n", ret);
1019 goto err_disable_clk;
1020 }
1021
1022 mutex_init(&f->lock);
1023
1024 ctlr->bus_num = -1;
1025 ctlr->num_chipselect = NXP_FSPI_MAX_CHIPSELECT;
1026 ctlr->mem_ops = &nxp_fspi_mem_ops;
1027
1028 nxp_fspi_default_setup(f);
1029
1030 ctlr->dev.of_node = np;
1031
1032 ret = spi_register_controller(ctlr);
1033 if (ret)
1034 goto err_destroy_mutex;
1035
1036 return 0;
1037
1038err_destroy_mutex:
1039 mutex_destroy(&f->lock);
1040
1041err_disable_clk:
1042 nxp_fspi_clk_disable_unprep(f);
1043
1044err_put_ctrl:
1045 spi_controller_put(ctlr);
1046
1047 dev_err(dev, "NXP FSPI probe failed\n");
1048 return ret;
1049}
1050
/*
 * Teardown counterpart of probe.  The spi_controller itself is
 * devm-managed; only the hardware and driver-private state need
 * explicit cleanup here.
 */
static int nxp_fspi_remove(struct platform_device *pdev)
{
	struct nxp_fspi *f = platform_get_drvdata(pdev);

	/* disable the hardware */
	/* Must happen before the clocks are gated below. */
	fspi_writel(f, FSPI_MCR0_MDIS, f->iobase + FSPI_MCR0);

	nxp_fspi_clk_disable_unprep(f);

	mutex_destroy(&f->lock);

	return 0;
}
1064
/*
 * System suspend: nothing to save — resume re-runs the full default
 * setup, so the controller state can simply be discarded here.
 */
static int nxp_fspi_suspend(struct device *dev)
{
	return 0;
}
1069
/*
 * System resume: re-initialize the controller from scratch.
 *
 * Fix: the return value of nxp_fspi_default_setup() was previously
 * discarded, so a failed clock re-setup was silently reported as a
 * successful resume.  Propagate the error to the PM core instead
 * (still returns 0 on success, so behavior is otherwise unchanged).
 */
static int nxp_fspi_resume(struct device *dev)
{
	struct nxp_fspi *f = dev_get_drvdata(dev);

	return nxp_fspi_default_setup(f);
}
1078
/* Device-tree match table; .data selects the per-SoC devtype data. */
static const struct of_device_id nxp_fspi_dt_ids[] = {
	{ .compatible = "nxp,lx2160a-fspi", .data = (void *)&lx2160a_data, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nxp_fspi_dt_ids);
1084
/* System sleep hooks: state is discarded on suspend, rebuilt on resume. */
static const struct dev_pm_ops nxp_fspi_pm_ops = {
	.suspend = nxp_fspi_suspend,
	.resume = nxp_fspi_resume,
};
1089
/* Platform driver glue and module metadata. */
static struct platform_driver nxp_fspi_driver = {
	.driver = {
		.name = "nxp-fspi",
		.of_match_table = nxp_fspi_dt_ids,
		.pm = &nxp_fspi_pm_ops,
	},
	.probe = nxp_fspi_probe,
	.remove = nxp_fspi_remove,
};
module_platform_driver(nxp_fspi_driver);

MODULE_DESCRIPTION("NXP FSPI Controller Driver");
MODULE_AUTHOR("NXP Semiconductor");
MODULE_AUTHOR("Yogesh Narayan Gaur <yogeshnarayan.gaur@nxp.com>");
MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 0c793e31d60f..26684178786f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -253,6 +253,7 @@
253#define STATE_RUNNING ((void *) 1) 253#define STATE_RUNNING ((void *) 1)
254#define STATE_DONE ((void *) 2) 254#define STATE_DONE ((void *) 2)
255#define STATE_ERROR ((void *) -1) 255#define STATE_ERROR ((void *) -1)
256#define STATE_TIMEOUT ((void *) -2)
256 257
257/* 258/*
258 * SSP State - Whether Enabled or Disabled 259 * SSP State - Whether Enabled or Disabled
@@ -1484,6 +1485,30 @@ err_config_dma:
1484 writew(irqflags, SSP_IMSC(pl022->virtbase)); 1485 writew(irqflags, SSP_IMSC(pl022->virtbase));
1485} 1486}
1486 1487
1488static void print_current_status(struct pl022 *pl022)
1489{
1490 u32 read_cr0;
1491 u16 read_cr1, read_dmacr, read_sr;
1492
1493 if (pl022->vendor->extended_cr)
1494 read_cr0 = readl(SSP_CR0(pl022->virtbase));
1495 else
1496 read_cr0 = readw(SSP_CR0(pl022->virtbase));
1497 read_cr1 = readw(SSP_CR1(pl022->virtbase));
1498 read_dmacr = readw(SSP_DMACR(pl022->virtbase));
1499 read_sr = readw(SSP_SR(pl022->virtbase));
1500
1501 dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
1502 dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
1503 dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
1504 dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
1505 dev_warn(&pl022->adev->dev,
1506 "spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
1507 pl022->exp_fifo_level,
1508 pl022->vendor->fifodepth);
1509
1510}
1511
1487static void do_polling_transfer(struct pl022 *pl022) 1512static void do_polling_transfer(struct pl022 *pl022)
1488{ 1513{
1489 struct spi_message *message = NULL; 1514 struct spi_message *message = NULL;
@@ -1535,7 +1560,8 @@ static void do_polling_transfer(struct pl022 *pl022)
1535 if (time_after(time, timeout)) { 1560 if (time_after(time, timeout)) {
1536 dev_warn(&pl022->adev->dev, 1561 dev_warn(&pl022->adev->dev,
1537 "%s: timeout!\n", __func__); 1562 "%s: timeout!\n", __func__);
1538 message->state = STATE_ERROR; 1563 message->state = STATE_TIMEOUT;
1564 print_current_status(pl022);
1539 goto out; 1565 goto out;
1540 } 1566 }
1541 cpu_relax(); 1567 cpu_relax();
@@ -1553,6 +1579,8 @@ out:
1553 /* Handle end of message */ 1579 /* Handle end of message */
1554 if (message->state == STATE_DONE) 1580 if (message->state == STATE_DONE)
1555 message->status = 0; 1581 message->status = 0;
1582 else if (message->state == STATE_TIMEOUT)
1583 message->status = -EAGAIN;
1556 else 1584 else
1557 message->status = -EIO; 1585 message->status = -EIO;
1558 1586
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 2fa7f4b43492..15592598273e 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -23,7 +23,7 @@
23static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, 23static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
24 bool error) 24 bool error)
25{ 25{
26 struct spi_message *msg = drv_data->master->cur_msg; 26 struct spi_message *msg = drv_data->controller->cur_msg;
27 27
28 /* 28 /*
29 * It is possible that one CPU is handling ROR interrupt and other 29 * It is possible that one CPU is handling ROR interrupt and other
@@ -59,7 +59,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
59 msg->status = -EIO; 59 msg->status = -EIO;
60 } 60 }
61 61
62 spi_finalize_current_transfer(drv_data->master); 62 spi_finalize_current_transfer(drv_data->controller);
63 } 63 }
64} 64}
65 65
@@ -74,7 +74,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
74 struct spi_transfer *xfer) 74 struct spi_transfer *xfer)
75{ 75{
76 struct chip_data *chip = 76 struct chip_data *chip =
77 spi_get_ctldata(drv_data->master->cur_msg->spi); 77 spi_get_ctldata(drv_data->controller->cur_msg->spi);
78 enum dma_slave_buswidth width; 78 enum dma_slave_buswidth width;
79 struct dma_slave_config cfg; 79 struct dma_slave_config cfg;
80 struct dma_chan *chan; 80 struct dma_chan *chan;
@@ -102,14 +102,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
102 cfg.dst_maxburst = chip->dma_burst_size; 102 cfg.dst_maxburst = chip->dma_burst_size;
103 103
104 sgt = &xfer->tx_sg; 104 sgt = &xfer->tx_sg;
105 chan = drv_data->master->dma_tx; 105 chan = drv_data->controller->dma_tx;
106 } else { 106 } else {
107 cfg.src_addr = drv_data->ssdr_physical; 107 cfg.src_addr = drv_data->ssdr_physical;
108 cfg.src_addr_width = width; 108 cfg.src_addr_width = width;
109 cfg.src_maxburst = chip->dma_burst_size; 109 cfg.src_maxburst = chip->dma_burst_size;
110 110
111 sgt = &xfer->rx_sg; 111 sgt = &xfer->rx_sg;
112 chan = drv_data->master->dma_rx; 112 chan = drv_data->controller->dma_rx;
113 } 113 }
114 114
115 ret = dmaengine_slave_config(chan, &cfg); 115 ret = dmaengine_slave_config(chan, &cfg);
@@ -130,8 +130,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
130 if (status & SSSR_ROR) { 130 if (status & SSSR_ROR) {
131 dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); 131 dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
132 132
133 dmaengine_terminate_async(drv_data->master->dma_rx); 133 dmaengine_terminate_async(drv_data->controller->dma_rx);
134 dmaengine_terminate_async(drv_data->master->dma_tx); 134 dmaengine_terminate_async(drv_data->controller->dma_tx);
135 135
136 pxa2xx_spi_dma_transfer_complete(drv_data, true); 136 pxa2xx_spi_dma_transfer_complete(drv_data, true);
137 return IRQ_HANDLED; 137 return IRQ_HANDLED;
@@ -171,15 +171,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
171 return 0; 171 return 0;
172 172
173err_rx: 173err_rx:
174 dmaengine_terminate_async(drv_data->master->dma_tx); 174 dmaengine_terminate_async(drv_data->controller->dma_tx);
175err_tx: 175err_tx:
176 return err; 176 return err;
177} 177}
178 178
179void pxa2xx_spi_dma_start(struct driver_data *drv_data) 179void pxa2xx_spi_dma_start(struct driver_data *drv_data)
180{ 180{
181 dma_async_issue_pending(drv_data->master->dma_rx); 181 dma_async_issue_pending(drv_data->controller->dma_rx);
182 dma_async_issue_pending(drv_data->master->dma_tx); 182 dma_async_issue_pending(drv_data->controller->dma_tx);
183 183
184 atomic_set(&drv_data->dma_running, 1); 184 atomic_set(&drv_data->dma_running, 1);
185} 185}
@@ -187,30 +187,30 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data)
187void pxa2xx_spi_dma_stop(struct driver_data *drv_data) 187void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
188{ 188{
189 atomic_set(&drv_data->dma_running, 0); 189 atomic_set(&drv_data->dma_running, 0);
190 dmaengine_terminate_sync(drv_data->master->dma_rx); 190 dmaengine_terminate_sync(drv_data->controller->dma_rx);
191 dmaengine_terminate_sync(drv_data->master->dma_tx); 191 dmaengine_terminate_sync(drv_data->controller->dma_tx);
192} 192}
193 193
194int pxa2xx_spi_dma_setup(struct driver_data *drv_data) 194int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
195{ 195{
196 struct pxa2xx_spi_master *pdata = drv_data->master_info; 196 struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
197 struct device *dev = &drv_data->pdev->dev; 197 struct device *dev = &drv_data->pdev->dev;
198 struct spi_controller *master = drv_data->master; 198 struct spi_controller *controller = drv_data->controller;
199 dma_cap_mask_t mask; 199 dma_cap_mask_t mask;
200 200
201 dma_cap_zero(mask); 201 dma_cap_zero(mask);
202 dma_cap_set(DMA_SLAVE, mask); 202 dma_cap_set(DMA_SLAVE, mask);
203 203
204 master->dma_tx = dma_request_slave_channel_compat(mask, 204 controller->dma_tx = dma_request_slave_channel_compat(mask,
205 pdata->dma_filter, pdata->tx_param, dev, "tx"); 205 pdata->dma_filter, pdata->tx_param, dev, "tx");
206 if (!master->dma_tx) 206 if (!controller->dma_tx)
207 return -ENODEV; 207 return -ENODEV;
208 208
209 master->dma_rx = dma_request_slave_channel_compat(mask, 209 controller->dma_rx = dma_request_slave_channel_compat(mask,
210 pdata->dma_filter, pdata->rx_param, dev, "rx"); 210 pdata->dma_filter, pdata->rx_param, dev, "rx");
211 if (!master->dma_rx) { 211 if (!controller->dma_rx) {
212 dma_release_channel(master->dma_tx); 212 dma_release_channel(controller->dma_tx);
213 master->dma_tx = NULL; 213 controller->dma_tx = NULL;
214 return -ENODEV; 214 return -ENODEV;
215 } 215 }
216 216
@@ -219,17 +219,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
219 219
220void pxa2xx_spi_dma_release(struct driver_data *drv_data) 220void pxa2xx_spi_dma_release(struct driver_data *drv_data)
221{ 221{
222 struct spi_controller *master = drv_data->master; 222 struct spi_controller *controller = drv_data->controller;
223 223
224 if (master->dma_rx) { 224 if (controller->dma_rx) {
225 dmaengine_terminate_sync(master->dma_rx); 225 dmaengine_terminate_sync(controller->dma_rx);
226 dma_release_channel(master->dma_rx); 226 dma_release_channel(controller->dma_rx);
227 master->dma_rx = NULL; 227 controller->dma_rx = NULL;
228 } 228 }
229 if (master->dma_tx) { 229 if (controller->dma_tx) {
230 dmaengine_terminate_sync(master->dma_tx); 230 dmaengine_terminate_sync(controller->dma_tx);
231 dma_release_channel(master->dma_tx); 231 dma_release_channel(controller->dma_tx);
232 master->dma_tx = NULL; 232 controller->dma_tx = NULL;
233 } 233 }
234} 234}
235 235
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 869f188b02eb..1727fdfbac28 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -197,7 +197,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
197 struct platform_device_info pi; 197 struct platform_device_info pi;
198 int ret; 198 int ret;
199 struct platform_device *pdev; 199 struct platform_device *pdev;
200 struct pxa2xx_spi_master spi_pdata; 200 struct pxa2xx_spi_controller spi_pdata;
201 struct ssp_device *ssp; 201 struct ssp_device *ssp;
202 struct pxa_spi_info *c; 202 struct pxa_spi_info *c;
203 char buf[40]; 203 char buf[40];
@@ -265,7 +265,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
265static void pxa2xx_spi_pci_remove(struct pci_dev *dev) 265static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
266{ 266{
267 struct platform_device *pdev = pci_get_drvdata(dev); 267 struct platform_device *pdev = pci_get_drvdata(dev);
268 struct pxa2xx_spi_master *spi_pdata; 268 struct pxa2xx_spi_controller *spi_pdata;
269 269
270 spi_pdata = dev_get_platdata(&pdev->dev); 270 spi_pdata = dev_get_platdata(&pdev->dev);
271 271
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 3e82eaad0f2d..b6ddba833d02 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -328,7 +328,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
328 __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); 328 __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
329 329
330 /* Enable multiblock DMA transfers */ 330 /* Enable multiblock DMA transfers */
331 if (drv_data->master_info->enable_dma) { 331 if (drv_data->controller_info->enable_dma) {
332 __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1); 332 __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
333 333
334 if (config->reg_general >= 0) { 334 if (config->reg_general >= 0) {
@@ -368,7 +368,7 @@ static void lpss_ssp_select_cs(struct spi_device *spi,
368 __lpss_ssp_write_priv(drv_data, 368 __lpss_ssp_write_priv(drv_data,
369 config->reg_cs_ctrl, value); 369 config->reg_cs_ctrl, value);
370 ndelay(1000000000 / 370 ndelay(1000000000 /
371 (drv_data->master->max_speed_hz / 2)); 371 (drv_data->controller->max_speed_hz / 2));
372 } 372 }
373} 373}
374 374
@@ -567,7 +567,7 @@ static int u32_reader(struct driver_data *drv_data)
567static void reset_sccr1(struct driver_data *drv_data) 567static void reset_sccr1(struct driver_data *drv_data)
568{ 568{
569 struct chip_data *chip = 569 struct chip_data *chip =
570 spi_get_ctldata(drv_data->master->cur_msg->spi); 570 spi_get_ctldata(drv_data->controller->cur_msg->spi);
571 u32 sccr1_reg; 571 u32 sccr1_reg;
572 572
573 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; 573 sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
@@ -599,8 +599,8 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
599 599
600 dev_err(&drv_data->pdev->dev, "%s\n", msg); 600 dev_err(&drv_data->pdev->dev, "%s\n", msg);
601 601
602 drv_data->master->cur_msg->status = -EIO; 602 drv_data->controller->cur_msg->status = -EIO;
603 spi_finalize_current_transfer(drv_data->master); 603 spi_finalize_current_transfer(drv_data->controller);
604} 604}
605 605
606static void int_transfer_complete(struct driver_data *drv_data) 606static void int_transfer_complete(struct driver_data *drv_data)
@@ -611,7 +611,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
611 if (!pxa25x_ssp_comp(drv_data)) 611 if (!pxa25x_ssp_comp(drv_data))
612 pxa2xx_spi_write(drv_data, SSTO, 0); 612 pxa2xx_spi_write(drv_data, SSTO, 0);
613 613
614 spi_finalize_current_transfer(drv_data->master); 614 spi_finalize_current_transfer(drv_data->controller);
615} 615}
616 616
617static irqreturn_t interrupt_transfer(struct driver_data *drv_data) 617static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
@@ -747,7 +747,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
747 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1); 747 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
748 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); 748 pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
749 749
750 if (!drv_data->master->cur_msg) { 750 if (!drv_data->controller->cur_msg) {
751 handle_bad_msg(drv_data); 751 handle_bad_msg(drv_data);
752 /* Never fail */ 752 /* Never fail */
753 return IRQ_HANDLED; 753 return IRQ_HANDLED;
@@ -879,7 +879,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
879 879
880static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) 880static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
881{ 881{
882 unsigned long ssp_clk = drv_data->master->max_speed_hz; 882 unsigned long ssp_clk = drv_data->controller->max_speed_hz;
883 const struct ssp_device *ssp = drv_data->ssp; 883 const struct ssp_device *ssp = drv_data->ssp;
884 884
885 rate = min_t(int, ssp_clk, rate); 885 rate = min_t(int, ssp_clk, rate);
@@ -894,7 +894,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
894 int rate) 894 int rate)
895{ 895{
896 struct chip_data *chip = 896 struct chip_data *chip =
897 spi_get_ctldata(drv_data->master->cur_msg->spi); 897 spi_get_ctldata(drv_data->controller->cur_msg->spi);
898 unsigned int clk_div; 898 unsigned int clk_div;
899 899
900 switch (drv_data->ssp_type) { 900 switch (drv_data->ssp_type) {
@@ -908,7 +908,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
908 return clk_div << 8; 908 return clk_div << 8;
909} 909}
910 910
911static bool pxa2xx_spi_can_dma(struct spi_controller *master, 911static bool pxa2xx_spi_can_dma(struct spi_controller *controller,
912 struct spi_device *spi, 912 struct spi_device *spi,
913 struct spi_transfer *xfer) 913 struct spi_transfer *xfer)
914{ 914{
@@ -919,12 +919,12 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *master,
919 xfer->len >= chip->dma_burst_size; 919 xfer->len >= chip->dma_burst_size;
920} 920}
921 921
922static int pxa2xx_spi_transfer_one(struct spi_controller *master, 922static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
923 struct spi_device *spi, 923 struct spi_device *spi,
924 struct spi_transfer *transfer) 924 struct spi_transfer *transfer)
925{ 925{
926 struct driver_data *drv_data = spi_controller_get_devdata(master); 926 struct driver_data *drv_data = spi_controller_get_devdata(controller);
927 struct spi_message *message = master->cur_msg; 927 struct spi_message *message = controller->cur_msg;
928 struct chip_data *chip = spi_get_ctldata(message->spi); 928 struct chip_data *chip = spi_get_ctldata(message->spi);
929 u32 dma_thresh = chip->dma_threshold; 929 u32 dma_thresh = chip->dma_threshold;
930 u32 dma_burst = chip->dma_burst_size; 930 u32 dma_burst = chip->dma_burst_size;
@@ -1006,9 +1006,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
1006 "DMA burst size reduced to match bits_per_word\n"); 1006 "DMA burst size reduced to match bits_per_word\n");
1007 } 1007 }
1008 1008
1009 dma_mapped = master->can_dma && 1009 dma_mapped = controller->can_dma &&
1010 master->can_dma(master, message->spi, transfer) && 1010 controller->can_dma(controller, message->spi, transfer) &&
1011 master->cur_msg_mapped; 1011 controller->cur_msg_mapped;
1012 if (dma_mapped) { 1012 if (dma_mapped) {
1013 1013
1014 /* Ensure we have the correct interrupt handler */ 1014 /* Ensure we have the correct interrupt handler */
@@ -1036,12 +1036,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
1036 cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits); 1036 cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
1037 if (!pxa25x_ssp_comp(drv_data)) 1037 if (!pxa25x_ssp_comp(drv_data))
1038 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", 1038 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
1039 master->max_speed_hz 1039 controller->max_speed_hz
1040 / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)), 1040 / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
1041 dma_mapped ? "DMA" : "PIO"); 1041 dma_mapped ? "DMA" : "PIO");
1042 else 1042 else
1043 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n", 1043 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
1044 master->max_speed_hz / 2 1044 controller->max_speed_hz / 2
1045 / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)), 1045 / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1046 dma_mapped ? "DMA" : "PIO"); 1046 dma_mapped ? "DMA" : "PIO");
1047 1047
@@ -1092,7 +1092,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
1092 } 1092 }
1093 } 1093 }
1094 1094
1095 if (spi_controller_is_slave(master)) { 1095 if (spi_controller_is_slave(controller)) {
1096 while (drv_data->write(drv_data)) 1096 while (drv_data->write(drv_data))
1097 ; 1097 ;
1098 if (drv_data->gpiod_ready) { 1098 if (drv_data->gpiod_ready) {
@@ -1111,9 +1111,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
1111 return 1; 1111 return 1;
1112} 1112}
1113 1113
1114static int pxa2xx_spi_slave_abort(struct spi_master *master) 1114static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
1115{ 1115{
1116 struct driver_data *drv_data = spi_controller_get_devdata(master); 1116 struct driver_data *drv_data = spi_controller_get_devdata(controller);
1117 1117
1118 /* Stop and reset SSP */ 1118 /* Stop and reset SSP */
1119 write_SSSR_CS(drv_data, drv_data->clear_sr); 1119 write_SSSR_CS(drv_data, drv_data->clear_sr);
@@ -1126,16 +1126,16 @@ static int pxa2xx_spi_slave_abort(struct spi_master *master)
1126 1126
1127 dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); 1127 dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
1128 1128
1129 drv_data->master->cur_msg->status = -EINTR; 1129 drv_data->controller->cur_msg->status = -EINTR;
1130 spi_finalize_current_transfer(drv_data->master); 1130 spi_finalize_current_transfer(drv_data->controller);
1131 1131
1132 return 0; 1132 return 0;
1133} 1133}
1134 1134
1135static void pxa2xx_spi_handle_err(struct spi_controller *master, 1135static void pxa2xx_spi_handle_err(struct spi_controller *controller,
1136 struct spi_message *msg) 1136 struct spi_message *msg)
1137{ 1137{
1138 struct driver_data *drv_data = spi_controller_get_devdata(master); 1138 struct driver_data *drv_data = spi_controller_get_devdata(controller);
1139 1139
1140 /* Disable the SSP */ 1140 /* Disable the SSP */
1141 pxa2xx_spi_write(drv_data, SSCR0, 1141 pxa2xx_spi_write(drv_data, SSCR0,
@@ -1159,9 +1159,9 @@ static void pxa2xx_spi_handle_err(struct spi_controller *master,
1159 pxa2xx_spi_dma_stop(drv_data); 1159 pxa2xx_spi_dma_stop(drv_data);
1160} 1160}
1161 1161
1162static int pxa2xx_spi_unprepare_transfer(struct spi_controller *master) 1162static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
1163{ 1163{
1164 struct driver_data *drv_data = spi_controller_get_devdata(master); 1164 struct driver_data *drv_data = spi_controller_get_devdata(controller);
1165 1165
1166 /* Disable the SSP now */ 1166 /* Disable the SSP now */
1167 pxa2xx_spi_write(drv_data, SSCR0, 1167 pxa2xx_spi_write(drv_data, SSCR0,
@@ -1260,7 +1260,7 @@ static int setup(struct spi_device *spi)
1260 break; 1260 break;
1261 default: 1261 default:
1262 tx_hi_thres = 0; 1262 tx_hi_thres = 0;
1263 if (spi_controller_is_slave(drv_data->master)) { 1263 if (spi_controller_is_slave(drv_data->controller)) {
1264 tx_thres = 1; 1264 tx_thres = 1;
1265 rx_thres = 2; 1265 rx_thres = 2;
1266 } else { 1266 } else {
@@ -1287,7 +1287,7 @@ static int setup(struct spi_device *spi)
1287 1287
1288 chip->frm = spi->chip_select; 1288 chip->frm = spi->chip_select;
1289 } 1289 }
1290 chip->enable_dma = drv_data->master_info->enable_dma; 1290 chip->enable_dma = drv_data->controller_info->enable_dma;
1291 chip->timeout = TIMOUT_DFLT; 1291 chip->timeout = TIMOUT_DFLT;
1292 } 1292 }
1293 1293
@@ -1310,7 +1310,7 @@ static int setup(struct spi_device *spi)
1310 if (chip_info->enable_loopback) 1310 if (chip_info->enable_loopback)
1311 chip->cr1 = SSCR1_LBM; 1311 chip->cr1 = SSCR1_LBM;
1312 } 1312 }
1313 if (spi_controller_is_slave(drv_data->master)) { 1313 if (spi_controller_is_slave(drv_data->controller)) {
1314 chip->cr1 |= SSCR1_SCFR; 1314 chip->cr1 |= SSCR1_SCFR;
1315 chip->cr1 |= SSCR1_SCLKDIR; 1315 chip->cr1 |= SSCR1_SCLKDIR;
1316 chip->cr1 |= SSCR1_SFRMDIR; 1316 chip->cr1 |= SSCR1_SFRMDIR;
@@ -1497,10 +1497,10 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
1497 1497
1498#endif /* CONFIG_PCI */ 1498#endif /* CONFIG_PCI */
1499 1499
1500static struct pxa2xx_spi_master * 1500static struct pxa2xx_spi_controller *
1501pxa2xx_spi_init_pdata(struct platform_device *pdev) 1501pxa2xx_spi_init_pdata(struct platform_device *pdev)
1502{ 1502{
1503 struct pxa2xx_spi_master *pdata; 1503 struct pxa2xx_spi_controller *pdata;
1504 struct acpi_device *adev; 1504 struct acpi_device *adev;
1505 struct ssp_device *ssp; 1505 struct ssp_device *ssp;
1506 struct resource *res; 1506 struct resource *res;
@@ -1568,10 +1568,10 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
1568 return pdata; 1568 return pdata;
1569} 1569}
1570 1570
1571static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master, 1571static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
1572 unsigned int cs) 1572 unsigned int cs)
1573{ 1573{
1574 struct driver_data *drv_data = spi_controller_get_devdata(master); 1574 struct driver_data *drv_data = spi_controller_get_devdata(controller);
1575 1575
1576 if (has_acpi_companion(&drv_data->pdev->dev)) { 1576 if (has_acpi_companion(&drv_data->pdev->dev)) {
1577 switch (drv_data->ssp_type) { 1577 switch (drv_data->ssp_type) {
@@ -1595,8 +1595,8 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
1595static int pxa2xx_spi_probe(struct platform_device *pdev) 1595static int pxa2xx_spi_probe(struct platform_device *pdev)
1596{ 1596{
1597 struct device *dev = &pdev->dev; 1597 struct device *dev = &pdev->dev;
1598 struct pxa2xx_spi_master *platform_info; 1598 struct pxa2xx_spi_controller *platform_info;
1599 struct spi_controller *master; 1599 struct spi_controller *controller;
1600 struct driver_data *drv_data; 1600 struct driver_data *drv_data;
1601 struct ssp_device *ssp; 1601 struct ssp_device *ssp;
1602 const struct lpss_config *config; 1602 const struct lpss_config *config;
@@ -1622,37 +1622,37 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1622 } 1622 }
1623 1623
1624 if (platform_info->is_slave) 1624 if (platform_info->is_slave)
1625 master = spi_alloc_slave(dev, sizeof(struct driver_data)); 1625 controller = spi_alloc_slave(dev, sizeof(struct driver_data));
1626 else 1626 else
1627 master = spi_alloc_master(dev, sizeof(struct driver_data)); 1627 controller = spi_alloc_master(dev, sizeof(struct driver_data));
1628 1628
1629 if (!master) { 1629 if (!controller) {
1630 dev_err(&pdev->dev, "cannot alloc spi_master\n"); 1630 dev_err(&pdev->dev, "cannot alloc spi_controller\n");
1631 pxa_ssp_free(ssp); 1631 pxa_ssp_free(ssp);
1632 return -ENOMEM; 1632 return -ENOMEM;
1633 } 1633 }
1634 drv_data = spi_controller_get_devdata(master); 1634 drv_data = spi_controller_get_devdata(controller);
1635 drv_data->master = master; 1635 drv_data->controller = controller;
1636 drv_data->master_info = platform_info; 1636 drv_data->controller_info = platform_info;
1637 drv_data->pdev = pdev; 1637 drv_data->pdev = pdev;
1638 drv_data->ssp = ssp; 1638 drv_data->ssp = ssp;
1639 1639
1640 master->dev.of_node = pdev->dev.of_node; 1640 controller->dev.of_node = pdev->dev.of_node;
1641 /* the spi->mode bits understood by this driver: */ 1641 /* the spi->mode bits understood by this driver: */
1642 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 1642 controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
1643 1643
1644 master->bus_num = ssp->port_id; 1644 controller->bus_num = ssp->port_id;
1645 master->dma_alignment = DMA_ALIGNMENT; 1645 controller->dma_alignment = DMA_ALIGNMENT;
1646 master->cleanup = cleanup; 1646 controller->cleanup = cleanup;
1647 master->setup = setup; 1647 controller->setup = setup;
1648 master->set_cs = pxa2xx_spi_set_cs; 1648 controller->set_cs = pxa2xx_spi_set_cs;
1649 master->transfer_one = pxa2xx_spi_transfer_one; 1649 controller->transfer_one = pxa2xx_spi_transfer_one;
1650 master->slave_abort = pxa2xx_spi_slave_abort; 1650 controller->slave_abort = pxa2xx_spi_slave_abort;
1651 master->handle_err = pxa2xx_spi_handle_err; 1651 controller->handle_err = pxa2xx_spi_handle_err;
1652 master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; 1652 controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
1653 master->fw_translate_cs = pxa2xx_spi_fw_translate_cs; 1653 controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
1654 master->auto_runtime_pm = true; 1654 controller->auto_runtime_pm = true;
1655 master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; 1655 controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
1656 1656
1657 drv_data->ssp_type = ssp->type; 1657 drv_data->ssp_type = ssp->type;
1658 1658
@@ -1661,10 +1661,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1661 if (pxa25x_ssp_comp(drv_data)) { 1661 if (pxa25x_ssp_comp(drv_data)) {
1662 switch (drv_data->ssp_type) { 1662 switch (drv_data->ssp_type) {
1663 case QUARK_X1000_SSP: 1663 case QUARK_X1000_SSP:
1664 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 1664 controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1665 break; 1665 break;
1666 default: 1666 default:
1667 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16); 1667 controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
1668 break; 1668 break;
1669 } 1669 }
1670 1670
@@ -1673,7 +1673,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1673 drv_data->clear_sr = SSSR_ROR; 1673 drv_data->clear_sr = SSSR_ROR;
1674 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; 1674 drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
1675 } else { 1675 } else {
1676 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 1676 controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1677 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; 1677 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
1678 drv_data->dma_cr1 = DEFAULT_DMA_CR1; 1678 drv_data->dma_cr1 = DEFAULT_DMA_CR1;
1679 drv_data->clear_sr = SSSR_ROR | SSSR_TINT; 1679 drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
@@ -1685,7 +1685,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1685 drv_data); 1685 drv_data);
1686 if (status < 0) { 1686 if (status < 0) {
1687 dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); 1687 dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
1688 goto out_error_master_alloc; 1688 goto out_error_controller_alloc;
1689 } 1689 }
1690 1690
1691 /* Setup DMA if requested */ 1691 /* Setup DMA if requested */
@@ -1695,8 +1695,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1695 dev_dbg(dev, "no DMA channels available, using PIO\n"); 1695 dev_dbg(dev, "no DMA channels available, using PIO\n");
1696 platform_info->enable_dma = false; 1696 platform_info->enable_dma = false;
1697 } else { 1697 } else {
1698 master->can_dma = pxa2xx_spi_can_dma; 1698 controller->can_dma = pxa2xx_spi_can_dma;
1699 master->max_dma_len = MAX_DMA_LEN; 1699 controller->max_dma_len = MAX_DMA_LEN;
1700 } 1700 }
1701 } 1701 }
1702 1702
@@ -1705,7 +1705,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1705 if (status) 1705 if (status)
1706 goto out_error_dma_irq_alloc; 1706 goto out_error_dma_irq_alloc;
1707 1707
1708 master->max_speed_hz = clk_get_rate(ssp->clk); 1708 controller->max_speed_hz = clk_get_rate(ssp->clk);
1709 1709
1710 /* Load default SSP configuration */ 1710 /* Load default SSP configuration */
1711 pxa2xx_spi_write(drv_data, SSCR0, 0); 1711 pxa2xx_spi_write(drv_data, SSCR0, 0);
@@ -1728,7 +1728,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1728 break; 1728 break;
1729 default: 1729 default:
1730 1730
1731 if (spi_controller_is_slave(master)) { 1731 if (spi_controller_is_slave(controller)) {
1732 tmp = SSCR1_SCFR | 1732 tmp = SSCR1_SCFR |
1733 SSCR1_SCLKDIR | 1733 SSCR1_SCLKDIR |
1734 SSCR1_SFRMDIR | 1734 SSCR1_SFRMDIR |
@@ -1741,7 +1741,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1741 } 1741 }
1742 pxa2xx_spi_write(drv_data, SSCR1, tmp); 1742 pxa2xx_spi_write(drv_data, SSCR1, tmp);
1743 tmp = SSCR0_Motorola | SSCR0_DataSize(8); 1743 tmp = SSCR0_Motorola | SSCR0_DataSize(8);
1744 if (!spi_controller_is_slave(master)) 1744 if (!spi_controller_is_slave(controller))
1745 tmp |= SSCR0_SCR(2); 1745 tmp |= SSCR0_SCR(2);
1746 pxa2xx_spi_write(drv_data, SSCR0, tmp); 1746 pxa2xx_spi_write(drv_data, SSCR0, tmp);
1747 break; 1747 break;
@@ -1766,24 +1766,24 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1766 platform_info->num_chipselect = config->cs_num; 1766 platform_info->num_chipselect = config->cs_num;
1767 } 1767 }
1768 } 1768 }
1769 master->num_chipselect = platform_info->num_chipselect; 1769 controller->num_chipselect = platform_info->num_chipselect;
1770 1770
1771 count = gpiod_count(&pdev->dev, "cs"); 1771 count = gpiod_count(&pdev->dev, "cs");
1772 if (count > 0) { 1772 if (count > 0) {
1773 int i; 1773 int i;
1774 1774
1775 master->num_chipselect = max_t(int, count, 1775 controller->num_chipselect = max_t(int, count,
1776 master->num_chipselect); 1776 controller->num_chipselect);
1777 1777
1778 drv_data->cs_gpiods = devm_kcalloc(&pdev->dev, 1778 drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
1779 master->num_chipselect, sizeof(struct gpio_desc *), 1779 controller->num_chipselect, sizeof(struct gpio_desc *),
1780 GFP_KERNEL); 1780 GFP_KERNEL);
1781 if (!drv_data->cs_gpiods) { 1781 if (!drv_data->cs_gpiods) {
1782 status = -ENOMEM; 1782 status = -ENOMEM;
1783 goto out_error_clock_enabled; 1783 goto out_error_clock_enabled;
1784 } 1784 }
1785 1785
1786 for (i = 0; i < master->num_chipselect; i++) { 1786 for (i = 0; i < controller->num_chipselect; i++) {
1787 struct gpio_desc *gpiod; 1787 struct gpio_desc *gpiod;
1788 1788
1789 gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); 1789 gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
@@ -1816,9 +1816,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1816 1816
1817 /* Register with the SPI framework */ 1817 /* Register with the SPI framework */
1818 platform_set_drvdata(pdev, drv_data); 1818 platform_set_drvdata(pdev, drv_data);
1819 status = devm_spi_register_controller(&pdev->dev, master); 1819 status = devm_spi_register_controller(&pdev->dev, controller);
1820 if (status != 0) { 1820 if (status != 0) {
1821 dev_err(&pdev->dev, "problem registering spi master\n"); 1821 dev_err(&pdev->dev, "problem registering spi controller\n");
1822 goto out_error_clock_enabled; 1822 goto out_error_clock_enabled;
1823 } 1823 }
1824 1824
@@ -1833,8 +1833,8 @@ out_error_dma_irq_alloc:
1833 pxa2xx_spi_dma_release(drv_data); 1833 pxa2xx_spi_dma_release(drv_data);
1834 free_irq(ssp->irq, drv_data); 1834 free_irq(ssp->irq, drv_data);
1835 1835
1836out_error_master_alloc: 1836out_error_controller_alloc:
1837 spi_controller_put(master); 1837 spi_controller_put(controller);
1838 pxa_ssp_free(ssp); 1838 pxa_ssp_free(ssp);
1839 return status; 1839 return status;
1840} 1840}
@@ -1855,7 +1855,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
1855 clk_disable_unprepare(ssp->clk); 1855 clk_disable_unprepare(ssp->clk);
1856 1856
1857 /* Release DMA */ 1857 /* Release DMA */
1858 if (drv_data->master_info->enable_dma) 1858 if (drv_data->controller_info->enable_dma)
1859 pxa2xx_spi_dma_release(drv_data); 1859 pxa2xx_spi_dma_release(drv_data);
1860 1860
1861 pm_runtime_put_noidle(&pdev->dev); 1861 pm_runtime_put_noidle(&pdev->dev);
@@ -1877,7 +1877,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
1877 struct ssp_device *ssp = drv_data->ssp; 1877 struct ssp_device *ssp = drv_data->ssp;
1878 int status; 1878 int status;
1879 1879
1880 status = spi_controller_suspend(drv_data->master); 1880 status = spi_controller_suspend(drv_data->controller);
1881 if (status != 0) 1881 if (status != 0)
1882 return status; 1882 return status;
1883 pxa2xx_spi_write(drv_data, SSCR0, 0); 1883 pxa2xx_spi_write(drv_data, SSCR0, 0);
@@ -1902,7 +1902,7 @@ static int pxa2xx_spi_resume(struct device *dev)
1902 } 1902 }
1903 1903
1904 /* Start the queue running */ 1904 /* Start the queue running */
1905 return spi_controller_resume(drv_data->master); 1905 return spi_controller_resume(drv_data->controller);
1906} 1906}
1907#endif 1907#endif
1908 1908
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 4e324da66ef7..aba777b4502d 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -31,10 +31,10 @@ struct driver_data {
31 31
32 /* SPI framework hookup */ 32 /* SPI framework hookup */
33 enum pxa_ssp_type ssp_type; 33 enum pxa_ssp_type ssp_type;
34 struct spi_controller *master; 34 struct spi_controller *controller;
35 35
36 /* PXA hookup */ 36 /* PXA hookup */
37 struct pxa2xx_spi_master *master_info; 37 struct pxa2xx_spi_controller *controller_info;
38 38
39 /* SSP register addresses */ 39 /* SSP register addresses */
40 void __iomem *ioaddr; 40 void __iomem *ioaddr;
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a4ef641b5227..556870dcdf79 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -180,7 +180,7 @@
180struct rspi_data { 180struct rspi_data {
181 void __iomem *addr; 181 void __iomem *addr;
182 u32 max_speed_hz; 182 u32 max_speed_hz;
183 struct spi_master *master; 183 struct spi_controller *ctlr;
184 wait_queue_head_t wait; 184 wait_queue_head_t wait;
185 struct clk *clk; 185 struct clk *clk;
186 u16 spcmd; 186 u16 spcmd;
@@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi)
237/* optional functions */ 237/* optional functions */
238struct spi_ops { 238struct spi_ops {
239 int (*set_config_register)(struct rspi_data *rspi, int access_size); 239 int (*set_config_register)(struct rspi_data *rspi, int access_size);
240 int (*transfer_one)(struct spi_master *master, struct spi_device *spi, 240 int (*transfer_one)(struct spi_controller *ctlr,
241 struct spi_transfer *xfer); 241 struct spi_device *spi, struct spi_transfer *xfer);
242 u16 mode_bits; 242 u16 mode_bits;
243 u16 flags; 243 u16 flags;
244 u16 fifo_size; 244 u16 fifo_size;
@@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data)
466{ 466{
467 int error = rspi_wait_for_tx_empty(rspi); 467 int error = rspi_wait_for_tx_empty(rspi);
468 if (error < 0) { 468 if (error < 0) {
469 dev_err(&rspi->master->dev, "transmit timeout\n"); 469 dev_err(&rspi->ctlr->dev, "transmit timeout\n");
470 return error; 470 return error;
471 } 471 }
472 rspi_write_data(rspi, data); 472 rspi_write_data(rspi, data);
@@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi)
480 480
481 error = rspi_wait_for_rx_full(rspi); 481 error = rspi_wait_for_rx_full(rspi);
482 if (error < 0) { 482 if (error < 0) {
483 dev_err(&rspi->master->dev, "receive timeout\n"); 483 dev_err(&rspi->ctlr->dev, "receive timeout\n");
484 return error; 484 return error;
485 } 485 }
486 data = rspi_read_data(rspi); 486 data = rspi_read_data(rspi);
@@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
526 526
527 /* First prepare and submit the DMA request(s), as this may fail */ 527 /* First prepare and submit the DMA request(s), as this may fail */
528 if (rx) { 528 if (rx) {
529 desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx, 529 desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
530 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 530 rx->nents, DMA_DEV_TO_MEM,
531 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 531 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
532 if (!desc_rx) { 532 if (!desc_rx) {
533 ret = -EAGAIN; 533 ret = -EAGAIN;
@@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
546 } 546 }
547 547
548 if (tx) { 548 if (tx) {
549 desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx, 549 desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
550 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 550 tx->nents, DMA_MEM_TO_DEV,
551 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 551 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
552 if (!desc_tx) { 552 if (!desc_tx) {
553 ret = -EAGAIN; 553 ret = -EAGAIN;
@@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
584 584
585 /* Now start DMA */ 585 /* Now start DMA */
586 if (rx) 586 if (rx)
587 dma_async_issue_pending(rspi->master->dma_rx); 587 dma_async_issue_pending(rspi->ctlr->dma_rx);
588 if (tx) 588 if (tx)
589 dma_async_issue_pending(rspi->master->dma_tx); 589 dma_async_issue_pending(rspi->ctlr->dma_tx);
590 590
591 ret = wait_event_interruptible_timeout(rspi->wait, 591 ret = wait_event_interruptible_timeout(rspi->wait,
592 rspi->dma_callbacked, HZ); 592 rspi->dma_callbacked, HZ);
@@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
594 ret = 0; 594 ret = 0;
595 } else { 595 } else {
596 if (!ret) { 596 if (!ret) {
597 dev_err(&rspi->master->dev, "DMA timeout\n"); 597 dev_err(&rspi->ctlr->dev, "DMA timeout\n");
598 ret = -ETIMEDOUT; 598 ret = -ETIMEDOUT;
599 } 599 }
600 if (tx) 600 if (tx)
601 dmaengine_terminate_all(rspi->master->dma_tx); 601 dmaengine_terminate_all(rspi->ctlr->dma_tx);
602 if (rx) 602 if (rx)
603 dmaengine_terminate_all(rspi->master->dma_rx); 603 dmaengine_terminate_all(rspi->ctlr->dma_rx);
604 } 604 }
605 605
606 rspi_disable_irq(rspi, irq_mask); 606 rspi_disable_irq(rspi, irq_mask);
@@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
614 614
615no_dma_tx: 615no_dma_tx:
616 if (rx) 616 if (rx)
617 dmaengine_terminate_all(rspi->master->dma_rx); 617 dmaengine_terminate_all(rspi->ctlr->dma_rx);
618no_dma_rx: 618no_dma_rx:
619 if (ret == -EAGAIN) { 619 if (ret == -EAGAIN) {
620 pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 620 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
621 dev_driver_string(&rspi->master->dev), 621 dev_driver_string(&rspi->ctlr->dev),
622 dev_name(&rspi->master->dev)); 622 dev_name(&rspi->ctlr->dev));
623 } 623 }
624 return ret; 624 return ret;
625} 625}
@@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi,
660 return xfer->len > rspi->ops->fifo_size; 660 return xfer->len > rspi->ops->fifo_size;
661} 661}
662 662
663static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi, 663static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
664 struct spi_transfer *xfer) 664 struct spi_transfer *xfer)
665{ 665{
666 struct rspi_data *rspi = spi_master_get_devdata(master); 666 struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
667 667
668 return __rspi_can_dma(rspi, xfer); 668 return __rspi_can_dma(rspi, xfer);
669} 669}
@@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
671static int rspi_dma_check_then_transfer(struct rspi_data *rspi, 671static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
672 struct spi_transfer *xfer) 672 struct spi_transfer *xfer)
673{ 673{
674 if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer)) 674 if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
675 return -EAGAIN; 675 return -EAGAIN;
676 676
677 /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ 677 /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
@@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi,
698 return 0; 698 return 0;
699} 699}
700 700
701static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi, 701static int rspi_transfer_one(struct spi_controller *ctlr,
702 struct spi_transfer *xfer) 702 struct spi_device *spi, struct spi_transfer *xfer)
703{ 703{
704 struct rspi_data *rspi = spi_master_get_devdata(master); 704 struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
705 u8 spcr; 705 u8 spcr;
706 706
707 spcr = rspi_read8(rspi, RSPI_SPCR); 707 spcr = rspi_read8(rspi, RSPI_SPCR);
@@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
716 return rspi_common_transfer(rspi, xfer); 716 return rspi_common_transfer(rspi, xfer);
717} 717}
718 718
719static int rspi_rz_transfer_one(struct spi_master *master, 719static int rspi_rz_transfer_one(struct spi_controller *ctlr,
720 struct spi_device *spi, 720 struct spi_device *spi,
721 struct spi_transfer *xfer) 721 struct spi_transfer *xfer)
722{ 722{
723 struct rspi_data *rspi = spi_master_get_devdata(master); 723 struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
724 724
725 rspi_rz_receive_init(rspi); 725 rspi_rz_receive_init(rspi);
726 726
@@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
739 if (n == QSPI_BUFFER_SIZE) { 739 if (n == QSPI_BUFFER_SIZE) {
740 ret = rspi_wait_for_tx_empty(rspi); 740 ret = rspi_wait_for_tx_empty(rspi);
741 if (ret < 0) { 741 if (ret < 0) {
742 dev_err(&rspi->master->dev, "transmit timeout\n"); 742 dev_err(&rspi->ctlr->dev, "transmit timeout\n");
743 return ret; 743 return ret;
744 } 744 }
745 for (i = 0; i < n; i++) 745 for (i = 0; i < n; i++)
@@ -747,7 +747,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
747 747
748 ret = rspi_wait_for_rx_full(rspi); 748 ret = rspi_wait_for_rx_full(rspi);
749 if (ret < 0) { 749 if (ret < 0) {
750 dev_err(&rspi->master->dev, "receive timeout\n"); 750 dev_err(&rspi->ctlr->dev, "receive timeout\n");
751 return ret; 751 return ret;
752 } 752 }
753 for (i = 0; i < n; i++) 753 for (i = 0; i < n; i++)
@@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
785 unsigned int i, len; 785 unsigned int i, len;
786 int ret; 786 int ret;
787 787
788 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { 788 if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
789 ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL); 789 ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
790 if (ret != -EAGAIN) 790 if (ret != -EAGAIN)
791 return ret; 791 return ret;
@@ -796,7 +796,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
796 if (len == QSPI_BUFFER_SIZE) { 796 if (len == QSPI_BUFFER_SIZE) {
797 ret = rspi_wait_for_tx_empty(rspi); 797 ret = rspi_wait_for_tx_empty(rspi);
798 if (ret < 0) { 798 if (ret < 0) {
799 dev_err(&rspi->master->dev, "transmit timeout\n"); 799 dev_err(&rspi->ctlr->dev, "transmit timeout\n");
800 return ret; 800 return ret;
801 } 801 }
802 for (i = 0; i < len; i++) 802 for (i = 0; i < len; i++)
@@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
822 unsigned int i, len; 822 unsigned int i, len;
823 int ret; 823 int ret;
824 824
825 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { 825 if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
826 int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg); 826 int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
827 if (ret != -EAGAIN) 827 if (ret != -EAGAIN)
828 return ret; 828 return ret;
@@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
833 if (len == QSPI_BUFFER_SIZE) { 833 if (len == QSPI_BUFFER_SIZE) {
834 ret = rspi_wait_for_rx_full(rspi); 834 ret = rspi_wait_for_rx_full(rspi);
835 if (ret < 0) { 835 if (ret < 0) {
836 dev_err(&rspi->master->dev, "receive timeout\n"); 836 dev_err(&rspi->ctlr->dev, "receive timeout\n");
837 return ret; 837 return ret;
838 } 838 }
839 for (i = 0; i < len; i++) 839 for (i = 0; i < len; i++)
@@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
849 return 0; 849 return 0;
850} 850}
851 851
852static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, 852static int qspi_transfer_one(struct spi_controller *ctlr,
853 struct spi_transfer *xfer) 853 struct spi_device *spi, struct spi_transfer *xfer)
854{ 854{
855 struct rspi_data *rspi = spi_master_get_devdata(master); 855 struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
856 856
857 if (spi->mode & SPI_LOOP) { 857 if (spi->mode & SPI_LOOP) {
858 return qspi_transfer_out_in(rspi, xfer); 858 return qspi_transfer_out_in(rspi, xfer);
@@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
870 870
871static int rspi_setup(struct spi_device *spi) 871static int rspi_setup(struct spi_device *spi)
872{ 872{
873 struct rspi_data *rspi = spi_master_get_devdata(spi->master); 873 struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
874 874
875 rspi->max_speed_hz = spi->max_speed_hz; 875 rspi->max_speed_hz = spi->max_speed_hz;
876 876
@@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi,
955 return 0; 955 return 0;
956} 956}
957 957
958static int rspi_prepare_message(struct spi_master *master, 958static int rspi_prepare_message(struct spi_controller *ctlr,
959 struct spi_message *msg) 959 struct spi_message *msg)
960{ 960{
961 struct rspi_data *rspi = spi_master_get_devdata(master); 961 struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
962 int ret; 962 int ret;
963 963
964 if (msg->spi->mode & 964 if (msg->spi->mode &
@@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master,
974 return 0; 974 return 0;
975} 975}
976 976
977static int rspi_unprepare_message(struct spi_master *master, 977static int rspi_unprepare_message(struct spi_controller *ctlr,
978 struct spi_message *msg) 978 struct spi_message *msg)
979{ 979{
980 struct rspi_data *rspi = spi_master_get_devdata(master); 980 struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
981 981
982 /* Disable SPI function */ 982 /* Disable SPI function */
983 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR); 983 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
@@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
1081 return chan; 1081 return chan;
1082} 1082}
1083 1083
1084static int rspi_request_dma(struct device *dev, struct spi_master *master, 1084static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
1085 const struct resource *res) 1085 const struct resource *res)
1086{ 1086{
1087 const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev); 1087 const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
@@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master,
1099 return 0; 1099 return 0;
1100 } 1100 }
1101 1101
1102 master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id, 1102 ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
1103 res->start + RSPI_SPDR); 1103 res->start + RSPI_SPDR);
1104 if (!master->dma_tx) 1104 if (!ctlr->dma_tx)
1105 return -ENODEV; 1105 return -ENODEV;
1106 1106
1107 master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id, 1107 ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
1108 res->start + RSPI_SPDR); 1108 res->start + RSPI_SPDR);
1109 if (!master->dma_rx) { 1109 if (!ctlr->dma_rx) {
1110 dma_release_channel(master->dma_tx); 1110 dma_release_channel(ctlr->dma_tx);
1111 master->dma_tx = NULL; 1111 ctlr->dma_tx = NULL;
1112 return -ENODEV; 1112 return -ENODEV;
1113 } 1113 }
1114 1114
1115 master->can_dma = rspi_can_dma; 1115 ctlr->can_dma = rspi_can_dma;
1116 dev_info(dev, "DMA available"); 1116 dev_info(dev, "DMA available");
1117 return 0; 1117 return 0;
1118} 1118}
1119 1119
1120static void rspi_release_dma(struct spi_master *master) 1120static void rspi_release_dma(struct spi_controller *ctlr)
1121{ 1121{
1122 if (master->dma_tx) 1122 if (ctlr->dma_tx)
1123 dma_release_channel(master->dma_tx); 1123 dma_release_channel(ctlr->dma_tx);
1124 if (master->dma_rx) 1124 if (ctlr->dma_rx)
1125 dma_release_channel(master->dma_rx); 1125 dma_release_channel(ctlr->dma_rx);
1126} 1126}
1127 1127
1128static int rspi_remove(struct platform_device *pdev) 1128static int rspi_remove(struct platform_device *pdev)
1129{ 1129{
1130 struct rspi_data *rspi = platform_get_drvdata(pdev); 1130 struct rspi_data *rspi = platform_get_drvdata(pdev);
1131 1131
1132 rspi_release_dma(rspi->master); 1132 rspi_release_dma(rspi->ctlr);
1133 pm_runtime_disable(&pdev->dev); 1133 pm_runtime_disable(&pdev->dev);
1134 1134
1135 return 0; 1135 return 0;
@@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = {
1139 .set_config_register = rspi_set_config_register, 1139 .set_config_register = rspi_set_config_register,
1140 .transfer_one = rspi_transfer_one, 1140 .transfer_one = rspi_transfer_one,
1141 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 1141 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
1142 .flags = SPI_MASTER_MUST_TX, 1142 .flags = SPI_CONTROLLER_MUST_TX,
1143 .fifo_size = 8, 1143 .fifo_size = 8,
1144}; 1144};
1145 1145
@@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = {
1147 .set_config_register = rspi_rz_set_config_register, 1147 .set_config_register = rspi_rz_set_config_register,
1148 .transfer_one = rspi_rz_transfer_one, 1148 .transfer_one = rspi_rz_transfer_one,
1149 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 1149 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
1150 .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, 1150 .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
1151 .fifo_size = 8, /* 8 for TX, 32 for RX */ 1151 .fifo_size = 8, /* 8 for TX, 32 for RX */
1152}; 1152};
1153 1153
@@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = {
1157 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | 1157 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
1158 SPI_TX_DUAL | SPI_TX_QUAD | 1158 SPI_TX_DUAL | SPI_TX_QUAD |
1159 SPI_RX_DUAL | SPI_RX_QUAD, 1159 SPI_RX_DUAL | SPI_RX_QUAD,
1160 .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX, 1160 .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
1161 .fifo_size = 32, 1161 .fifo_size = 32,
1162}; 1162};
1163 1163
@@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = {
1174 1174
1175MODULE_DEVICE_TABLE(of, rspi_of_match); 1175MODULE_DEVICE_TABLE(of, rspi_of_match);
1176 1176
1177static int rspi_parse_dt(struct device *dev, struct spi_master *master) 1177static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
1178{ 1178{
1179 u32 num_cs; 1179 u32 num_cs;
1180 int error; 1180 int error;
@@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master)
1186 return error; 1186 return error;
1187 } 1187 }
1188 1188
1189 master->num_chipselect = num_cs; 1189 ctlr->num_chipselect = num_cs;
1190 return 0; 1190 return 0;
1191} 1191}
1192#else 1192#else
1193#define rspi_of_match NULL 1193#define rspi_of_match NULL
1194static inline int rspi_parse_dt(struct device *dev, struct spi_master *master) 1194static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
1195{ 1195{
1196 return -EINVAL; 1196 return -EINVAL;
1197} 1197}
@@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq,
1212static int rspi_probe(struct platform_device *pdev) 1212static int rspi_probe(struct platform_device *pdev)
1213{ 1213{
1214 struct resource *res; 1214 struct resource *res;
1215 struct spi_master *master; 1215 struct spi_controller *ctlr;
1216 struct rspi_data *rspi; 1216 struct rspi_data *rspi;
1217 int ret; 1217 int ret;
1218 const struct rspi_plat_data *rspi_pd; 1218 const struct rspi_plat_data *rspi_pd;
1219 const struct spi_ops *ops; 1219 const struct spi_ops *ops;
1220 1220
1221 master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data)); 1221 ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
1222 if (master == NULL) 1222 if (ctlr == NULL)
1223 return -ENOMEM; 1223 return -ENOMEM;
1224 1224
1225 ops = of_device_get_match_data(&pdev->dev); 1225 ops = of_device_get_match_data(&pdev->dev);
1226 if (ops) { 1226 if (ops) {
1227 ret = rspi_parse_dt(&pdev->dev, master); 1227 ret = rspi_parse_dt(&pdev->dev, ctlr);
1228 if (ret) 1228 if (ret)
1229 goto error1; 1229 goto error1;
1230 } else { 1230 } else {
1231 ops = (struct spi_ops *)pdev->id_entry->driver_data; 1231 ops = (struct spi_ops *)pdev->id_entry->driver_data;
1232 rspi_pd = dev_get_platdata(&pdev->dev); 1232 rspi_pd = dev_get_platdata(&pdev->dev);
1233 if (rspi_pd && rspi_pd->num_chipselect) 1233 if (rspi_pd && rspi_pd->num_chipselect)
1234 master->num_chipselect = rspi_pd->num_chipselect; 1234 ctlr->num_chipselect = rspi_pd->num_chipselect;
1235 else 1235 else
1236 master->num_chipselect = 2; /* default */ 1236 ctlr->num_chipselect = 2; /* default */
1237 } 1237 }
1238 1238
1239 /* ops parameter check */ 1239 /* ops parameter check */
@@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev)
1243 goto error1; 1243 goto error1;
1244 } 1244 }
1245 1245
1246 rspi = spi_master_get_devdata(master); 1246 rspi = spi_controller_get_devdata(ctlr);
1247 platform_set_drvdata(pdev, rspi); 1247 platform_set_drvdata(pdev, rspi);
1248 rspi->ops = ops; 1248 rspi->ops = ops;
1249 rspi->master = master; 1249 rspi->ctlr = ctlr;
1250 1250
1251 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1251 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1252 rspi->addr = devm_ioremap_resource(&pdev->dev, res); 1252 rspi->addr = devm_ioremap_resource(&pdev->dev, res);
@@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev)
1266 1266
1267 init_waitqueue_head(&rspi->wait); 1267 init_waitqueue_head(&rspi->wait);
1268 1268
1269 master->bus_num = pdev->id; 1269 ctlr->bus_num = pdev->id;
1270 master->setup = rspi_setup; 1270 ctlr->setup = rspi_setup;
1271 master->auto_runtime_pm = true; 1271 ctlr->auto_runtime_pm = true;
1272 master->transfer_one = ops->transfer_one; 1272 ctlr->transfer_one = ops->transfer_one;
1273 master->prepare_message = rspi_prepare_message; 1273 ctlr->prepare_message = rspi_prepare_message;
1274 master->unprepare_message = rspi_unprepare_message; 1274 ctlr->unprepare_message = rspi_unprepare_message;
1275 master->mode_bits = ops->mode_bits; 1275 ctlr->mode_bits = ops->mode_bits;
1276 master->flags = ops->flags; 1276 ctlr->flags = ops->flags;
1277 master->dev.of_node = pdev->dev.of_node; 1277 ctlr->dev.of_node = pdev->dev.of_node;
1278 1278
1279 ret = platform_get_irq_byname(pdev, "rx"); 1279 ret = platform_get_irq_byname(pdev, "rx");
1280 if (ret < 0) { 1280 if (ret < 0) {
@@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev)
1311 goto error2; 1311 goto error2;
1312 } 1312 }
1313 1313
1314 ret = rspi_request_dma(&pdev->dev, master, res); 1314 ret = rspi_request_dma(&pdev->dev, ctlr, res);
1315 if (ret < 0) 1315 if (ret < 0)
1316 dev_warn(&pdev->dev, "DMA not available, using PIO\n"); 1316 dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1317 1317
1318 ret = devm_spi_register_master(&pdev->dev, master); 1318 ret = devm_spi_register_controller(&pdev->dev, ctlr);
1319 if (ret < 0) { 1319 if (ret < 0) {
1320 dev_err(&pdev->dev, "spi_register_master error.\n"); 1320 dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
1321 goto error3; 1321 goto error3;
1322 } 1322 }
1323 1323
@@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev)
1326 return 0; 1326 return 0;
1327 1327
1328error3: 1328error3:
1329 rspi_release_dma(master); 1329 rspi_release_dma(ctlr);
1330error2: 1330error2:
1331 pm_runtime_disable(&pdev->dev); 1331 pm_runtime_disable(&pdev->dev);
1332error1: 1332error1:
1333 spi_master_put(master); 1333 spi_controller_put(ctlr);
1334 1334
1335 return ret; 1335 return ret;
1336} 1336}
@@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev)
1349{ 1349{
1350 struct rspi_data *rspi = dev_get_drvdata(dev); 1350 struct rspi_data *rspi = dev_get_drvdata(dev);
1351 1351
1352 return spi_master_suspend(rspi->master); 1352 return spi_controller_suspend(rspi->ctlr);
1353} 1353}
1354 1354
1355static int rspi_resume(struct device *dev) 1355static int rspi_resume(struct device *dev)
1356{ 1356{
1357 struct rspi_data *rspi = dev_get_drvdata(dev); 1357 struct rspi_data *rspi = dev_get_drvdata(dev);
1358 1358
1359 return spi_master_resume(rspi->master); 1359 return spi_controller_resume(rspi->ctlr);
1360} 1360}
1361 1361
1362static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume); 1362static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index dc0926e43665..7f73f91d412a 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -35,7 +35,7 @@
35 35
36struct hspi_priv { 36struct hspi_priv {
37 void __iomem *addr; 37 void __iomem *addr;
38 struct spi_master *master; 38 struct spi_controller *ctlr;
39 struct device *dev; 39 struct device *dev;
40 struct clk *clk; 40 struct clk *clk;
41}; 41};
@@ -140,10 +140,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
140 hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */ 140 hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
141} 141}
142 142
143static int hspi_transfer_one_message(struct spi_master *master, 143static int hspi_transfer_one_message(struct spi_controller *ctlr,
144 struct spi_message *msg) 144 struct spi_message *msg)
145{ 145{
146 struct hspi_priv *hspi = spi_master_get_devdata(master); 146 struct hspi_priv *hspi = spi_controller_get_devdata(ctlr);
147 struct spi_transfer *t; 147 struct spi_transfer *t;
148 u32 tx; 148 u32 tx;
149 u32 rx; 149 u32 rx;
@@ -205,7 +205,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
205 ndelay(nsecs); 205 ndelay(nsecs);
206 hspi_hw_cs_disable(hspi); 206 hspi_hw_cs_disable(hspi);
207 } 207 }
208 spi_finalize_current_message(master); 208 spi_finalize_current_message(ctlr);
209 209
210 return ret; 210 return ret;
211} 211}
@@ -213,7 +213,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
213static int hspi_probe(struct platform_device *pdev) 213static int hspi_probe(struct platform_device *pdev)
214{ 214{
215 struct resource *res; 215 struct resource *res;
216 struct spi_master *master; 216 struct spi_controller *ctlr;
217 struct hspi_priv *hspi; 217 struct hspi_priv *hspi;
218 struct clk *clk; 218 struct clk *clk;
219 int ret; 219 int ret;
@@ -225,11 +225,9 @@ static int hspi_probe(struct platform_device *pdev)
225 return -EINVAL; 225 return -EINVAL;
226 } 226 }
227 227
228 master = spi_alloc_master(&pdev->dev, sizeof(*hspi)); 228 ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi));
229 if (!master) { 229 if (!ctlr)
230 dev_err(&pdev->dev, "spi_alloc_master error.\n");
231 return -ENOMEM; 230 return -ENOMEM;
232 }
233 231
234 clk = clk_get(&pdev->dev, NULL); 232 clk = clk_get(&pdev->dev, NULL);
235 if (IS_ERR(clk)) { 233 if (IS_ERR(clk)) {
@@ -238,33 +236,32 @@ static int hspi_probe(struct platform_device *pdev)
238 goto error0; 236 goto error0;
239 } 237 }
240 238
241 hspi = spi_master_get_devdata(master); 239 hspi = spi_controller_get_devdata(ctlr);
242 platform_set_drvdata(pdev, hspi); 240 platform_set_drvdata(pdev, hspi);
243 241
244 /* init hspi */ 242 /* init hspi */
245 hspi->master = master; 243 hspi->ctlr = ctlr;
246 hspi->dev = &pdev->dev; 244 hspi->dev = &pdev->dev;
247 hspi->clk = clk; 245 hspi->clk = clk;
248 hspi->addr = devm_ioremap(hspi->dev, 246 hspi->addr = devm_ioremap(hspi->dev,
249 res->start, resource_size(res)); 247 res->start, resource_size(res));
250 if (!hspi->addr) { 248 if (!hspi->addr) {
251 dev_err(&pdev->dev, "ioremap error.\n");
252 ret = -ENOMEM; 249 ret = -ENOMEM;
253 goto error1; 250 goto error1;
254 } 251 }
255 252
256 pm_runtime_enable(&pdev->dev); 253 pm_runtime_enable(&pdev->dev);
257 254
258 master->bus_num = pdev->id; 255 ctlr->bus_num = pdev->id;
259 master->mode_bits = SPI_CPOL | SPI_CPHA; 256 ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
260 master->dev.of_node = pdev->dev.of_node; 257 ctlr->dev.of_node = pdev->dev.of_node;
261 master->auto_runtime_pm = true; 258 ctlr->auto_runtime_pm = true;
262 master->transfer_one_message = hspi_transfer_one_message; 259 ctlr->transfer_one_message = hspi_transfer_one_message;
263 master->bits_per_word_mask = SPI_BPW_MASK(8); 260 ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
264 261
265 ret = devm_spi_register_master(&pdev->dev, master); 262 ret = devm_spi_register_controller(&pdev->dev, ctlr);
266 if (ret < 0) { 263 if (ret < 0) {
267 dev_err(&pdev->dev, "spi_register_master error.\n"); 264 dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
268 goto error2; 265 goto error2;
269 } 266 }
270 267
@@ -275,7 +272,7 @@ static int hspi_probe(struct platform_device *pdev)
275 error1: 272 error1:
276 clk_put(clk); 273 clk_put(clk);
277 error0: 274 error0:
278 spi_master_put(master); 275 spi_controller_put(ctlr);
279 276
280 return ret; 277 return ret;
281} 278}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index d14b407cc800..e2eb466db10a 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1,6 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * SuperH MSIOF SPI Master Interface 3 * SuperH MSIOF SPI Controller Interface
4 * 4 *
5 * Copyright (c) 2009 Magnus Damm 5 * Copyright (c) 2009 Magnus Damm
6 * Copyright (C) 2014 Renesas Electronics Corporation 6 * Copyright (C) 2014 Renesas Electronics Corporation
@@ -32,14 +32,15 @@
32#include <asm/unaligned.h> 32#include <asm/unaligned.h>
33 33
34struct sh_msiof_chipdata { 34struct sh_msiof_chipdata {
35 u32 bits_per_word_mask;
35 u16 tx_fifo_size; 36 u16 tx_fifo_size;
36 u16 rx_fifo_size; 37 u16 rx_fifo_size;
37 u16 master_flags; 38 u16 ctlr_flags;
38 u16 min_div_pow; 39 u16 min_div_pow;
39}; 40};
40 41
41struct sh_msiof_spi_priv { 42struct sh_msiof_spi_priv {
42 struct spi_master *master; 43 struct spi_controller *ctlr;
43 void __iomem *mapbase; 44 void __iomem *mapbase;
44 struct clk *clk; 45 struct clk *clk;
45 struct platform_device *pdev; 46 struct platform_device *pdev;
@@ -287,7 +288,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
287 288
288 scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); 289 scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
289 sh_msiof_write(p, TSCR, scr); 290 sh_msiof_write(p, TSCR, scr);
290 if (!(p->master->flags & SPI_MASTER_MUST_TX)) 291 if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
291 sh_msiof_write(p, RSCR, scr); 292 sh_msiof_write(p, RSCR, scr);
292} 293}
293 294
@@ -351,14 +352,14 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
351 tmp |= !cs_high << MDR1_SYNCAC_SHIFT; 352 tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
352 tmp |= lsb_first << MDR1_BITLSB_SHIFT; 353 tmp |= lsb_first << MDR1_BITLSB_SHIFT;
353 tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); 354 tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
354 if (spi_controller_is_slave(p->master)) { 355 if (spi_controller_is_slave(p->ctlr)) {
355 sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); 356 sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
356 } else { 357 } else {
357 sh_msiof_write(p, TMDR1, 358 sh_msiof_write(p, TMDR1,
358 tmp | MDR1_TRMD | TMDR1_PCON | 359 tmp | MDR1_TRMD | TMDR1_PCON |
359 (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT); 360 (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
360 } 361 }
361 if (p->master->flags & SPI_MASTER_MUST_TX) { 362 if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
362 /* These bits are reserved if RX needs TX */ 363 /* These bits are reserved if RX needs TX */
363 tmp &= ~0x0000ffff; 364 tmp &= ~0x0000ffff;
364 } 365 }
@@ -382,7 +383,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
382{ 383{
383 u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); 384 u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
384 385
385 if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX)) 386 if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
386 sh_msiof_write(p, TMDR2, dr2); 387 sh_msiof_write(p, TMDR2, dr2);
387 else 388 else
388 sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); 389 sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
@@ -539,8 +540,9 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
539 540
540static int sh_msiof_spi_setup(struct spi_device *spi) 541static int sh_msiof_spi_setup(struct spi_device *spi)
541{ 542{
542 struct device_node *np = spi->master->dev.of_node; 543 struct device_node *np = spi->controller->dev.of_node;
543 struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); 544 struct sh_msiof_spi_priv *p =
545 spi_controller_get_devdata(spi->controller);
544 u32 clr, set, tmp; 546 u32 clr, set, tmp;
545 547
546 if (!np) { 548 if (!np) {
@@ -556,7 +558,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
556 return 0; 558 return 0;
557 } 559 }
558 560
559 if (spi_controller_is_slave(p->master)) 561 if (spi_controller_is_slave(p->ctlr))
560 return 0; 562 return 0;
561 563
562 if (p->native_cs_inited && 564 if (p->native_cs_inited &&
@@ -581,10 +583,10 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
581 return 0; 583 return 0;
582} 584}
583 585
584static int sh_msiof_prepare_message(struct spi_master *master, 586static int sh_msiof_prepare_message(struct spi_controller *ctlr,
585 struct spi_message *msg) 587 struct spi_message *msg)
586{ 588{
587 struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); 589 struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
588 const struct spi_device *spi = msg->spi; 590 const struct spi_device *spi = msg->spi;
589 u32 ss, cs_high; 591 u32 ss, cs_high;
590 592
@@ -605,7 +607,7 @@ static int sh_msiof_prepare_message(struct spi_master *master,
605 607
606static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) 608static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
607{ 609{
608 bool slave = spi_controller_is_slave(p->master); 610 bool slave = spi_controller_is_slave(p->ctlr);
609 int ret = 0; 611 int ret = 0;
610 612
611 /* setup clock and rx/tx signals */ 613 /* setup clock and rx/tx signals */
@@ -625,7 +627,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
625 627
626static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) 628static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
627{ 629{
628 bool slave = spi_controller_is_slave(p->master); 630 bool slave = spi_controller_is_slave(p->ctlr);
629 int ret = 0; 631 int ret = 0;
630 632
631 /* shut down frame, rx/tx and clock signals */ 633 /* shut down frame, rx/tx and clock signals */
@@ -641,9 +643,9 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
641 return ret; 643 return ret;
642} 644}
643 645
644static int sh_msiof_slave_abort(struct spi_master *master) 646static int sh_msiof_slave_abort(struct spi_controller *ctlr)
645{ 647{
646 struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); 648 struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
647 649
648 p->slave_aborted = true; 650 p->slave_aborted = true;
649 complete(&p->done); 651 complete(&p->done);
@@ -654,7 +656,7 @@ static int sh_msiof_slave_abort(struct spi_master *master)
654static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p, 656static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
655 struct completion *x) 657 struct completion *x)
656{ 658{
657 if (spi_controller_is_slave(p->master)) { 659 if (spi_controller_is_slave(p->ctlr)) {
658 if (wait_for_completion_interruptible(x) || 660 if (wait_for_completion_interruptible(x) ||
659 p->slave_aborted) { 661 p->slave_aborted) {
660 dev_dbg(&p->pdev->dev, "interrupted\n"); 662 dev_dbg(&p->pdev->dev, "interrupted\n");
@@ -754,7 +756,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
754 /* First prepare and submit the DMA request(s), as this may fail */ 756 /* First prepare and submit the DMA request(s), as this may fail */
755 if (rx) { 757 if (rx) {
756 ier_bits |= IER_RDREQE | IER_RDMAE; 758 ier_bits |= IER_RDREQE | IER_RDMAE;
757 desc_rx = dmaengine_prep_slave_single(p->master->dma_rx, 759 desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
758 p->rx_dma_addr, len, DMA_DEV_TO_MEM, 760 p->rx_dma_addr, len, DMA_DEV_TO_MEM,
759 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 761 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
760 if (!desc_rx) 762 if (!desc_rx)
@@ -769,9 +771,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
769 771
770 if (tx) { 772 if (tx) {
771 ier_bits |= IER_TDREQE | IER_TDMAE; 773 ier_bits |= IER_TDREQE | IER_TDMAE;
772 dma_sync_single_for_device(p->master->dma_tx->device->dev, 774 dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
773 p->tx_dma_addr, len, DMA_TO_DEVICE); 775 p->tx_dma_addr, len, DMA_TO_DEVICE);
774 desc_tx = dmaengine_prep_slave_single(p->master->dma_tx, 776 desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
775 p->tx_dma_addr, len, DMA_MEM_TO_DEV, 777 p->tx_dma_addr, len, DMA_MEM_TO_DEV,
776 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 778 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
777 if (!desc_tx) { 779 if (!desc_tx) {
@@ -803,9 +805,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
803 805
804 /* Now start DMA */ 806 /* Now start DMA */
805 if (rx) 807 if (rx)
806 dma_async_issue_pending(p->master->dma_rx); 808 dma_async_issue_pending(p->ctlr->dma_rx);
807 if (tx) 809 if (tx)
808 dma_async_issue_pending(p->master->dma_tx); 810 dma_async_issue_pending(p->ctlr->dma_tx);
809 811
810 ret = sh_msiof_spi_start(p, rx); 812 ret = sh_msiof_spi_start(p, rx);
811 if (ret) { 813 if (ret) {
@@ -845,9 +847,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
845 } 847 }
846 848
847 if (rx) 849 if (rx)
848 dma_sync_single_for_cpu(p->master->dma_rx->device->dev, 850 dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
849 p->rx_dma_addr, len, 851 p->rx_dma_addr, len, DMA_FROM_DEVICE);
850 DMA_FROM_DEVICE);
851 852
852 return 0; 853 return 0;
853 854
@@ -856,10 +857,10 @@ stop_reset:
856 sh_msiof_spi_stop(p, rx); 857 sh_msiof_spi_stop(p, rx);
857stop_dma: 858stop_dma:
858 if (tx) 859 if (tx)
859 dmaengine_terminate_all(p->master->dma_tx); 860 dmaengine_terminate_all(p->ctlr->dma_tx);
860no_dma_tx: 861no_dma_tx:
861 if (rx) 862 if (rx)
862 dmaengine_terminate_all(p->master->dma_rx); 863 dmaengine_terminate_all(p->ctlr->dma_rx);
863 sh_msiof_write(p, IER, 0); 864 sh_msiof_write(p, IER, 0);
864 return ret; 865 return ret;
865} 866}
@@ -907,11 +908,11 @@ static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
907 memcpy(dst, src, words * 4); 908 memcpy(dst, src, words * 4);
908} 909}
909 910
910static int sh_msiof_transfer_one(struct spi_master *master, 911static int sh_msiof_transfer_one(struct spi_controller *ctlr,
911 struct spi_device *spi, 912 struct spi_device *spi,
912 struct spi_transfer *t) 913 struct spi_transfer *t)
913{ 914{
914 struct sh_msiof_spi_priv *p = spi_master_get_devdata(master); 915 struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
915 void (*copy32)(u32 *, const u32 *, unsigned int); 916 void (*copy32)(u32 *, const u32 *, unsigned int);
916 void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int); 917 void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
917 void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int); 918 void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
@@ -926,10 +927,10 @@ static int sh_msiof_transfer_one(struct spi_master *master,
926 int ret; 927 int ret;
927 928
928 /* setup clocks (clock already enabled in chipselect()) */ 929 /* setup clocks (clock already enabled in chipselect()) */
929 if (!spi_controller_is_slave(p->master)) 930 if (!spi_controller_is_slave(p->ctlr))
930 sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz); 931 sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
931 932
932 while (master->dma_tx && len > 15) { 933 while (ctlr->dma_tx && len > 15) {
933 /* 934 /*
934 * DMA supports 32-bit words only, hence pack 8-bit and 16-bit 935 * DMA supports 32-bit words only, hence pack 8-bit and 16-bit
935 * words, with byte resp. word swapping. 936 * words, with byte resp. word swapping.
@@ -937,17 +938,13 @@ static int sh_msiof_transfer_one(struct spi_master *master,
937 unsigned int l = 0; 938 unsigned int l = 0;
938 939
939 if (tx_buf) 940 if (tx_buf)
940 l = min(len, p->tx_fifo_size * 4); 941 l = min(round_down(len, 4), p->tx_fifo_size * 4);
941 if (rx_buf) 942 if (rx_buf)
942 l = min(len, p->rx_fifo_size * 4); 943 l = min(round_down(len, 4), p->rx_fifo_size * 4);
943 944
944 if (bits <= 8) { 945 if (bits <= 8) {
945 if (l & 3)
946 break;
947 copy32 = copy_bswap32; 946 copy32 = copy_bswap32;
948 } else if (bits <= 16) { 947 } else if (bits <= 16) {
949 if (l & 3)
950 break;
951 copy32 = copy_wswap32; 948 copy32 = copy_wswap32;
952 } else { 949 } else {
953 copy32 = copy_plain32; 950 copy32 = copy_plain32;
@@ -1052,23 +1049,28 @@ static int sh_msiof_transfer_one(struct spi_master *master,
1052} 1049}
1053 1050
1054static const struct sh_msiof_chipdata sh_data = { 1051static const struct sh_msiof_chipdata sh_data = {
1052 .bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
1055 .tx_fifo_size = 64, 1053 .tx_fifo_size = 64,
1056 .rx_fifo_size = 64, 1054 .rx_fifo_size = 64,
1057 .master_flags = 0, 1055 .ctlr_flags = 0,
1058 .min_div_pow = 0, 1056 .min_div_pow = 0,
1059}; 1057};
1060 1058
1061static const struct sh_msiof_chipdata rcar_gen2_data = { 1059static const struct sh_msiof_chipdata rcar_gen2_data = {
1060 .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
1061 SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
1062 .tx_fifo_size = 64, 1062 .tx_fifo_size = 64,
1063 .rx_fifo_size = 64, 1063 .rx_fifo_size = 64,
1064 .master_flags = SPI_MASTER_MUST_TX, 1064 .ctlr_flags = SPI_CONTROLLER_MUST_TX,
1065 .min_div_pow = 0, 1065 .min_div_pow = 0,
1066}; 1066};
1067 1067
1068static const struct sh_msiof_chipdata rcar_gen3_data = { 1068static const struct sh_msiof_chipdata rcar_gen3_data = {
1069 .bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
1070 SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
1069 .tx_fifo_size = 64, 1071 .tx_fifo_size = 64,
1070 .rx_fifo_size = 64, 1072 .rx_fifo_size = 64,
1071 .master_flags = SPI_MASTER_MUST_TX, 1073 .ctlr_flags = SPI_CONTROLLER_MUST_TX,
1072 .min_div_pow = 1, 1074 .min_div_pow = 1,
1073}; 1075};
1074 1076
@@ -1136,7 +1138,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
1136 if (ret <= 0) 1138 if (ret <= 0)
1137 return 0; 1139 return 0;
1138 1140
1139 num_cs = max_t(unsigned int, ret, p->master->num_chipselect); 1141 num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
1140 for (i = 0; i < num_cs; i++) { 1142 for (i = 0; i < num_cs; i++) {
1141 struct gpio_desc *gpiod; 1143 struct gpio_desc *gpiod;
1142 1144
@@ -1206,10 +1208,10 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
1206{ 1208{
1207 struct platform_device *pdev = p->pdev; 1209 struct platform_device *pdev = p->pdev;
1208 struct device *dev = &pdev->dev; 1210 struct device *dev = &pdev->dev;
1209 const struct sh_msiof_spi_info *info = dev_get_platdata(dev); 1211 const struct sh_msiof_spi_info *info = p->info;
1210 unsigned int dma_tx_id, dma_rx_id; 1212 unsigned int dma_tx_id, dma_rx_id;
1211 const struct resource *res; 1213 const struct resource *res;
1212 struct spi_master *master; 1214 struct spi_controller *ctlr;
1213 struct device *tx_dev, *rx_dev; 1215 struct device *tx_dev, *rx_dev;
1214 1216
1215 if (dev->of_node) { 1217 if (dev->of_node) {
@@ -1229,17 +1231,15 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
1229 if (!res) 1231 if (!res)
1230 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1232 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1231 1233
1232 master = p->master; 1234 ctlr = p->ctlr;
1233 master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, 1235 ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
1234 dma_tx_id, 1236 dma_tx_id, res->start + TFDR);
1235 res->start + TFDR); 1237 if (!ctlr->dma_tx)
1236 if (!master->dma_tx)
1237 return -ENODEV; 1238 return -ENODEV;
1238 1239
1239 master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, 1240 ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
1240 dma_rx_id, 1241 dma_rx_id, res->start + RFDR);
1241 res->start + RFDR); 1242 if (!ctlr->dma_rx)
1242 if (!master->dma_rx)
1243 goto free_tx_chan; 1243 goto free_tx_chan;
1244 1244
1245 p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); 1245 p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
@@ -1250,13 +1250,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
1250 if (!p->rx_dma_page) 1250 if (!p->rx_dma_page)
1251 goto free_tx_page; 1251 goto free_tx_page;
1252 1252
1253 tx_dev = master->dma_tx->device->dev; 1253 tx_dev = ctlr->dma_tx->device->dev;
1254 p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE, 1254 p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
1255 DMA_TO_DEVICE); 1255 DMA_TO_DEVICE);
1256 if (dma_mapping_error(tx_dev, p->tx_dma_addr)) 1256 if (dma_mapping_error(tx_dev, p->tx_dma_addr))
1257 goto free_rx_page; 1257 goto free_rx_page;
1258 1258
1259 rx_dev = master->dma_rx->device->dev; 1259 rx_dev = ctlr->dma_rx->device->dev;
1260 p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE, 1260 p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
1261 DMA_FROM_DEVICE); 1261 DMA_FROM_DEVICE);
1262 if (dma_mapping_error(rx_dev, p->rx_dma_addr)) 1262 if (dma_mapping_error(rx_dev, p->rx_dma_addr))
@@ -1272,34 +1272,34 @@ free_rx_page:
1272free_tx_page: 1272free_tx_page:
1273 free_page((unsigned long)p->tx_dma_page); 1273 free_page((unsigned long)p->tx_dma_page);
1274free_rx_chan: 1274free_rx_chan:
1275 dma_release_channel(master->dma_rx); 1275 dma_release_channel(ctlr->dma_rx);
1276free_tx_chan: 1276free_tx_chan:
1277 dma_release_channel(master->dma_tx); 1277 dma_release_channel(ctlr->dma_tx);
1278 master->dma_tx = NULL; 1278 ctlr->dma_tx = NULL;
1279 return -ENODEV; 1279 return -ENODEV;
1280} 1280}
1281 1281
1282static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p) 1282static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
1283{ 1283{
1284 struct spi_master *master = p->master; 1284 struct spi_controller *ctlr = p->ctlr;
1285 1285
1286 if (!master->dma_tx) 1286 if (!ctlr->dma_tx)
1287 return; 1287 return;
1288 1288
1289 dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr, 1289 dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
1290 PAGE_SIZE, DMA_FROM_DEVICE); 1290 DMA_FROM_DEVICE);
1291 dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr, 1291 dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
1292 PAGE_SIZE, DMA_TO_DEVICE); 1292 DMA_TO_DEVICE);
1293 free_page((unsigned long)p->rx_dma_page); 1293 free_page((unsigned long)p->rx_dma_page);
1294 free_page((unsigned long)p->tx_dma_page); 1294 free_page((unsigned long)p->tx_dma_page);
1295 dma_release_channel(master->dma_rx); 1295 dma_release_channel(ctlr->dma_rx);
1296 dma_release_channel(master->dma_tx); 1296 dma_release_channel(ctlr->dma_tx);
1297} 1297}
1298 1298
1299static int sh_msiof_spi_probe(struct platform_device *pdev) 1299static int sh_msiof_spi_probe(struct platform_device *pdev)
1300{ 1300{
1301 struct resource *r; 1301 struct resource *r;
1302 struct spi_master *master; 1302 struct spi_controller *ctlr;
1303 const struct sh_msiof_chipdata *chipdata; 1303 const struct sh_msiof_chipdata *chipdata;
1304 struct sh_msiof_spi_info *info; 1304 struct sh_msiof_spi_info *info;
1305 struct sh_msiof_spi_priv *p; 1305 struct sh_msiof_spi_priv *p;
@@ -1320,18 +1320,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
1320 } 1320 }
1321 1321
1322 if (info->mode == MSIOF_SPI_SLAVE) 1322 if (info->mode == MSIOF_SPI_SLAVE)
1323 master = spi_alloc_slave(&pdev->dev, 1323 ctlr = spi_alloc_slave(&pdev->dev,
1324 sizeof(struct sh_msiof_spi_priv)); 1324 sizeof(struct sh_msiof_spi_priv));
1325 else 1325 else
1326 master = spi_alloc_master(&pdev->dev, 1326 ctlr = spi_alloc_master(&pdev->dev,
1327 sizeof(struct sh_msiof_spi_priv)); 1327 sizeof(struct sh_msiof_spi_priv));
1328 if (master == NULL) 1328 if (ctlr == NULL)
1329 return -ENOMEM; 1329 return -ENOMEM;
1330 1330
1331 p = spi_master_get_devdata(master); 1331 p = spi_controller_get_devdata(ctlr);
1332 1332
1333 platform_set_drvdata(pdev, p); 1333 platform_set_drvdata(pdev, p);
1334 p->master = master; 1334 p->ctlr = ctlr;
1335 p->info = info; 1335 p->info = info;
1336 p->min_div_pow = chipdata->min_div_pow; 1336 p->min_div_pow = chipdata->min_div_pow;
1337 1337
@@ -1378,31 +1378,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
1378 p->rx_fifo_size = p->info->rx_fifo_override; 1378 p->rx_fifo_size = p->info->rx_fifo_override;
1379 1379
1380 /* Setup GPIO chip selects */ 1380 /* Setup GPIO chip selects */
1381 master->num_chipselect = p->info->num_chipselect; 1381 ctlr->num_chipselect = p->info->num_chipselect;
1382 ret = sh_msiof_get_cs_gpios(p); 1382 ret = sh_msiof_get_cs_gpios(p);
1383 if (ret) 1383 if (ret)
1384 goto err1; 1384 goto err1;
1385 1385
1386 /* init master code */ 1386 /* init controller code */
1387 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1387 ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1388 master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; 1388 ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
1389 master->flags = chipdata->master_flags; 1389 ctlr->flags = chipdata->ctlr_flags;
1390 master->bus_num = pdev->id; 1390 ctlr->bus_num = pdev->id;
1391 master->dev.of_node = pdev->dev.of_node; 1391 ctlr->dev.of_node = pdev->dev.of_node;
1392 master->setup = sh_msiof_spi_setup; 1392 ctlr->setup = sh_msiof_spi_setup;
1393 master->prepare_message = sh_msiof_prepare_message; 1393 ctlr->prepare_message = sh_msiof_prepare_message;
1394 master->slave_abort = sh_msiof_slave_abort; 1394 ctlr->slave_abort = sh_msiof_slave_abort;
1395 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); 1395 ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
1396 master->auto_runtime_pm = true; 1396 ctlr->auto_runtime_pm = true;
1397 master->transfer_one = sh_msiof_transfer_one; 1397 ctlr->transfer_one = sh_msiof_transfer_one;
1398 1398
1399 ret = sh_msiof_request_dma(p); 1399 ret = sh_msiof_request_dma(p);
1400 if (ret < 0) 1400 if (ret < 0)
1401 dev_warn(&pdev->dev, "DMA not available, using PIO\n"); 1401 dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1402 1402
1403 ret = devm_spi_register_master(&pdev->dev, master); 1403 ret = devm_spi_register_controller(&pdev->dev, ctlr);
1404 if (ret < 0) { 1404 if (ret < 0) {
1405 dev_err(&pdev->dev, "spi_register_master error.\n"); 1405 dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
1406 goto err2; 1406 goto err2;
1407 } 1407 }
1408 1408
@@ -1412,7 +1412,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
1412 sh_msiof_release_dma(p); 1412 sh_msiof_release_dma(p);
1413 pm_runtime_disable(&pdev->dev); 1413 pm_runtime_disable(&pdev->dev);
1414 err1: 1414 err1:
1415 spi_master_put(master); 1415 spi_controller_put(ctlr);
1416 return ret; 1416 return ret;
1417} 1417}
1418 1418
@@ -1436,14 +1436,14 @@ static int sh_msiof_spi_suspend(struct device *dev)
1436{ 1436{
1437 struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); 1437 struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
1438 1438
1439 return spi_master_suspend(p->master); 1439 return spi_controller_suspend(p->ctlr);
1440} 1440}
1441 1441
1442static int sh_msiof_spi_resume(struct device *dev) 1442static int sh_msiof_spi_resume(struct device *dev)
1443{ 1443{
1444 struct sh_msiof_spi_priv *p = dev_get_drvdata(dev); 1444 struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
1445 1445
1446 return spi_master_resume(p->master); 1446 return spi_controller_resume(p->ctlr);
1447} 1447}
1448 1448
1449static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend, 1449static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
@@ -1465,7 +1465,7 @@ static struct platform_driver sh_msiof_spi_drv = {
1465}; 1465};
1466module_platform_driver(sh_msiof_spi_drv); 1466module_platform_driver(sh_msiof_spi_drv);
1467 1467
1468MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver"); 1468MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
1469MODULE_AUTHOR("Magnus Damm"); 1469MODULE_AUTHOR("Magnus Damm");
1470MODULE_LICENSE("GPL v2"); 1470MODULE_LICENSE("GPL v2");
1471MODULE_ALIAS("platform:spi_sh_msiof"); 1471MODULE_ALIAS("platform:spi_sh_msiof");
diff --git a/drivers/spi/spi-sifive.c b/drivers/spi/spi-sifive.c
new file mode 100644
index 000000000000..93ec2c6cdbfd
--- /dev/null
+++ b/drivers/spi/spi-sifive.c
@@ -0,0 +1,448 @@
1// SPDX-License-Identifier: GPL-2.0
2//
3// Copyright 2018 SiFive, Inc.
4//
5// SiFive SPI controller driver (master mode only)
6//
7// Author: SiFive, Inc.
8// sifive@sifive.com
9
10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/interrupt.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15#include <linux/spi/spi.h>
16#include <linux/io.h>
17#include <linux/log2.h>
18
19#define SIFIVE_SPI_DRIVER_NAME "sifive_spi"
20
21#define SIFIVE_SPI_MAX_CS 32
22#define SIFIVE_SPI_DEFAULT_DEPTH 8
23#define SIFIVE_SPI_DEFAULT_MAX_BITS 8
24
25/* register offsets */
26#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */
27#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */
28#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */
29#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */
30#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */
31#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */
32#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */
33#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */
34#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */
35#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */
36#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */
37#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */
38#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */
39#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */
40#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */
41#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */
42
43/* sckdiv bits */
44#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU
45
46/* sckmode bits */
47#define SIFIVE_SPI_SCKMODE_PHA BIT(0)
48#define SIFIVE_SPI_SCKMODE_POL BIT(1)
49#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \
50 SIFIVE_SPI_SCKMODE_POL)
51
52/* csmode bits */
53#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U
54#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U
55#define SIFIVE_SPI_CSMODE_MODE_OFF 3U
56
57/* delay0 bits */
58#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x))
59#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU
60#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16)
61#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16)
62
63/* delay1 bits */
64#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x))
65#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU
66#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16)
67#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16)
68
69/* fmt bits */
70#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U
71#define SIFIVE_SPI_FMT_PROTO_DUAL 1U
72#define SIFIVE_SPI_FMT_PROTO_QUAD 2U
73#define SIFIVE_SPI_FMT_PROTO_MASK 3U
74#define SIFIVE_SPI_FMT_ENDIAN BIT(2)
75#define SIFIVE_SPI_FMT_DIR BIT(3)
76#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16)
77#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16)
78
79/* txdata bits */
80#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU
81#define SIFIVE_SPI_TXDATA_FULL BIT(31)
82
83/* rxdata bits */
84#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU
85#define SIFIVE_SPI_RXDATA_EMPTY BIT(31)
86
87/* ie and ip bits */
88#define SIFIVE_SPI_IP_TXWM BIT(0)
89#define SIFIVE_SPI_IP_RXWM BIT(1)
90
91struct sifive_spi {
92 void __iomem *regs; /* virt. address of control registers */
93 struct clk *clk; /* bus clock */
94 unsigned int fifo_depth; /* fifo depth in words */
95 u32 cs_inactive; /* level of the CS pins when inactive */
96 struct completion done; /* wake-up from interrupt */
97};
98
99static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value)
100{
101 iowrite32(value, spi->regs + offset);
102}
103
104static u32 sifive_spi_read(struct sifive_spi *spi, int offset)
105{
106 return ioread32(spi->regs + offset);
107}
108
109static void sifive_spi_init(struct sifive_spi *spi)
110{
111 /* Watermark interrupts are disabled by default */
112 sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
113
114 /* Default watermark FIFO threshold values */
115 sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1);
116 sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0);
117
118 /* Set CS/SCK Delays and Inactive Time to defaults */
119 sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0,
120 SIFIVE_SPI_DELAY0_CSSCK(1) |
121 SIFIVE_SPI_DELAY0_SCKCS(1));
122 sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1,
123 SIFIVE_SPI_DELAY1_INTERCS(1) |
124 SIFIVE_SPI_DELAY1_INTERXFR(0));
125
126 /* Exit specialized memory-mapped SPI flash mode */
127 sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0);
128}
129
130static int
131sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg)
132{
133 struct sifive_spi *spi = spi_master_get_devdata(master);
134 struct spi_device *device = msg->spi;
135
136 /* Update the chip select polarity */
137 if (device->mode & SPI_CS_HIGH)
138 spi->cs_inactive &= ~BIT(device->chip_select);
139 else
140 spi->cs_inactive |= BIT(device->chip_select);
141 sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
142
143 /* Select the correct device */
144 sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select);
145
146 /* Set clock mode */
147 sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE,
148 device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK);
149
150 return 0;
151}
152
153static void sifive_spi_set_cs(struct spi_device *device, bool is_high)
154{
155 struct sifive_spi *spi = spi_master_get_devdata(device->master);
156
157 /* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */
158 if (device->mode & SPI_CS_HIGH)
159 is_high = !is_high;
160
161 sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ?
162 SIFIVE_SPI_CSMODE_MODE_AUTO :
163 SIFIVE_SPI_CSMODE_MODE_HOLD);
164}
165
166static int
167sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device,
168 struct spi_transfer *t)
169{
170 u32 cr;
171 unsigned int mode;
172
173 /* Calculate and program the clock rate */
174 cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1;
175 cr &= SIFIVE_SPI_SCKDIV_DIV_MASK;
176 sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr);
177
178 mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits);
179
180 /* Set frame format */
181 cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word);
182 switch (mode) {
183 case SPI_NBITS_QUAD:
184 cr |= SIFIVE_SPI_FMT_PROTO_QUAD;
185 break;
186 case SPI_NBITS_DUAL:
187 cr |= SIFIVE_SPI_FMT_PROTO_DUAL;
188 break;
189 default:
190 cr |= SIFIVE_SPI_FMT_PROTO_SINGLE;
191 break;
192 }
193 if (device->mode & SPI_LSB_FIRST)
194 cr |= SIFIVE_SPI_FMT_ENDIAN;
195 if (!t->rx_buf)
196 cr |= SIFIVE_SPI_FMT_DIR;
197 sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr);
198
199 /* We will want to poll if the time we need to wait is
200 * less than the context switching time.
201 * Let's call that threshold 5us. The operation will take:
202 * (8/mode) * fifo_depth / hz <= 5 * 10^-6
203 * 1600000 * fifo_depth <= hz * mode
204 */
205 return 1600000 * spi->fifo_depth <= t->speed_hz * mode;
206}
207
208static irqreturn_t sifive_spi_irq(int irq, void *dev_id)
209{
210 struct sifive_spi *spi = dev_id;
211 u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
212
213 if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) {
214 /* Disable interrupts until next transfer */
215 sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
216 complete(&spi->done);
217 return IRQ_HANDLED;
218 }
219
220 return IRQ_NONE;
221}
222
223static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll)
224{
225 if (poll) {
226 u32 cr;
227
228 do {
229 cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
230 } while (!(cr & bit));
231 } else {
232 reinit_completion(&spi->done);
233 sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit);
234 wait_for_completion(&spi->done);
235 }
236}
237
238static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr)
239{
240 WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA)
241 & SIFIVE_SPI_TXDATA_FULL) != 0);
242 sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA,
243 *tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK);
244}
245
246static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
247{
248 u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA);
249
250 WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0);
251 *rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK;
252}
253
254static int
255sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
256 struct spi_transfer *t)
257{
258 struct sifive_spi *spi = spi_master_get_devdata(master);
259 int poll = sifive_spi_prep_transfer(spi, device, t);
260 const u8 *tx_ptr = t->tx_buf;
261 u8 *rx_ptr = t->rx_buf;
262 unsigned int remaining_words = t->len;
263
264 while (remaining_words) {
265 unsigned int n_words = min(remaining_words, spi->fifo_depth);
266 unsigned int i;
267
268 /* Enqueue n_words for transmission */
269 for (i = 0; i < n_words; i++)
270 sifive_spi_tx(spi, tx_ptr++);
271
272 if (rx_ptr) {
273 /* Wait for transmission + reception to complete */
274 sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK,
275 n_words - 1);
276 sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll);
277
278 /* Read out all the data from the RX FIFO */
279 for (i = 0; i < n_words; i++)
280 sifive_spi_rx(spi, rx_ptr++);
281 } else {
282 /* Wait for transmission to complete */
283 sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll);
284 }
285
286 remaining_words -= n_words;
287 }
288
289 return 0;
290}
291
292static int sifive_spi_probe(struct platform_device *pdev)
293{
294 struct sifive_spi *spi;
295 struct resource *res;
296 int ret, irq, num_cs;
297 u32 cs_bits, max_bits_per_word;
298 struct spi_master *master;
299
300 master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi));
301 if (!master) {
302 dev_err(&pdev->dev, "out of memory\n");
303 return -ENOMEM;
304 }
305
306 spi = spi_master_get_devdata(master);
307 init_completion(&spi->done);
308 platform_set_drvdata(pdev, master);
309
310 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
311 spi->regs = devm_ioremap_resource(&pdev->dev, res);
312 if (IS_ERR(spi->regs)) {
313 ret = PTR_ERR(spi->regs);
314 goto put_master;
315 }
316
317 spi->clk = devm_clk_get(&pdev->dev, NULL);
318 if (IS_ERR(spi->clk)) {
319 dev_err(&pdev->dev, "Unable to find bus clock\n");
320 ret = PTR_ERR(spi->clk);
321 goto put_master;
322 }
323
324 irq = platform_get_irq(pdev, 0);
325 if (irq < 0) {
326 dev_err(&pdev->dev, "Unable to find interrupt\n");
327 ret = irq;
328 goto put_master;
329 }
330
331 /* Optional parameters */
332 ret =
333 of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth",
334 &spi->fifo_depth);
335 if (ret < 0)
336 spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH;
337
338 ret =
339 of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word",
340 &max_bits_per_word);
341
342 if (!ret && max_bits_per_word < 8) {
343 dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n");
344 ret = -EINVAL;
345 goto put_master;
346 }
347
348 /* Spin up the bus clock before hitting registers */
349 ret = clk_prepare_enable(spi->clk);
350 if (ret) {
351 dev_err(&pdev->dev, "Unable to enable bus clock\n");
352 goto put_master;
353 }
354
355 /* probe the number of CS lines */
356 spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
357 sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU);
358 cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
359 sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
360 if (!cs_bits) {
361 dev_err(&pdev->dev, "Could not auto probe CS lines\n");
362 ret = -EINVAL;
363 goto put_master;
364 }
365
366 num_cs = ilog2(cs_bits) + 1;
367 if (num_cs > SIFIVE_SPI_MAX_CS) {
368 dev_err(&pdev->dev, "Invalid number of spi slaves\n");
369 ret = -EINVAL;
370 goto put_master;
371 }
372
373 /* Define our master */
374 master->dev.of_node = pdev->dev.of_node;
375 master->bus_num = pdev->id;
376 master->num_chipselect = num_cs;
377 master->mode_bits = SPI_CPHA | SPI_CPOL
378 | SPI_CS_HIGH | SPI_LSB_FIRST
379 | SPI_TX_DUAL | SPI_TX_QUAD
380 | SPI_RX_DUAL | SPI_RX_QUAD;
381 /* TODO: add driver support for bits_per_word < 8
382 * we need to "left-align" the bits (unless SPI_LSB_FIRST)
383 */
384 master->bits_per_word_mask = SPI_BPW_MASK(8);
385 master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS;
386 master->prepare_message = sifive_spi_prepare_message;
387 master->set_cs = sifive_spi_set_cs;
388 master->transfer_one = sifive_spi_transfer_one;
389
390 pdev->dev.dma_mask = NULL;
391 /* Configure the SPI master hardware */
392 sifive_spi_init(spi);
393
394 /* Register for SPI Interrupt */
395 ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0,
396 dev_name(&pdev->dev), spi);
397 if (ret) {
398 dev_err(&pdev->dev, "Unable to bind to interrupt\n");
399 goto put_master;
400 }
401
402 dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
403 irq, master->num_chipselect);
404
405 ret = devm_spi_register_master(&pdev->dev, master);
406 if (ret < 0) {
407 dev_err(&pdev->dev, "spi_register_master failed\n");
408 goto put_master;
409 }
410
411 return 0;
412
413put_master:
414 spi_master_put(master);
415
416 return ret;
417}
418
419static int sifive_spi_remove(struct platform_device *pdev)
420{
421 struct spi_master *master = platform_get_drvdata(pdev);
422 struct sifive_spi *spi = spi_master_get_devdata(master);
423
424 /* Disable all the interrupts just in case */
425 sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
426
427 return 0;
428}
429
430static const struct of_device_id sifive_spi_of_match[] = {
431 { .compatible = "sifive,spi0", },
432 {}
433};
434MODULE_DEVICE_TABLE(of, sifive_spi_of_match);
435
436static struct platform_driver sifive_spi_driver = {
437 .probe = sifive_spi_probe,
438 .remove = sifive_spi_remove,
439 .driver = {
440 .name = SIFIVE_SPI_DRIVER_NAME,
441 .of_match_table = sifive_spi_of_match,
442 },
443};
444module_platform_driver(sifive_spi_driver);
445
446MODULE_AUTHOR("SiFive, Inc. <sifive@sifive.com>");
447MODULE_DESCRIPTION("SiFive SPI driver");
448MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index fa324ce194b2..1b7eebb72c07 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -2,6 +2,9 @@
2// Copyright (C) 2018 Spreadtrum Communications Inc. 2// Copyright (C) 2018 Spreadtrum Communications Inc.
3 3
4#include <linux/clk.h> 4#include <linux/clk.h>
5#include <linux/dmaengine.h>
6#include <linux/dma-mapping.h>
7#include <linux/dma/sprd-dma.h>
5#include <linux/interrupt.h> 8#include <linux/interrupt.h>
6#include <linux/io.h> 9#include <linux/io.h>
7#include <linux/iopoll.h> 10#include <linux/iopoll.h>
@@ -9,6 +12,7 @@
9#include <linux/module.h> 12#include <linux/module.h>
10#include <linux/of.h> 13#include <linux/of.h>
11#include <linux/of_device.h> 14#include <linux/of_device.h>
15#include <linux/of_dma.h>
12#include <linux/platform_device.h> 16#include <linux/platform_device.h>
13#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
14#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
@@ -128,11 +132,28 @@
128#define SPRD_SPI_DEFAULT_SOURCE 26000000 132#define SPRD_SPI_DEFAULT_SOURCE 26000000
129#define SPRD_SPI_MAX_SPEED_HZ 48000000 133#define SPRD_SPI_MAX_SPEED_HZ 48000000
130#define SPRD_SPI_AUTOSUSPEND_DELAY 100 134#define SPRD_SPI_AUTOSUSPEND_DELAY 100
135#define SPRD_SPI_DMA_STEP 8
136
137enum sprd_spi_dma_channel {
138 SPRD_SPI_RX,
139 SPRD_SPI_TX,
140 SPRD_SPI_MAX,
141};
142
143struct sprd_spi_dma {
144 bool enable;
145 struct dma_chan *dma_chan[SPRD_SPI_MAX];
146 enum dma_slave_buswidth width;
147 u32 fragmens_len;
148 u32 rx_len;
149};
131 150
132struct sprd_spi { 151struct sprd_spi {
133 void __iomem *base; 152 void __iomem *base;
153 phys_addr_t phy_base;
134 struct device *dev; 154 struct device *dev;
135 struct clk *clk; 155 struct clk *clk;
156 int irq;
136 u32 src_clk; 157 u32 src_clk;
137 u32 hw_mode; 158 u32 hw_mode;
138 u32 trans_len; 159 u32 trans_len;
@@ -141,6 +162,8 @@ struct sprd_spi {
141 u32 hw_speed_hz; 162 u32 hw_speed_hz;
142 u32 len; 163 u32 len;
143 int status; 164 int status;
165 struct sprd_spi_dma dma;
166 struct completion xfer_completion;
144 const void *tx_buf; 167 const void *tx_buf;
145 void *rx_buf; 168 void *rx_buf;
146 int (*read_bufs)(struct sprd_spi *ss, u32 len); 169 int (*read_bufs)(struct sprd_spi *ss, u32 len);
@@ -431,6 +454,208 @@ complete:
431 return ret; 454 return ret;
432} 455}
433 456
457static void sprd_spi_irq_enable(struct sprd_spi *ss)
458{
459 u32 val;
460
461 /* Clear interrupt status before enabling interrupt. */
462 writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
463 ss->base + SPRD_SPI_INT_CLR);
464 /* Enable SPI interrupt only in DMA mode. */
465 val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
466 writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
467 SPRD_SPI_RX_END_INT_EN,
468 ss->base + SPRD_SPI_INT_EN);
469}
470
471static void sprd_spi_irq_disable(struct sprd_spi *ss)
472{
473 writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
474}
475
476static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
477{
478 u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
479
480 if (enable)
481 val |= SPRD_SPI_DMA_EN;
482 else
483 val &= ~SPRD_SPI_DMA_EN;
484
485 writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
486}
487
488static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
489 struct dma_slave_config *c,
490 struct sg_table *sg,
491 enum dma_transfer_direction dir)
492{
493 struct dma_async_tx_descriptor *desc;
494 dma_cookie_t cookie;
495 unsigned long flags;
496 int ret;
497
498 ret = dmaengine_slave_config(dma_chan, c);
499 if (ret < 0)
500 return ret;
501
502 flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
503 SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
504 desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
505 if (!desc)
506 return -ENODEV;
507
508 cookie = dmaengine_submit(desc);
509 if (dma_submit_error(cookie))
510 return dma_submit_error(cookie);
511
512 dma_async_issue_pending(dma_chan);
513
514 return 0;
515}
516
517static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
518{
519 struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
520 struct dma_slave_config config = {
521 .src_addr = ss->phy_base,
522 .src_addr_width = ss->dma.width,
523 .dst_addr_width = ss->dma.width,
524 .dst_maxburst = ss->dma.fragmens_len,
525 };
526 int ret;
527
528 ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
529 if (ret)
530 return ret;
531
532 return ss->dma.rx_len;
533}
534
535static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
536{
537 struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
538 struct dma_slave_config config = {
539 .dst_addr = ss->phy_base,
540 .src_addr_width = ss->dma.width,
541 .dst_addr_width = ss->dma.width,
542 .src_maxburst = ss->dma.fragmens_len,
543 };
544 int ret;
545
546 ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
547 if (ret)
548 return ret;
549
550 return t->len;
551}
552
553static int sprd_spi_dma_request(struct sprd_spi *ss)
554{
555 ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
556 if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) {
557 if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER)
558 return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
559
560 dev_err(ss->dev, "request RX DMA channel failed!\n");
561 return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
562 }
563
564 ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
565 if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
566 if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER)
567 return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
568
569 dev_err(ss->dev, "request TX DMA channel failed!\n");
570 dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
571 return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
572 }
573
574 return 0;
575}
576
577static void sprd_spi_dma_release(struct sprd_spi *ss)
578{
579 if (ss->dma.dma_chan[SPRD_SPI_RX])
580 dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
581
582 if (ss->dma.dma_chan[SPRD_SPI_TX])
583 dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
584}
585
586static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
587 struct spi_transfer *t)
588{
589 struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
590 u32 trans_len = ss->trans_len;
591 int ret, write_size = 0;
592
593 reinit_completion(&ss->xfer_completion);
594 sprd_spi_irq_enable(ss);
595 if (ss->trans_mode & SPRD_SPI_TX_MODE) {
596 write_size = sprd_spi_dma_tx_config(ss, t);
597 sprd_spi_set_tx_length(ss, trans_len);
598
599 /*
600 * For our 3 wires mode or dual TX line mode, we need
601 * to request the controller to transfer.
602 */
603 if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
604 sprd_spi_tx_req(ss);
605 } else {
606 sprd_spi_set_rx_length(ss, trans_len);
607
608 /*
609 * For our 3 wires mode or dual TX line mode, we need
610 * to request the controller to read.
611 */
612 if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
613 sprd_spi_rx_req(ss);
614 else
615 write_size = ss->write_bufs(ss, trans_len);
616 }
617
618 if (write_size < 0) {
619 ret = write_size;
620 dev_err(ss->dev, "failed to write, ret = %d\n", ret);
621 goto trans_complete;
622 }
623
624 if (ss->trans_mode & SPRD_SPI_RX_MODE) {
625 /*
626 * Set up the DMA receive data length, which must be an
627 * integral multiple of fragment length. But when the length
628 * of received data is less than fragment length, DMA can be
629 * configured to receive data according to the actual length
630 * of received data.
631 */
632 ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
633 (t->len - t->len % ss->dma.fragmens_len) :
634 t->len;
635 ret = sprd_spi_dma_rx_config(ss, t);
636 if (ret < 0) {
637 dev_err(&sdev->dev,
638 "failed to configure rx DMA, ret = %d\n", ret);
639 goto trans_complete;
640 }
641 }
642
643 sprd_spi_dma_enable(ss, true);
644 wait_for_completion(&(ss->xfer_completion));
645
646 if (ss->trans_mode & SPRD_SPI_TX_MODE)
647 ret = write_size;
648 else
649 ret = ss->dma.rx_len;
650
651trans_complete:
652 sprd_spi_dma_enable(ss, false);
653 sprd_spi_enter_idle(ss);
654 sprd_spi_irq_disable(ss);
655
656 return ret;
657}
658
434static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz) 659static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
435{ 660{
436 /* 661 /*
@@ -516,16 +741,22 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev,
516 ss->trans_len = t->len; 741 ss->trans_len = t->len;
517 ss->read_bufs = sprd_spi_read_bufs_u8; 742 ss->read_bufs = sprd_spi_read_bufs_u8;
518 ss->write_bufs = sprd_spi_write_bufs_u8; 743 ss->write_bufs = sprd_spi_write_bufs_u8;
744 ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
745 ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
519 break; 746 break;
520 case 16: 747 case 16:
521 ss->trans_len = t->len >> 1; 748 ss->trans_len = t->len >> 1;
522 ss->read_bufs = sprd_spi_read_bufs_u16; 749 ss->read_bufs = sprd_spi_read_bufs_u16;
523 ss->write_bufs = sprd_spi_write_bufs_u16; 750 ss->write_bufs = sprd_spi_write_bufs_u16;
751 ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
752 ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
524 break; 753 break;
525 case 32: 754 case 32:
526 ss->trans_len = t->len >> 2; 755 ss->trans_len = t->len >> 2;
527 ss->read_bufs = sprd_spi_read_bufs_u32; 756 ss->read_bufs = sprd_spi_read_bufs_u32;
528 ss->write_bufs = sprd_spi_write_bufs_u32; 757 ss->write_bufs = sprd_spi_write_bufs_u32;
758 ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
759 ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
529 break; 760 break;
530 default: 761 default:
531 return -EINVAL; 762 return -EINVAL;
@@ -563,7 +794,11 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr,
563 if (ret) 794 if (ret)
564 goto setup_err; 795 goto setup_err;
565 796
566 ret = sprd_spi_txrx_bufs(sdev, t); 797 if (sctlr->can_dma(sctlr, sdev, t))
798 ret = sprd_spi_dma_txrx_bufs(sdev, t);
799 else
800 ret = sprd_spi_txrx_bufs(sdev, t);
801
567 if (ret == t->len) 802 if (ret == t->len)
568 ret = 0; 803 ret = 0;
569 else if (ret >= 0) 804 else if (ret >= 0)
@@ -575,6 +810,53 @@ setup_err:
575 return ret; 810 return ret;
576} 811}
577 812
813static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
814{
815 struct sprd_spi *ss = (struct sprd_spi *)data;
816 u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
817
818 if (val & SPRD_SPI_MASK_TX_END) {
819 writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
820 if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
821 complete(&ss->xfer_completion);
822
823 return IRQ_HANDLED;
824 }
825
826 if (val & SPRD_SPI_MASK_RX_END) {
827 writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
828 if (ss->dma.rx_len < ss->len) {
829 ss->rx_buf += ss->dma.rx_len;
830 ss->dma.rx_len +=
831 ss->read_bufs(ss, ss->len - ss->dma.rx_len);
832 }
833 complete(&ss->xfer_completion);
834
835 return IRQ_HANDLED;
836 }
837
838 return IRQ_NONE;
839}
840
841static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
842{
843 int ret;
844
845 ss->irq = platform_get_irq(pdev, 0);
846 if (ss->irq < 0) {
847 dev_err(&pdev->dev, "failed to get irq resource\n");
848 return ss->irq;
849 }
850
851 ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
852 0, pdev->name, ss);
853 if (ret)
854 dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
855 ss->irq, ret);
856
857 return ret;
858}
859
578static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss) 860static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
579{ 861{
580 struct clk *clk_spi, *clk_parent; 862 struct clk *clk_spi, *clk_parent;
@@ -605,6 +887,35 @@ static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
605 return 0; 887 return 0;
606} 888}
607 889
890static bool sprd_spi_can_dma(struct spi_controller *sctlr,
891 struct spi_device *spi, struct spi_transfer *t)
892{
893 struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
894
895 return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
896}
897
898static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
899{
900 int ret;
901
902 ret = sprd_spi_dma_request(ss);
903 if (ret) {
904 if (ret == -EPROBE_DEFER)
905 return ret;
906
907 dev_warn(&pdev->dev,
908 "failed to request dma, enter no dma mode, ret = %d\n",
909 ret);
910
911 return 0;
912 }
913
914 ss->dma.enable = true;
915
916 return 0;
917}
918
608static int sprd_spi_probe(struct platform_device *pdev) 919static int sprd_spi_probe(struct platform_device *pdev)
609{ 920{
610 struct spi_controller *sctlr; 921 struct spi_controller *sctlr;
@@ -625,25 +936,36 @@ static int sprd_spi_probe(struct platform_device *pdev)
625 goto free_controller; 936 goto free_controller;
626 } 937 }
627 938
939 ss->phy_base = res->start;
628 ss->dev = &pdev->dev; 940 ss->dev = &pdev->dev;
629 sctlr->dev.of_node = pdev->dev.of_node; 941 sctlr->dev.of_node = pdev->dev.of_node;
630 sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL; 942 sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
631 sctlr->bus_num = pdev->id; 943 sctlr->bus_num = pdev->id;
632 sctlr->set_cs = sprd_spi_chipselect; 944 sctlr->set_cs = sprd_spi_chipselect;
633 sctlr->transfer_one = sprd_spi_transfer_one; 945 sctlr->transfer_one = sprd_spi_transfer_one;
946 sctlr->can_dma = sprd_spi_can_dma;
634 sctlr->auto_runtime_pm = true; 947 sctlr->auto_runtime_pm = true;
635 sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1, 948 sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
636 SPRD_SPI_MAX_SPEED_HZ); 949 SPRD_SPI_MAX_SPEED_HZ);
637 950
951 init_completion(&ss->xfer_completion);
638 platform_set_drvdata(pdev, sctlr); 952 platform_set_drvdata(pdev, sctlr);
639 ret = sprd_spi_clk_init(pdev, ss); 953 ret = sprd_spi_clk_init(pdev, ss);
640 if (ret) 954 if (ret)
641 goto free_controller; 955 goto free_controller;
642 956
643 ret = clk_prepare_enable(ss->clk); 957 ret = sprd_spi_irq_init(pdev, ss);
958 if (ret)
959 goto free_controller;
960
961 ret = sprd_spi_dma_init(pdev, ss);
644 if (ret) 962 if (ret)
645 goto free_controller; 963 goto free_controller;
646 964
965 ret = clk_prepare_enable(ss->clk);
966 if (ret)
967 goto release_dma;
968
647 ret = pm_runtime_set_active(&pdev->dev); 969 ret = pm_runtime_set_active(&pdev->dev);
648 if (ret < 0) 970 if (ret < 0)
649 goto disable_clk; 971 goto disable_clk;
@@ -672,6 +994,8 @@ err_rpm_put:
672 pm_runtime_disable(&pdev->dev); 994 pm_runtime_disable(&pdev->dev);
673disable_clk: 995disable_clk:
674 clk_disable_unprepare(ss->clk); 996 clk_disable_unprepare(ss->clk);
997release_dma:
998 sprd_spi_dma_release(ss);
675free_controller: 999free_controller:
676 spi_controller_put(sctlr); 1000 spi_controller_put(sctlr);
677 1001
@@ -690,6 +1014,10 @@ static int sprd_spi_remove(struct platform_device *pdev)
690 return ret; 1014 return ret;
691 } 1015 }
692 1016
1017 spi_controller_suspend(sctlr);
1018
1019 if (ss->dma.enable)
1020 sprd_spi_dma_release(ss);
693 clk_disable_unprepare(ss->clk); 1021 clk_disable_unprepare(ss->clk);
694 pm_runtime_put_noidle(&pdev->dev); 1022 pm_runtime_put_noidle(&pdev->dev);
695 pm_runtime_disable(&pdev->dev); 1023 pm_runtime_disable(&pdev->dev);
@@ -702,6 +1030,9 @@ static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
702 struct spi_controller *sctlr = dev_get_drvdata(dev); 1030 struct spi_controller *sctlr = dev_get_drvdata(dev);
703 struct sprd_spi *ss = spi_controller_get_devdata(sctlr); 1031 struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
704 1032
1033 if (ss->dma.enable)
1034 sprd_spi_dma_release(ss);
1035
705 clk_disable_unprepare(ss->clk); 1036 clk_disable_unprepare(ss->clk);
706 1037
707 return 0; 1038 return 0;
@@ -717,7 +1048,14 @@ static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
717 if (ret) 1048 if (ret)
718 return ret; 1049 return ret;
719 1050
720 return 0; 1051 if (!ss->dma.enable)
1052 return 0;
1053
1054 ret = sprd_spi_dma_request(ss);
1055 if (ret)
1056 clk_disable_unprepare(ss->clk);
1057
1058 return ret;
721} 1059}
722 1060
723static const struct dev_pm_ops sprd_spi_pm_ops = { 1061static const struct dev_pm_ops sprd_spi_pm_ops = {
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index ad1e55d3d5d5..4186ed20d796 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -1,23 +1,10 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * STMicroelectronics STM32 SPI Controller driver (master mode only) 2//
3 * 3// STMicroelectronics STM32 SPI Controller driver (master mode only)
4 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved 4//
5 * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics. 5// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
6 * 6// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
7 * License terms: GPL V2.0. 7
8 *
9 * spi_stm32 driver is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * spi_stm32 driver is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
16 * details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>.
20 */
21#include <linux/debugfs.h> 8#include <linux/debugfs.h>
22#include <linux/clk.h> 9#include <linux/clk.h>
23#include <linux/delay.h> 10#include <linux/delay.h>
@@ -33,99 +20,251 @@
33 20
34#define DRIVER_NAME "spi_stm32" 21#define DRIVER_NAME "spi_stm32"
35 22
36/* STM32 SPI registers */ 23/* STM32F4 SPI registers */
37#define STM32_SPI_CR1 0x00 24#define STM32F4_SPI_CR1 0x00
38#define STM32_SPI_CR2 0x04 25#define STM32F4_SPI_CR2 0x04
39#define STM32_SPI_CFG1 0x08 26#define STM32F4_SPI_SR 0x08
40#define STM32_SPI_CFG2 0x0C 27#define STM32F4_SPI_DR 0x0C
41#define STM32_SPI_IER 0x10 28#define STM32F4_SPI_I2SCFGR 0x1C
42#define STM32_SPI_SR 0x14 29
43#define STM32_SPI_IFCR 0x18 30/* STM32F4_SPI_CR1 bit fields */
44#define STM32_SPI_TXDR 0x20 31#define STM32F4_SPI_CR1_CPHA BIT(0)
45#define STM32_SPI_RXDR 0x30 32#define STM32F4_SPI_CR1_CPOL BIT(1)
46#define STM32_SPI_I2SCFGR 0x50 33#define STM32F4_SPI_CR1_MSTR BIT(2)
47 34#define STM32F4_SPI_CR1_BR_SHIFT 3
48/* STM32_SPI_CR1 bit fields */ 35#define STM32F4_SPI_CR1_BR GENMASK(5, 3)
49#define SPI_CR1_SPE BIT(0) 36#define STM32F4_SPI_CR1_SPE BIT(6)
50#define SPI_CR1_MASRX BIT(8) 37#define STM32F4_SPI_CR1_LSBFRST BIT(7)
51#define SPI_CR1_CSTART BIT(9) 38#define STM32F4_SPI_CR1_SSI BIT(8)
52#define SPI_CR1_CSUSP BIT(10) 39#define STM32F4_SPI_CR1_SSM BIT(9)
53#define SPI_CR1_HDDIR BIT(11) 40#define STM32F4_SPI_CR1_RXONLY BIT(10)
54#define SPI_CR1_SSI BIT(12) 41#define STM32F4_SPI_CR1_DFF BIT(11)
55 42#define STM32F4_SPI_CR1_CRCNEXT BIT(12)
56/* STM32_SPI_CR2 bit fields */ 43#define STM32F4_SPI_CR1_CRCEN BIT(13)
57#define SPI_CR2_TSIZE_SHIFT 0 44#define STM32F4_SPI_CR1_BIDIOE BIT(14)
58#define SPI_CR2_TSIZE GENMASK(15, 0) 45#define STM32F4_SPI_CR1_BIDIMODE BIT(15)
59 46#define STM32F4_SPI_CR1_BR_MIN 0
60/* STM32_SPI_CFG1 bit fields */ 47#define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
61#define SPI_CFG1_DSIZE_SHIFT 0 48
62#define SPI_CFG1_DSIZE GENMASK(4, 0) 49/* STM32F4_SPI_CR2 bit fields */
63#define SPI_CFG1_FTHLV_SHIFT 5 50#define STM32F4_SPI_CR2_RXDMAEN BIT(0)
64#define SPI_CFG1_FTHLV GENMASK(8, 5) 51#define STM32F4_SPI_CR2_TXDMAEN BIT(1)
65#define SPI_CFG1_RXDMAEN BIT(14) 52#define STM32F4_SPI_CR2_SSOE BIT(2)
66#define SPI_CFG1_TXDMAEN BIT(15) 53#define STM32F4_SPI_CR2_FRF BIT(4)
67#define SPI_CFG1_MBR_SHIFT 28 54#define STM32F4_SPI_CR2_ERRIE BIT(5)
68#define SPI_CFG1_MBR GENMASK(30, 28) 55#define STM32F4_SPI_CR2_RXNEIE BIT(6)
69#define SPI_CFG1_MBR_MIN 0 56#define STM32F4_SPI_CR2_TXEIE BIT(7)
70#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28) 57
71 58/* STM32F4_SPI_SR bit fields */
72/* STM32_SPI_CFG2 bit fields */ 59#define STM32F4_SPI_SR_RXNE BIT(0)
73#define SPI_CFG2_MIDI_SHIFT 4 60#define STM32F4_SPI_SR_TXE BIT(1)
74#define SPI_CFG2_MIDI GENMASK(7, 4) 61#define STM32F4_SPI_SR_CHSIDE BIT(2)
75#define SPI_CFG2_COMM_SHIFT 17 62#define STM32F4_SPI_SR_UDR BIT(3)
76#define SPI_CFG2_COMM GENMASK(18, 17) 63#define STM32F4_SPI_SR_CRCERR BIT(4)
77#define SPI_CFG2_SP_SHIFT 19 64#define STM32F4_SPI_SR_MODF BIT(5)
78#define SPI_CFG2_SP GENMASK(21, 19) 65#define STM32F4_SPI_SR_OVR BIT(6)
79#define SPI_CFG2_MASTER BIT(22) 66#define STM32F4_SPI_SR_BSY BIT(7)
80#define SPI_CFG2_LSBFRST BIT(23) 67#define STM32F4_SPI_SR_FRE BIT(8)
81#define SPI_CFG2_CPHA BIT(24) 68
82#define SPI_CFG2_CPOL BIT(25) 69/* STM32F4_SPI_I2SCFGR bit fields */
83#define SPI_CFG2_SSM BIT(26) 70#define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
84#define SPI_CFG2_AFCNTR BIT(31) 71
85 72/* STM32F4 SPI Baud Rate min/max divisor */
86/* STM32_SPI_IER bit fields */ 73#define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
87#define SPI_IER_RXPIE BIT(0) 74#define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
88#define SPI_IER_TXPIE BIT(1) 75
89#define SPI_IER_DXPIE BIT(2) 76/* STM32H7 SPI registers */
90#define SPI_IER_EOTIE BIT(3) 77#define STM32H7_SPI_CR1 0x00
91#define SPI_IER_TXTFIE BIT(4) 78#define STM32H7_SPI_CR2 0x04
92#define SPI_IER_OVRIE BIT(6) 79#define STM32H7_SPI_CFG1 0x08
93#define SPI_IER_MODFIE BIT(9) 80#define STM32H7_SPI_CFG2 0x0C
94#define SPI_IER_ALL GENMASK(10, 0) 81#define STM32H7_SPI_IER 0x10
95 82#define STM32H7_SPI_SR 0x14
96/* STM32_SPI_SR bit fields */ 83#define STM32H7_SPI_IFCR 0x18
97#define SPI_SR_RXP BIT(0) 84#define STM32H7_SPI_TXDR 0x20
98#define SPI_SR_TXP BIT(1) 85#define STM32H7_SPI_RXDR 0x30
99#define SPI_SR_EOT BIT(3) 86#define STM32H7_SPI_I2SCFGR 0x50
100#define SPI_SR_OVR BIT(6) 87
101#define SPI_SR_MODF BIT(9) 88/* STM32H7_SPI_CR1 bit fields */
102#define SPI_SR_SUSP BIT(11) 89#define STM32H7_SPI_CR1_SPE BIT(0)
103#define SPI_SR_RXPLVL_SHIFT 13 90#define STM32H7_SPI_CR1_MASRX BIT(8)
104#define SPI_SR_RXPLVL GENMASK(14, 13) 91#define STM32H7_SPI_CR1_CSTART BIT(9)
105#define SPI_SR_RXWNE BIT(15) 92#define STM32H7_SPI_CR1_CSUSP BIT(10)
106 93#define STM32H7_SPI_CR1_HDDIR BIT(11)
107/* STM32_SPI_IFCR bit fields */ 94#define STM32H7_SPI_CR1_SSI BIT(12)
108#define SPI_IFCR_ALL GENMASK(11, 3) 95
109 96/* STM32H7_SPI_CR2 bit fields */
110/* STM32_SPI_I2SCFGR bit fields */ 97#define STM32H7_SPI_CR2_TSIZE_SHIFT 0
111#define SPI_I2SCFGR_I2SMOD BIT(0) 98#define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)
112 99
113/* SPI Master Baud Rate min/max divisor */ 100/* STM32H7_SPI_CFG1 bit fields */
114#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN) 101#define STM32H7_SPI_CFG1_DSIZE_SHIFT 0
115#define SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX) 102#define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
116 103#define STM32H7_SPI_CFG1_FTHLV_SHIFT 5
117/* SPI Communication mode */ 104#define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
105#define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
106#define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
107#define STM32H7_SPI_CFG1_MBR_SHIFT 28
108#define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
109#define STM32H7_SPI_CFG1_MBR_MIN 0
110#define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
111
112/* STM32H7_SPI_CFG2 bit fields */
113#define STM32H7_SPI_CFG2_MIDI_SHIFT 4
114#define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
115#define STM32H7_SPI_CFG2_COMM_SHIFT 17
116#define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
117#define STM32H7_SPI_CFG2_SP_SHIFT 19
118#define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
119#define STM32H7_SPI_CFG2_MASTER BIT(22)
120#define STM32H7_SPI_CFG2_LSBFRST BIT(23)
121#define STM32H7_SPI_CFG2_CPHA BIT(24)
122#define STM32H7_SPI_CFG2_CPOL BIT(25)
123#define STM32H7_SPI_CFG2_SSM BIT(26)
124#define STM32H7_SPI_CFG2_AFCNTR BIT(31)
125
126/* STM32H7_SPI_IER bit fields */
127#define STM32H7_SPI_IER_RXPIE BIT(0)
128#define STM32H7_SPI_IER_TXPIE BIT(1)
129#define STM32H7_SPI_IER_DXPIE BIT(2)
130#define STM32H7_SPI_IER_EOTIE BIT(3)
131#define STM32H7_SPI_IER_TXTFIE BIT(4)
132#define STM32H7_SPI_IER_OVRIE BIT(6)
133#define STM32H7_SPI_IER_MODFIE BIT(9)
134#define STM32H7_SPI_IER_ALL GENMASK(10, 0)
135
136/* STM32H7_SPI_SR bit fields */
137#define STM32H7_SPI_SR_RXP BIT(0)
138#define STM32H7_SPI_SR_TXP BIT(1)
139#define STM32H7_SPI_SR_EOT BIT(3)
140#define STM32H7_SPI_SR_OVR BIT(6)
141#define STM32H7_SPI_SR_MODF BIT(9)
142#define STM32H7_SPI_SR_SUSP BIT(11)
143#define STM32H7_SPI_SR_RXPLVL_SHIFT 13
144#define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
145#define STM32H7_SPI_SR_RXWNE BIT(15)
146
147/* STM32H7_SPI_IFCR bit fields */
148#define STM32H7_SPI_IFCR_ALL GENMASK(11, 3)
149
150/* STM32H7_SPI_I2SCFGR bit fields */
151#define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
152
153/* STM32H7 SPI Master Baud Rate min/max divisor */
154#define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
155#define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
156
157/* STM32H7 SPI Communication mode */
158#define STM32H7_SPI_FULL_DUPLEX 0
159#define STM32H7_SPI_SIMPLEX_TX 1
160#define STM32H7_SPI_SIMPLEX_RX 2
161#define STM32H7_SPI_HALF_DUPLEX 3
162
163/* SPI Communication type */
118#define SPI_FULL_DUPLEX 0 164#define SPI_FULL_DUPLEX 0
119#define SPI_SIMPLEX_TX 1 165#define SPI_SIMPLEX_TX 1
120#define SPI_SIMPLEX_RX 2 166#define SPI_SIMPLEX_RX 2
121#define SPI_HALF_DUPLEX 3 167#define SPI_3WIRE_TX 3
168#define SPI_3WIRE_RX 4
122 169
123#define SPI_1HZ_NS 1000000000 170#define SPI_1HZ_NS 1000000000
124 171
172/*
173 * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
174 * without fifo buffers.
175 */
176#define SPI_DMA_MIN_BYTES 16
177
178/**
179 * stm32_spi_reg - stm32 SPI register & bitfield desc
180 * @reg: register offset
181 * @mask: bitfield mask
182 * @shift: left shift
183 */
184struct stm32_spi_reg {
185 int reg;
186 int mask;
187 int shift;
188};
189
190/**
191 * stm32_spi_regspec - stm32 registers definition, compatible dependent data
192 * en: enable register and SPI enable bit
193 * dma_rx_en: SPI DMA RX enable register end SPI DMA RX enable bit
194 * dma_tx_en: SPI DMA TX enable register end SPI DMA TX enable bit
195 * cpol: clock polarity register and polarity bit
196 * cpha: clock phase register and phase bit
197 * lsb_first: LSB transmitted first register and bit
198 * br: baud rate register and bitfields
199 * rx: SPI RX data register
200 * tx: SPI TX data register
201 */
202struct stm32_spi_regspec {
203 const struct stm32_spi_reg en;
204 const struct stm32_spi_reg dma_rx_en;
205 const struct stm32_spi_reg dma_tx_en;
206 const struct stm32_spi_reg cpol;
207 const struct stm32_spi_reg cpha;
208 const struct stm32_spi_reg lsb_first;
209 const struct stm32_spi_reg br;
210 const struct stm32_spi_reg rx;
211 const struct stm32_spi_reg tx;
212};
213
214struct stm32_spi;
215
216/**
217 * stm32_spi_cfg - stm32 compatible configuration data
218 * @regs: registers descriptions
219 * @get_fifo_size: routine to get fifo size
220 * @get_bpw_mask: routine to get bits per word mask
221 * @disable: routine to disable controller
222 * @config: routine to configure controller as SPI Master
223 * @set_bpw: routine to configure registers to for bits per word
224 * @set_mode: routine to configure registers to desired mode
225 * @set_data_idleness: optional routine to configure registers to desired idle
226 * time between frames (if driver has this functionality)
227 * set_number_of_data: optional routine to configure registers to desired
228 * number of data (if driver has this functionality)
229 * @can_dma: routine to determine if the transfer is eligible for DMA use
230 * @transfer_one_dma_start: routine to start transfer a single spi_transfer
231 * using DMA
232 * @dma_rx cb: routine to call after DMA RX channel operation is complete
233 * @dma_tx cb: routine to call after DMA TX channel operation is complete
234 * @transfer_one_irq: routine to configure interrupts for driver
235 * @irq_handler_event: Interrupt handler for SPI controller events
236 * @irq_handler_thread: thread of interrupt handler for SPI controller
237 * @baud_rate_div_min: minimum baud rate divisor
238 * @baud_rate_div_max: maximum baud rate divisor
239 * @has_fifo: boolean to know if fifo is used for driver
240 * @has_startbit: boolean to know if start bit is used to start transfer
241 */
242struct stm32_spi_cfg {
243 const struct stm32_spi_regspec *regs;
244 int (*get_fifo_size)(struct stm32_spi *spi);
245 int (*get_bpw_mask)(struct stm32_spi *spi);
246 void (*disable)(struct stm32_spi *spi);
247 int (*config)(struct stm32_spi *spi);
248 void (*set_bpw)(struct stm32_spi *spi);
249 int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
250 void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
251 int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
252 void (*transfer_one_dma_start)(struct stm32_spi *spi);
253 void (*dma_rx_cb)(void *data);
254 void (*dma_tx_cb)(void *data);
255 int (*transfer_one_irq)(struct stm32_spi *spi);
256 irqreturn_t (*irq_handler_event)(int irq, void *dev_id);
257 irqreturn_t (*irq_handler_thread)(int irq, void *dev_id);
258 unsigned int baud_rate_div_min;
259 unsigned int baud_rate_div_max;
260 bool has_fifo;
261};
262
125/** 263/**
126 * struct stm32_spi - private data of the SPI controller 264 * struct stm32_spi - private data of the SPI controller
127 * @dev: driver model representation of the controller 265 * @dev: driver model representation of the controller
128 * @master: controller master interface 266 * @master: controller master interface
267 * @cfg: compatible configuration data
129 * @base: virtual memory area 268 * @base: virtual memory area
130 * @clk: hw kernel clock feeding the SPI clock generator 269 * @clk: hw kernel clock feeding the SPI clock generator
131 * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator 270 * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
@@ -151,6 +290,7 @@
151struct stm32_spi { 290struct stm32_spi {
152 struct device *dev; 291 struct device *dev;
153 struct spi_master *master; 292 struct spi_master *master;
293 const struct stm32_spi_cfg *cfg;
154 void __iomem *base; 294 void __iomem *base;
155 struct clk *clk; 295 struct clk *clk;
156 u32 clk_rate; 296 u32 clk_rate;
@@ -176,6 +316,40 @@ struct stm32_spi {
176 dma_addr_t phys_addr; 316 dma_addr_t phys_addr;
177}; 317};
178 318
319static const struct stm32_spi_regspec stm32f4_spi_regspec = {
320 .en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },
321
322 .dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
323 .dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },
324
325 .cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
326 .cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
327 .lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
328 .br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },
329
330 .rx = { STM32F4_SPI_DR },
331 .tx = { STM32F4_SPI_DR },
332};
333
334static const struct stm32_spi_regspec stm32h7_spi_regspec = {
335 /* SPI data transfer is enabled but spi_ker_ck is idle.
336 * CFG1 and CFG2 registers are write protected when SPE is enabled.
337 */
338 .en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },
339
340 .dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
341 .dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },
342
343 .cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
344 .cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
345 .lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
346 .br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
347 STM32H7_SPI_CFG1_MBR_SHIFT },
348
349 .rx = { STM32H7_SPI_RXDR },
350 .tx = { STM32H7_SPI_TXDR },
351};
352
179static inline void stm32_spi_set_bits(struct stm32_spi *spi, 353static inline void stm32_spi_set_bits(struct stm32_spi *spi,
180 u32 offset, u32 bits) 354 u32 offset, u32 bits)
181{ 355{
@@ -191,22 +365,22 @@ static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
191} 365}
192 366
193/** 367/**
194 * stm32_spi_get_fifo_size - Return fifo size 368 * stm32h7_spi_get_fifo_size - Return fifo size
195 * @spi: pointer to the spi controller data structure 369 * @spi: pointer to the spi controller data structure
196 */ 370 */
197static int stm32_spi_get_fifo_size(struct stm32_spi *spi) 371static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi)
198{ 372{
199 unsigned long flags; 373 unsigned long flags;
200 u32 count = 0; 374 u32 count = 0;
201 375
202 spin_lock_irqsave(&spi->lock, flags); 376 spin_lock_irqsave(&spi->lock, flags);
203 377
204 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); 378 stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
205 379
206 while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP) 380 while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP)
207 writeb_relaxed(++count, spi->base + STM32_SPI_TXDR); 381 writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR);
208 382
209 stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); 383 stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
210 384
211 spin_unlock_irqrestore(&spi->lock, flags); 385 spin_unlock_irqrestore(&spi->lock, flags);
212 386
@@ -216,10 +390,20 @@ static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
216} 390}
217 391
218/** 392/**
219 * stm32_spi_get_bpw_mask - Return bits per word mask 393 * stm32f4_spi_get_bpw_mask - Return bits per word mask
220 * @spi: pointer to the spi controller data structure 394 * @spi: pointer to the spi controller data structure
221 */ 395 */
222static int stm32_spi_get_bpw_mask(struct stm32_spi *spi) 396static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
397{
398 dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
399 return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
400}
401
402/**
403 * stm32h7_spi_get_bpw_mask - Return bits per word mask
404 * @spi: pointer to the spi controller data structure
405 */
406static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
223{ 407{
224 unsigned long flags; 408 unsigned long flags;
225 u32 cfg1, max_bpw; 409 u32 cfg1, max_bpw;
@@ -230,10 +414,11 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
230 * The most significant bit at DSIZE bit field is reserved when the 414 * The most significant bit at DSIZE bit field is reserved when the
231 * maximum data size of periperal instances is limited to 16-bit 415 * maximum data size of periperal instances is limited to 16-bit
232 */ 416 */
233 stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE); 417 stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);
234 418
235 cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1); 419 cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
236 max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT; 420 max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >>
421 STM32H7_SPI_CFG1_DSIZE_SHIFT;
237 max_bpw += 1; 422 max_bpw += 1;
238 423
239 spin_unlock_irqrestore(&spi->lock, flags); 424 spin_unlock_irqrestore(&spi->lock, flags);
@@ -244,13 +429,16 @@ static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
244} 429}
245 430
246/** 431/**
247 * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value 432 * stm32_spi_prepare_mbr - Determine baud rate divisor value
248 * @spi: pointer to the spi controller data structure 433 * @spi: pointer to the spi controller data structure
249 * @speed_hz: requested speed 434 * @speed_hz: requested speed
435 * @min_div: minimum baud rate divisor
436 * @max_div: maximum baud rate divisor
250 * 437 *
251 * Return SPI_CFG1.MBR value in case of success or -EINVAL 438 * Return baud rate divisor value in case of success or -EINVAL
252 */ 439 */
253static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) 440static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
441 u32 min_div, u32 max_div)
254{ 442{
255 u32 div, mbrdiv; 443 u32 div, mbrdiv;
256 444
@@ -263,8 +451,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
263 * no need to check it there. 451 * no need to check it there.
264 * However, we need to ensure the following calculations. 452 * However, we need to ensure the following calculations.
265 */ 453 */
266 if (div < SPI_MBR_DIV_MIN || 454 if ((div < min_div) || (div > max_div))
267 div > SPI_MBR_DIV_MAX)
268 return -EINVAL; 455 return -EINVAL;
269 456
270 /* Determine the first power of 2 greater than or equal to div */ 457 /* Determine the first power of 2 greater than or equal to div */
@@ -279,10 +466,10 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
279} 466}
280 467
281/** 468/**
282 * stm32_spi_prepare_fthlv - Determine FIFO threshold level 469 * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
283 * @spi: pointer to the spi controller data structure 470 * @spi: pointer to the spi controller data structure
284 */ 471 */
285static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi) 472static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
286{ 473{
287 u32 fthlv, half_fifo; 474 u32 fthlv, half_fifo;
288 475
@@ -306,32 +493,62 @@ static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
306} 493}
307 494
308/** 495/**
309 * stm32_spi_write_txfifo - Write bytes in Transmit Data Register 496 * stm32f4_spi_write_tx - Write bytes to Transmit Data Register
310 * @spi: pointer to the spi controller data structure 497 * @spi: pointer to the spi controller data structure
311 * 498 *
312 * Read from tx_buf depends on remaining bytes to avoid to read beyond 499 * Read from tx_buf depends on remaining bytes to avoid to read beyond
313 * tx_buf end. 500 * tx_buf end.
314 */ 501 */
315static void stm32_spi_write_txfifo(struct stm32_spi *spi) 502static void stm32f4_spi_write_tx(struct stm32_spi *spi)
503{
504 if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
505 STM32F4_SPI_SR_TXE)) {
506 u32 offs = spi->cur_xferlen - spi->tx_len;
507
508 if (spi->cur_bpw == 16) {
509 const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
510
511 writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
512 spi->tx_len -= sizeof(u16);
513 } else {
514 const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
515
516 writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
517 spi->tx_len -= sizeof(u8);
518 }
519 }
520
521 dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
522}
523
524/**
525 * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register
526 * @spi: pointer to the spi controller data structure
527 *
528 * Read from tx_buf depends on remaining bytes to avoid to read beyond
529 * tx_buf end.
530 */
531static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
316{ 532{
317 while ((spi->tx_len > 0) && 533 while ((spi->tx_len > 0) &&
318 (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) { 534 (readl_relaxed(spi->base + STM32H7_SPI_SR) &
535 STM32H7_SPI_SR_TXP)) {
319 u32 offs = spi->cur_xferlen - spi->tx_len; 536 u32 offs = spi->cur_xferlen - spi->tx_len;
320 537
321 if (spi->tx_len >= sizeof(u32)) { 538 if (spi->tx_len >= sizeof(u32)) {
322 const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs); 539 const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
323 540
324 writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR); 541 writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR);
325 spi->tx_len -= sizeof(u32); 542 spi->tx_len -= sizeof(u32);
326 } else if (spi->tx_len >= sizeof(u16)) { 543 } else if (spi->tx_len >= sizeof(u16)) {
327 const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs); 544 const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
328 545
329 writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR); 546 writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR);
330 spi->tx_len -= sizeof(u16); 547 spi->tx_len -= sizeof(u16);
331 } else { 548 } else {
332 const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs); 549 const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
333 550
334 writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR); 551 writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR);
335 spi->tx_len -= sizeof(u8); 552 spi->tx_len -= sizeof(u8);
336 } 553 }
337 } 554 }
@@ -340,43 +557,74 @@ static void stm32_spi_write_txfifo(struct stm32_spi *spi)
340} 557}
341 558
342/** 559/**
343 * stm32_spi_read_rxfifo - Read bytes in Receive Data Register 560 * stm32f4_spi_read_rx - Read bytes from Receive Data Register
561 * @spi: pointer to the spi controller data structure
562 *
563 * Write in rx_buf depends on remaining bytes to avoid to write beyond
564 * rx_buf end.
565 */
566static void stm32f4_spi_read_rx(struct stm32_spi *spi)
567{
568 if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
569 STM32F4_SPI_SR_RXNE)) {
570 u32 offs = spi->cur_xferlen - spi->rx_len;
571
572 if (spi->cur_bpw == 16) {
573 u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
574
575 *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
576 spi->rx_len -= sizeof(u16);
577 } else {
578 u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
579
580 *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
581 spi->rx_len -= sizeof(u8);
582 }
583 }
584
585 dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len);
586}
587
588/**
589 * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
344 * @spi: pointer to the spi controller data structure 590 * @spi: pointer to the spi controller data structure
345 * 591 *
346 * Write in rx_buf depends on remaining bytes to avoid to write beyond 592 * Write in rx_buf depends on remaining bytes to avoid to write beyond
347 * rx_buf end. 593 * rx_buf end.
348 */ 594 */
349static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush) 595static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
350{ 596{
351 u32 sr = readl_relaxed(spi->base + STM32_SPI_SR); 597 u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
352 u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; 598 u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
599 STM32H7_SPI_SR_RXPLVL_SHIFT;
353 600
354 while ((spi->rx_len > 0) && 601 while ((spi->rx_len > 0) &&
355 ((sr & SPI_SR_RXP) || 602 ((sr & STM32H7_SPI_SR_RXP) ||
356 (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) { 603 (flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
357 u32 offs = spi->cur_xferlen - spi->rx_len; 604 u32 offs = spi->cur_xferlen - spi->rx_len;
358 605
359 if ((spi->rx_len >= sizeof(u32)) || 606 if ((spi->rx_len >= sizeof(u32)) ||
360 (flush && (sr & SPI_SR_RXWNE))) { 607 (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
361 u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs); 608 u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
362 609
363 *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR); 610 *rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
364 spi->rx_len -= sizeof(u32); 611 spi->rx_len -= sizeof(u32);
365 } else if ((spi->rx_len >= sizeof(u16)) || 612 } else if ((spi->rx_len >= sizeof(u16)) ||
366 (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) { 613 (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
367 u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs); 614 u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
368 615
369 *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR); 616 *rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
370 spi->rx_len -= sizeof(u16); 617 spi->rx_len -= sizeof(u16);
371 } else { 618 } else {
372 u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs); 619 u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
373 620
374 *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR); 621 *rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR);
375 spi->rx_len -= sizeof(u8); 622 spi->rx_len -= sizeof(u8);
376 } 623 }
377 624
378 sr = readl_relaxed(spi->base + STM32_SPI_SR); 625 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
379 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT; 626 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
627 STM32H7_SPI_SR_RXPLVL_SHIFT;
380 } 628 }
381 629
382 dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__, 630 dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
@@ -386,26 +634,76 @@ static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
386/** 634/**
387 * stm32_spi_enable - Enable SPI controller 635 * stm32_spi_enable - Enable SPI controller
388 * @spi: pointer to the spi controller data structure 636 * @spi: pointer to the spi controller data structure
389 *
390 * SPI data transfer is enabled but spi_ker_ck is idle.
391 * SPI_CFG1 and SPI_CFG2 are now write protected.
392 */ 637 */
393static void stm32_spi_enable(struct stm32_spi *spi) 638static void stm32_spi_enable(struct stm32_spi *spi)
394{ 639{
395 dev_dbg(spi->dev, "enable controller\n"); 640 dev_dbg(spi->dev, "enable controller\n");
396 641
397 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); 642 stm32_spi_set_bits(spi, spi->cfg->regs->en.reg,
643 spi->cfg->regs->en.mask);
398} 644}
399 645
400/** 646/**
401 * stm32_spi_disable - Disable SPI controller 647 * stm32f4_spi_disable - Disable SPI controller
648 * @spi: pointer to the spi controller data structure
649 */
650static void stm32f4_spi_disable(struct stm32_spi *spi)
651{
652 unsigned long flags;
653 u32 sr;
654
655 dev_dbg(spi->dev, "disable controller\n");
656
657 spin_lock_irqsave(&spi->lock, flags);
658
659 if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
660 STM32F4_SPI_CR1_SPE)) {
661 spin_unlock_irqrestore(&spi->lock, flags);
662 return;
663 }
664
665 /* Disable interrupts */
666 stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
667 STM32F4_SPI_CR2_RXNEIE |
668 STM32F4_SPI_CR2_ERRIE);
669
670 /* Wait until BSY = 0 */
671 if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
672 sr, !(sr & STM32F4_SPI_SR_BSY),
673 10, 100000) < 0) {
674 dev_warn(spi->dev, "disabling condition timeout\n");
675 }
676
677 if (spi->cur_usedma && spi->dma_tx)
678 dmaengine_terminate_all(spi->dma_tx);
679 if (spi->cur_usedma && spi->dma_rx)
680 dmaengine_terminate_all(spi->dma_rx);
681
682 stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);
683
684 stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
685 STM32F4_SPI_CR2_RXDMAEN);
686
687 /* Sequence to clear OVR flag */
688 readl_relaxed(spi->base + STM32F4_SPI_DR);
689 readl_relaxed(spi->base + STM32F4_SPI_SR);
690
691 spin_unlock_irqrestore(&spi->lock, flags);
692}
693
694/**
695 * stm32h7_spi_disable - Disable SPI controller
402 * @spi: pointer to the spi controller data structure 696 * @spi: pointer to the spi controller data structure
403 * 697 *
404 * RX-Fifo is flushed when SPI controller is disabled. To prevent any data 698 * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
405 * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in 699 * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
406 * RX-Fifo. 700 * RX-Fifo.
701 * Normally, if TSIZE has been configured, we should relax the hardware at the
702 * reception of the EOT interrupt. But in case of error, EOT will not be
703 * raised. So the subsystem unprepare_message call allows us to properly
704 * complete the transfer from an hardware point of view.
407 */ 705 */
408static void stm32_spi_disable(struct stm32_spi *spi) 706static void stm32h7_spi_disable(struct stm32_spi *spi)
409{ 707{
410 unsigned long flags; 708 unsigned long flags;
411 u32 cr1, sr; 709 u32 cr1, sr;
@@ -414,23 +712,23 @@ static void stm32_spi_disable(struct stm32_spi *spi)
414 712
415 spin_lock_irqsave(&spi->lock, flags); 713 spin_lock_irqsave(&spi->lock, flags);
416 714
417 cr1 = readl_relaxed(spi->base + STM32_SPI_CR1); 715 cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1);
418 716
419 if (!(cr1 & SPI_CR1_SPE)) { 717 if (!(cr1 & STM32H7_SPI_CR1_SPE)) {
420 spin_unlock_irqrestore(&spi->lock, flags); 718 spin_unlock_irqrestore(&spi->lock, flags);
421 return; 719 return;
422 } 720 }
423 721
424 /* Wait on EOT or suspend the flow */ 722 /* Wait on EOT or suspend the flow */
425 if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR, 723 if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
426 sr, !(sr & SPI_SR_EOT), 724 sr, !(sr & STM32H7_SPI_SR_EOT),
427 10, 100000) < 0) { 725 10, 100000) < 0) {
428 if (cr1 & SPI_CR1_CSTART) { 726 if (cr1 & STM32H7_SPI_CR1_CSTART) {
429 writel_relaxed(cr1 | SPI_CR1_CSUSP, 727 writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
430 spi->base + STM32_SPI_CR1); 728 spi->base + STM32H7_SPI_CR1);
431 if (readl_relaxed_poll_timeout_atomic( 729 if (readl_relaxed_poll_timeout_atomic(
432 spi->base + STM32_SPI_SR, 730 spi->base + STM32H7_SPI_SR,
433 sr, !(sr & SPI_SR_SUSP), 731 sr, !(sr & STM32H7_SPI_SR_SUSP),
434 10, 100000) < 0) 732 10, 100000) < 0)
435 dev_warn(spi->dev, 733 dev_warn(spi->dev,
436 "Suspend request timeout\n"); 734 "Suspend request timeout\n");
@@ -438,21 +736,21 @@ static void stm32_spi_disable(struct stm32_spi *spi)
438 } 736 }
439 737
440 if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0)) 738 if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
441 stm32_spi_read_rxfifo(spi, true); 739 stm32h7_spi_read_rxfifo(spi, true);
442 740
443 if (spi->cur_usedma && spi->tx_buf) 741 if (spi->cur_usedma && spi->dma_tx)
444 dmaengine_terminate_all(spi->dma_tx); 742 dmaengine_terminate_all(spi->dma_tx);
445 if (spi->cur_usedma && spi->rx_buf) 743 if (spi->cur_usedma && spi->dma_rx)
446 dmaengine_terminate_all(spi->dma_rx); 744 dmaengine_terminate_all(spi->dma_rx);
447 745
448 stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE); 746 stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
449 747
450 stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN | 748 stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN |
451 SPI_CFG1_RXDMAEN); 749 STM32H7_SPI_CFG1_RXDMAEN);
452 750
453 /* Disable interrupts and clear status flags */ 751 /* Disable interrupts and clear status flags */
454 writel_relaxed(0, spi->base + STM32_SPI_IER); 752 writel_relaxed(0, spi->base + STM32H7_SPI_IER);
455 writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR); 753 writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR);
456 754
457 spin_unlock_irqrestore(&spi->lock, flags); 755 spin_unlock_irqrestore(&spi->lock, flags);
458} 756}
@@ -460,26 +758,136 @@ static void stm32_spi_disable(struct stm32_spi *spi)
460/** 758/**
461 * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use 759 * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
462 * 760 *
463 * If the current transfer size is greater than fifo size, use DMA. 761 * If driver has fifo and the current transfer size is greater than fifo size,
762 * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes.
464 */ 763 */
465static bool stm32_spi_can_dma(struct spi_master *master, 764static bool stm32_spi_can_dma(struct spi_master *master,
466 struct spi_device *spi_dev, 765 struct spi_device *spi_dev,
467 struct spi_transfer *transfer) 766 struct spi_transfer *transfer)
468{ 767{
768 unsigned int dma_size;
469 struct stm32_spi *spi = spi_master_get_devdata(master); 769 struct stm32_spi *spi = spi_master_get_devdata(master);
470 770
771 if (spi->cfg->has_fifo)
772 dma_size = spi->fifo_size;
773 else
774 dma_size = SPI_DMA_MIN_BYTES;
775
471 dev_dbg(spi->dev, "%s: %s\n", __func__, 776 dev_dbg(spi->dev, "%s: %s\n", __func__,
472 (transfer->len > spi->fifo_size) ? "true" : "false"); 777 (transfer->len > dma_size) ? "true" : "false");
778
779 return (transfer->len > dma_size);
780}
781
782/**
783 * stm32f4_spi_irq_event - Interrupt handler for SPI controller events
784 * @irq: interrupt line
785 * @dev_id: SPI controller master interface
786 */
787static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
788{
789 struct spi_master *master = dev_id;
790 struct stm32_spi *spi = spi_master_get_devdata(master);
791 u32 sr, mask = 0;
792 unsigned long flags;
793 bool end = false;
794
795 spin_lock_irqsave(&spi->lock, flags);
796
797 sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
798 /*
799 * BSY flag is not handled in interrupt but it is normal behavior when
800 * this flag is set.
801 */
802 sr &= ~STM32F4_SPI_SR_BSY;
803
804 if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
805 spi->cur_comm == SPI_3WIRE_TX)) {
806 /* OVR flag shouldn't be handled for TX only mode */
807 sr &= ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE;
808 mask |= STM32F4_SPI_SR_TXE;
809 }
810
811 if (!spi->cur_usedma && spi->cur_comm == SPI_FULL_DUPLEX) {
812 /* TXE flag is set and is handled when RXNE flag occurs */
813 sr &= ~STM32F4_SPI_SR_TXE;
814 mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
815 }
816
817 if (!(sr & mask)) {
818 dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr);
819 spin_unlock_irqrestore(&spi->lock, flags);
820 return IRQ_NONE;
821 }
822
823 if (sr & STM32F4_SPI_SR_OVR) {
824 dev_warn(spi->dev, "Overrun: received value discarded\n");
825
826 /* Sequence to clear OVR flag */
827 readl_relaxed(spi->base + STM32F4_SPI_DR);
828 readl_relaxed(spi->base + STM32F4_SPI_SR);
829
830 /*
831 * If overrun is detected, it means that something went wrong,
832 * so stop the current transfer. Transfer can wait for next
833 * RXNE but DR is already read and end never happens.
834 */
835 end = true;
836 goto end_irq;
837 }
838
839 if (sr & STM32F4_SPI_SR_TXE) {
840 if (spi->tx_buf)
841 stm32f4_spi_write_tx(spi);
842 if (spi->tx_len == 0)
843 end = true;
844 }
845
846 if (sr & STM32F4_SPI_SR_RXNE) {
847 stm32f4_spi_read_rx(spi);
848 if (spi->rx_len == 0)
849 end = true;
850 else /* Load data for discontinuous mode */
851 stm32f4_spi_write_tx(spi);
852 }
853
854end_irq:
855 if (end) {
856 /* Immediately disable interrupts to do not generate new one */
857 stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
858 STM32F4_SPI_CR2_TXEIE |
859 STM32F4_SPI_CR2_RXNEIE |
860 STM32F4_SPI_CR2_ERRIE);
861 spin_unlock_irqrestore(&spi->lock, flags);
862 return IRQ_WAKE_THREAD;
863 }
864
865 spin_unlock_irqrestore(&spi->lock, flags);
866 return IRQ_HANDLED;
867}
868
869/**
870 * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
871 * @irq: interrupt line
872 * @dev_id: SPI controller master interface
873 */
874static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
875{
876 struct spi_master *master = dev_id;
877 struct stm32_spi *spi = spi_master_get_devdata(master);
878
879 spi_finalize_current_transfer(master);
880 stm32f4_spi_disable(spi);
473 881
474 return (transfer->len > spi->fifo_size); 882 return IRQ_HANDLED;
475} 883}
476 884
477/** 885/**
478 * stm32_spi_irq - Interrupt handler for SPI controller events 886 * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller
479 * @irq: interrupt line 887 * @irq: interrupt line
480 * @dev_id: SPI controller master interface 888 * @dev_id: SPI controller master interface
481 */ 889 */
482static irqreturn_t stm32_spi_irq(int irq, void *dev_id) 890static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
483{ 891{
484 struct spi_master *master = dev_id; 892 struct spi_master *master = dev_id;
485 struct stm32_spi *spi = spi_master_get_devdata(master); 893 struct stm32_spi *spi = spi_master_get_devdata(master);
@@ -489,19 +897,19 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
489 897
490 spin_lock_irqsave(&spi->lock, flags); 898 spin_lock_irqsave(&spi->lock, flags);
491 899
492 sr = readl_relaxed(spi->base + STM32_SPI_SR); 900 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
493 ier = readl_relaxed(spi->base + STM32_SPI_IER); 901 ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
494 902
495 mask = ier; 903 mask = ier;
496 /* EOTIE is triggered on EOT, SUSP and TXC events. */ 904 /* EOTIE is triggered on EOT, SUSP and TXC events. */
497 mask |= SPI_SR_SUSP; 905 mask |= STM32H7_SPI_SR_SUSP;
498 /* 906 /*
499 * When TXTF is set, DXPIE and TXPIE are cleared. So in case of 907 * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
500 * Full-Duplex, need to poll RXP event to know if there are remaining 908 * Full-Duplex, need to poll RXP event to know if there are remaining
501 * data, before disabling SPI. 909 * data, before disabling SPI.
502 */ 910 */
503 if (spi->rx_buf && !spi->cur_usedma) 911 if (spi->rx_buf && !spi->cur_usedma)
504 mask |= SPI_SR_RXP; 912 mask |= STM32H7_SPI_SR_RXP;
505 913
506 if (!(sr & mask)) { 914 if (!(sr & mask)) {
507 dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n", 915 dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
@@ -510,10 +918,10 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
510 return IRQ_NONE; 918 return IRQ_NONE;
511 } 919 }
512 920
513 if (sr & SPI_SR_SUSP) { 921 if (sr & STM32H7_SPI_SR_SUSP) {
514 dev_warn(spi->dev, "Communication suspended\n"); 922 dev_warn(spi->dev, "Communication suspended\n");
515 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) 923 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
516 stm32_spi_read_rxfifo(spi, false); 924 stm32h7_spi_read_rxfifo(spi, false);
517 /* 925 /*
518 * If communication is suspended while using DMA, it means 926 * If communication is suspended while using DMA, it means
519 * that something went wrong, so stop the current transfer 927 * that something went wrong, so stop the current transfer
@@ -522,15 +930,15 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
522 end = true; 930 end = true;
523 } 931 }
524 932
525 if (sr & SPI_SR_MODF) { 933 if (sr & STM32H7_SPI_SR_MODF) {
526 dev_warn(spi->dev, "Mode fault: transfer aborted\n"); 934 dev_warn(spi->dev, "Mode fault: transfer aborted\n");
527 end = true; 935 end = true;
528 } 936 }
529 937
530 if (sr & SPI_SR_OVR) { 938 if (sr & STM32H7_SPI_SR_OVR) {
531 dev_warn(spi->dev, "Overrun: received value discarded\n"); 939 dev_warn(spi->dev, "Overrun: received value discarded\n");
532 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) 940 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
533 stm32_spi_read_rxfifo(spi, false); 941 stm32h7_spi_read_rxfifo(spi, false);
534 /* 942 /*
535 * If overrun is detected while using DMA, it means that 943 * If overrun is detected while using DMA, it means that
536 * something went wrong, so stop the current transfer 944 * something went wrong, so stop the current transfer
@@ -539,27 +947,27 @@ static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
539 end = true; 947 end = true;
540 } 948 }
541 949
542 if (sr & SPI_SR_EOT) { 950 if (sr & STM32H7_SPI_SR_EOT) {
543 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) 951 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
544 stm32_spi_read_rxfifo(spi, true); 952 stm32h7_spi_read_rxfifo(spi, true);
545 end = true; 953 end = true;
546 } 954 }
547 955
548 if (sr & SPI_SR_TXP) 956 if (sr & STM32H7_SPI_SR_TXP)
549 if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0))) 957 if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
550 stm32_spi_write_txfifo(spi); 958 stm32h7_spi_write_txfifo(spi);
551 959
552 if (sr & SPI_SR_RXP) 960 if (sr & STM32H7_SPI_SR_RXP)
553 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) 961 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
554 stm32_spi_read_rxfifo(spi, false); 962 stm32h7_spi_read_rxfifo(spi, false);
555 963
556 writel_relaxed(mask, spi->base + STM32_SPI_IFCR); 964 writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
557 965
558 spin_unlock_irqrestore(&spi->lock, flags); 966 spin_unlock_irqrestore(&spi->lock, flags);
559 967
560 if (end) { 968 if (end) {
561 spi_finalize_current_transfer(master); 969 spi_finalize_current_transfer(master);
562 stm32_spi_disable(spi); 970 stm32h7_spi_disable(spi);
563 } 971 }
564 972
565 return IRQ_HANDLED; 973 return IRQ_HANDLED;
@@ -598,7 +1006,7 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
598 struct spi_device *spi_dev = msg->spi; 1006 struct spi_device *spi_dev = msg->spi;
599 struct device_node *np = spi_dev->dev.of_node; 1007 struct device_node *np = spi_dev->dev.of_node;
600 unsigned long flags; 1008 unsigned long flags;
601 u32 cfg2_clrb = 0, cfg2_setb = 0; 1009 u32 clrb = 0, setb = 0;
602 1010
603 /* SPI slave device may need time between data frames */ 1011 /* SPI slave device may need time between data frames */
604 spi->cur_midi = 0; 1012 spi->cur_midi = 0;
@@ -606,19 +1014,19 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
606 dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi); 1014 dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
607 1015
608 if (spi_dev->mode & SPI_CPOL) 1016 if (spi_dev->mode & SPI_CPOL)
609 cfg2_setb |= SPI_CFG2_CPOL; 1017 setb |= spi->cfg->regs->cpol.mask;
610 else 1018 else
611 cfg2_clrb |= SPI_CFG2_CPOL; 1019 clrb |= spi->cfg->regs->cpol.mask;
612 1020
613 if (spi_dev->mode & SPI_CPHA) 1021 if (spi_dev->mode & SPI_CPHA)
614 cfg2_setb |= SPI_CFG2_CPHA; 1022 setb |= spi->cfg->regs->cpha.mask;
615 else 1023 else
616 cfg2_clrb |= SPI_CFG2_CPHA; 1024 clrb |= spi->cfg->regs->cpha.mask;
617 1025
618 if (spi_dev->mode & SPI_LSB_FIRST) 1026 if (spi_dev->mode & SPI_LSB_FIRST)
619 cfg2_setb |= SPI_CFG2_LSBFRST; 1027 setb |= spi->cfg->regs->lsb_first.mask;
620 else 1028 else
621 cfg2_clrb |= SPI_CFG2_LSBFRST; 1029 clrb |= spi->cfg->regs->lsb_first.mask;
622 1030
623 dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n", 1031 dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
624 spi_dev->mode & SPI_CPOL, 1032 spi_dev->mode & SPI_CPOL,
@@ -628,11 +1036,12 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
628 1036
629 spin_lock_irqsave(&spi->lock, flags); 1037 spin_lock_irqsave(&spi->lock, flags);
630 1038
631 if (cfg2_clrb || cfg2_setb) 1039 /* CPOL, CPHA and LSB FIRST bits have common register */
1040 if (clrb || setb)
632 writel_relaxed( 1041 writel_relaxed(
633 (readl_relaxed(spi->base + STM32_SPI_CFG2) & 1042 (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) &
634 ~cfg2_clrb) | cfg2_setb, 1043 ~clrb) | setb,
635 spi->base + STM32_SPI_CFG2); 1044 spi->base + spi->cfg->regs->cpol.reg);
636 1045
637 spin_unlock_irqrestore(&spi->lock, flags); 1046 spin_unlock_irqrestore(&spi->lock, flags);
638 1047
@@ -640,12 +1049,40 @@ static int stm32_spi_prepare_msg(struct spi_master *master,
640} 1049}
641 1050
642/** 1051/**
643 * stm32_spi_dma_cb - dma callback 1052 * stm32f4_spi_dma_tx_cb - dma callback
1053 *
1054 * DMA callback is called when the transfer is complete for DMA TX channel.
1055 */
1056static void stm32f4_spi_dma_tx_cb(void *data)
1057{
1058 struct stm32_spi *spi = data;
1059
1060 if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
1061 spi_finalize_current_transfer(spi->master);
1062 stm32f4_spi_disable(spi);
1063 }
1064}
1065
1066/**
1067 * stm32f4_spi_dma_rx_cb - dma callback
1068 *
1069 * DMA callback is called when the transfer is complete for DMA RX channel.
1070 */
1071static void stm32f4_spi_dma_rx_cb(void *data)
1072{
1073 struct stm32_spi *spi = data;
1074
1075 spi_finalize_current_transfer(spi->master);
1076 stm32f4_spi_disable(spi);
1077}
1078
1079/**
1080 * stm32h7_spi_dma_cb - dma callback
644 * 1081 *
645 * DMA callback is called when the transfer is complete or when an error 1082 * DMA callback is called when the transfer is complete or when an error
646 * occurs. If the transfer is complete, EOT flag is raised. 1083 * occurs. If the transfer is complete, EOT flag is raised.
647 */ 1084 */
648static void stm32_spi_dma_cb(void *data) 1085static void stm32h7_spi_dma_cb(void *data)
649{ 1086{
650 struct stm32_spi *spi = data; 1087 struct stm32_spi *spi = data;
651 unsigned long flags; 1088 unsigned long flags;
@@ -653,11 +1090,11 @@ static void stm32_spi_dma_cb(void *data)
653 1090
654 spin_lock_irqsave(&spi->lock, flags); 1091 spin_lock_irqsave(&spi->lock, flags);
655 1092
656 sr = readl_relaxed(spi->base + STM32_SPI_SR); 1093 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
657 1094
658 spin_unlock_irqrestore(&spi->lock, flags); 1095 spin_unlock_irqrestore(&spi->lock, flags);
659 1096
660 if (!(sr & SPI_SR_EOT)) 1097 if (!(sr & STM32H7_SPI_SR_EOT))
661 dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr); 1098 dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
662 1099
663 /* Now wait for EOT, or SUSP or OVR in case of error */ 1100 /* Now wait for EOT, or SUSP or OVR in case of error */
@@ -681,23 +1118,27 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
681 else 1118 else
682 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; 1119 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
683 1120
684 /* Valid for DMA Half or Full Fifo threshold */ 1121 if (spi->cfg->has_fifo) {
685 if (spi->cur_fthlv == 2) 1122 /* Valid for DMA Half or Full Fifo threshold */
1123 if (spi->cur_fthlv == 2)
1124 maxburst = 1;
1125 else
1126 maxburst = spi->cur_fthlv;
1127 } else {
686 maxburst = 1; 1128 maxburst = 1;
687 else 1129 }
688 maxburst = spi->cur_fthlv;
689 1130
690 memset(dma_conf, 0, sizeof(struct dma_slave_config)); 1131 memset(dma_conf, 0, sizeof(struct dma_slave_config));
691 dma_conf->direction = dir; 1132 dma_conf->direction = dir;
692 if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */ 1133 if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
693 dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR; 1134 dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg;
694 dma_conf->src_addr_width = buswidth; 1135 dma_conf->src_addr_width = buswidth;
695 dma_conf->src_maxburst = maxburst; 1136 dma_conf->src_maxburst = maxburst;
696 1137
697 dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n", 1138 dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
698 buswidth, maxburst); 1139 buswidth, maxburst);
699 } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */ 1140 } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
700 dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR; 1141 dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg;
701 dma_conf->dst_addr_width = buswidth; 1142 dma_conf->dst_addr_width = buswidth;
702 dma_conf->dst_maxburst = maxburst; 1143 dma_conf->dst_maxburst = maxburst;
703 1144
@@ -707,27 +1148,68 @@ static void stm32_spi_dma_config(struct stm32_spi *spi,
707} 1148}
708 1149
709/** 1150/**
710 * stm32_spi_transfer_one_irq - transfer a single spi_transfer using 1151 * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
711 * interrupts 1152 * interrupts
712 * 1153 *
713 * It must returns 0 if the transfer is finished or 1 if the transfer is still 1154 * It must returns 0 if the transfer is finished or 1 if the transfer is still
714 * in progress. 1155 * in progress.
715 */ 1156 */
716static int stm32_spi_transfer_one_irq(struct stm32_spi *spi) 1157static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
1158{
1159 unsigned long flags;
1160 u32 cr2 = 0;
1161
1162 /* Enable the interrupts relative to the current communication mode */
1163 if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
1164 cr2 |= STM32F4_SPI_CR2_TXEIE;
1165 } else if (spi->cur_comm == SPI_FULL_DUPLEX) {
1166 /* In transmit-only mode, the OVR flag is set in the SR register
1167 * since the received data are never read. Therefore set OVR
1168 * interrupt only when rx buffer is available.
1169 */
1170 cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
1171 } else {
1172 return -EINVAL;
1173 }
1174
1175 spin_lock_irqsave(&spi->lock, flags);
1176
1177 stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
1178
1179 stm32_spi_enable(spi);
1180
1181 /* starting data transfer when buffer is loaded */
1182 if (spi->tx_buf)
1183 stm32f4_spi_write_tx(spi);
1184
1185 spin_unlock_irqrestore(&spi->lock, flags);
1186
1187 return 1;
1188}
1189
1190/**
1191 * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
1192 * interrupts
1193 *
1194 * It must returns 0 if the transfer is finished or 1 if the transfer is still
1195 * in progress.
1196 */
1197static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
717{ 1198{
718 unsigned long flags; 1199 unsigned long flags;
719 u32 ier = 0; 1200 u32 ier = 0;
720 1201
721 /* Enable the interrupts relative to the current communication mode */ 1202 /* Enable the interrupts relative to the current communication mode */
722 if (spi->tx_buf && spi->rx_buf) /* Full Duplex */ 1203 if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
723 ier |= SPI_IER_DXPIE; 1204 ier |= STM32H7_SPI_IER_DXPIE;
724 else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */ 1205 else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
725 ier |= SPI_IER_TXPIE; 1206 ier |= STM32H7_SPI_IER_TXPIE;
726 else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */ 1207 else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
727 ier |= SPI_IER_RXPIE; 1208 ier |= STM32H7_SPI_IER_RXPIE;
728 1209
729 /* Enable the interrupts relative to the end of transfer */ 1210 /* Enable the interrupts relative to the end of transfer */
730 ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE; 1211 ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE |
1212 STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
731 1213
732 spin_lock_irqsave(&spi->lock, flags); 1214 spin_lock_irqsave(&spi->lock, flags);
733 1215
@@ -735,11 +1217,11 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
735 1217
736 /* Be sure to have data in fifo before starting data transfer */ 1218 /* Be sure to have data in fifo before starting data transfer */
737 if (spi->tx_buf) 1219 if (spi->tx_buf)
738 stm32_spi_write_txfifo(spi); 1220 stm32h7_spi_write_txfifo(spi);
739 1221
740 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART); 1222 stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
741 1223
742 writel_relaxed(ier, spi->base + STM32_SPI_IER); 1224 writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
743 1225
744 spin_unlock_irqrestore(&spi->lock, flags); 1226 spin_unlock_irqrestore(&spi->lock, flags);
745 1227
@@ -747,6 +1229,43 @@ static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
747} 1229}
748 1230
749/** 1231/**
1232 * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
1233 * transfer using DMA
1234 */
1235static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
1236{
1237 /* In DMA mode end of transfer is handled by DMA TX or RX callback. */
1238 if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
1239 spi->cur_comm == SPI_FULL_DUPLEX) {
1240 /*
1241 * In transmit-only mode, the OVR flag is set in the SR register
1242 * since the received data are never read. Therefore set OVR
1243 * interrupt only when rx buffer is available.
1244 */
1245 stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
1246 }
1247
1248 stm32_spi_enable(spi);
1249}
1250
1251/**
1252 * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
1253 * transfer using DMA
1254 */
1255static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
1256{
1257 /* Enable the interrupts relative to the end of transfer */
1258 stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
1259 STM32H7_SPI_IER_TXTFIE |
1260 STM32H7_SPI_IER_OVRIE |
1261 STM32H7_SPI_IER_MODFIE);
1262
1263 stm32_spi_enable(spi);
1264
1265 stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
1266}
1267
1268/**
750 * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA 1269 * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
751 * 1270 *
752 * It must returns 0 if the transfer is finished or 1 if the transfer is still 1271 * It must returns 0 if the transfer is finished or 1 if the transfer is still
@@ -758,17 +1277,17 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
758 struct dma_slave_config tx_dma_conf, rx_dma_conf; 1277 struct dma_slave_config tx_dma_conf, rx_dma_conf;
759 struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc; 1278 struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
760 unsigned long flags; 1279 unsigned long flags;
761 u32 ier = 0;
762 1280
763 spin_lock_irqsave(&spi->lock, flags); 1281 spin_lock_irqsave(&spi->lock, flags);
764 1282
765 rx_dma_desc = NULL; 1283 rx_dma_desc = NULL;
766 if (spi->rx_buf) { 1284 if (spi->rx_buf && spi->dma_rx) {
767 stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM); 1285 stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
768 dmaengine_slave_config(spi->dma_rx, &rx_dma_conf); 1286 dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
769 1287
770 /* Enable Rx DMA request */ 1288 /* Enable Rx DMA request */
771 stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); 1289 stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg,
1290 spi->cfg->regs->dma_rx_en.mask);
772 1291
773 rx_dma_desc = dmaengine_prep_slave_sg( 1292 rx_dma_desc = dmaengine_prep_slave_sg(
774 spi->dma_rx, xfer->rx_sg.sgl, 1293 spi->dma_rx, xfer->rx_sg.sgl,
@@ -778,7 +1297,7 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
778 } 1297 }
779 1298
780 tx_dma_desc = NULL; 1299 tx_dma_desc = NULL;
781 if (spi->tx_buf) { 1300 if (spi->tx_buf && spi->dma_tx) {
782 stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV); 1301 stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
783 dmaengine_slave_config(spi->dma_tx, &tx_dma_conf); 1302 dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
784 1303
@@ -789,12 +1308,15 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
789 DMA_PREP_INTERRUPT); 1308 DMA_PREP_INTERRUPT);
790 } 1309 }
791 1310
792 if ((spi->tx_buf && !tx_dma_desc) || 1311 if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) ||
793 (spi->rx_buf && !rx_dma_desc)) 1312 (spi->rx_buf && spi->dma_rx && !rx_dma_desc))
1313 goto dma_desc_error;
1314
1315 if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc))
794 goto dma_desc_error; 1316 goto dma_desc_error;
795 1317
796 if (rx_dma_desc) { 1318 if (rx_dma_desc) {
797 rx_dma_desc->callback = stm32_spi_dma_cb; 1319 rx_dma_desc->callback = spi->cfg->dma_rx_cb;
798 rx_dma_desc->callback_param = spi; 1320 rx_dma_desc->callback_param = spi;
799 1321
800 if (dma_submit_error(dmaengine_submit(rx_dma_desc))) { 1322 if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
@@ -806,8 +1328,9 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
806 } 1328 }
807 1329
808 if (tx_dma_desc) { 1330 if (tx_dma_desc) {
809 if (spi->cur_comm == SPI_SIMPLEX_TX) { 1331 if (spi->cur_comm == SPI_SIMPLEX_TX ||
810 tx_dma_desc->callback = stm32_spi_dma_cb; 1332 spi->cur_comm == SPI_3WIRE_TX) {
1333 tx_dma_desc->callback = spi->cfg->dma_tx_cb;
811 tx_dma_desc->callback_param = spi; 1334 tx_dma_desc->callback_param = spi;
812 } 1335 }
813 1336
@@ -819,130 +1342,278 @@ static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
819 dma_async_issue_pending(spi->dma_tx); 1342 dma_async_issue_pending(spi->dma_tx);
820 1343
821 /* Enable Tx DMA request */ 1344 /* Enable Tx DMA request */
822 stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN); 1345 stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg,
1346 spi->cfg->regs->dma_tx_en.mask);
823 } 1347 }
824 1348
825 /* Enable the interrupts relative to the end of transfer */ 1349 spi->cfg->transfer_one_dma_start(spi);
826 ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
827 writel_relaxed(ier, spi->base + STM32_SPI_IER);
828
829 stm32_spi_enable(spi);
830
831 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
832 1350
833 spin_unlock_irqrestore(&spi->lock, flags); 1351 spin_unlock_irqrestore(&spi->lock, flags);
834 1352
835 return 1; 1353 return 1;
836 1354
837dma_submit_error: 1355dma_submit_error:
838 if (spi->rx_buf) 1356 if (spi->dma_rx)
839 dmaengine_terminate_all(spi->dma_rx); 1357 dmaengine_terminate_all(spi->dma_rx);
840 1358
841dma_desc_error: 1359dma_desc_error:
842 stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN); 1360 stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg,
1361 spi->cfg->regs->dma_rx_en.mask);
843 1362
844 spin_unlock_irqrestore(&spi->lock, flags); 1363 spin_unlock_irqrestore(&spi->lock, flags);
845 1364
846 dev_info(spi->dev, "DMA issue: fall back to irq transfer\n"); 1365 dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
847 1366
848 return stm32_spi_transfer_one_irq(spi); 1367 spi->cur_usedma = false;
1368 return spi->cfg->transfer_one_irq(spi);
849} 1369}
850 1370
851/** 1371/**
852 * stm32_spi_transfer_one_setup - common setup to transfer a single 1372 * stm32f4_spi_set_bpw - Configure bits per word
853 * spi_transfer either using DMA or 1373 * @spi: pointer to the spi controller data structure
854 * interrupts.
855 */ 1374 */
856static int stm32_spi_transfer_one_setup(struct stm32_spi *spi, 1375static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
857 struct spi_device *spi_dev,
858 struct spi_transfer *transfer)
859{ 1376{
860 unsigned long flags; 1377 if (spi->cur_bpw == 16)
861 u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0; 1378 stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
862 u32 mode, nb_words; 1379 else
863 int ret = 0; 1380 stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
864 1381}
865 spin_lock_irqsave(&spi->lock, flags);
866 1382
867 if (spi->cur_bpw != transfer->bits_per_word) { 1383/**
868 u32 bpw, fthlv; 1384 * stm32h7_spi_set_bpw - configure bits per word
1385 * @spi: pointer to the spi controller data structure
1386 */
1387static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
1388{
1389 u32 bpw, fthlv;
1390 u32 cfg1_clrb = 0, cfg1_setb = 0;
869 1391
870 spi->cur_bpw = transfer->bits_per_word; 1392 bpw = spi->cur_bpw - 1;
871 bpw = spi->cur_bpw - 1;
872 1393
873 cfg1_clrb |= SPI_CFG1_DSIZE; 1394 cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
874 cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE; 1395 cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
1396 STM32H7_SPI_CFG1_DSIZE;
875 1397
876 spi->cur_fthlv = stm32_spi_prepare_fthlv(spi); 1398 spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
877 fthlv = spi->cur_fthlv - 1; 1399 fthlv = spi->cur_fthlv - 1;
878 1400
879 cfg1_clrb |= SPI_CFG1_FTHLV; 1401 cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
880 cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV; 1402 cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) &
881 } 1403 STM32H7_SPI_CFG1_FTHLV;
882 1404
883 if (spi->cur_speed != transfer->speed_hz) { 1405 writel_relaxed(
884 int mbr; 1406 (readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
1407 ~cfg1_clrb) | cfg1_setb,
1408 spi->base + STM32H7_SPI_CFG1);
1409}
885 1410
886 /* Update spi->cur_speed with real clock speed */ 1411/**
887 mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz); 1412 * stm32_spi_set_mbr - Configure baud rate divisor in master mode
888 if (mbr < 0) { 1413 * @spi: pointer to the spi controller data structure
889 ret = mbr; 1414 * @mbrdiv: baud rate divisor value
890 goto out; 1415 */
891 } 1416static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
1417{
1418 u32 clrb = 0, setb = 0;
892 1419
893 transfer->speed_hz = spi->cur_speed; 1420 clrb |= spi->cfg->regs->br.mask;
1421 setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) &
1422 spi->cfg->regs->br.mask;
894 1423
895 cfg1_clrb |= SPI_CFG1_MBR; 1424 writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) &
896 cfg1_setb |= ((u32)mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR; 1425 ~clrb) | setb,
897 } 1426 spi->base + spi->cfg->regs->br.reg);
1427}
898 1428
899 if (cfg1_clrb || cfg1_setb) 1429/**
900 writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) & 1430 * stm32_spi_communication_type - return transfer communication type
901 ~cfg1_clrb) | cfg1_setb, 1431 * @spi_dev: pointer to the spi device
902 spi->base + STM32_SPI_CFG1); 1432 * transfer: pointer to spi transfer
1433 */
1434static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
1435 struct spi_transfer *transfer)
1436{
1437 unsigned int type = SPI_FULL_DUPLEX;
903 1438
904 mode = SPI_FULL_DUPLEX;
905 if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */ 1439 if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
906 /* 1440 /*
907 * SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != NULL 1441 * SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != NULL
908 * is forbidden und unvalidated by SPI subsystem so depending 1442 * is forbidden and unvalidated by SPI subsystem so depending
909 * on the valid buffer, we can determine the direction of the 1443 * on the valid buffer, we can determine the direction of the
910 * transfer. 1444 * transfer.
911 */ 1445 */
912 mode = SPI_HALF_DUPLEX;
913 if (!transfer->tx_buf) 1446 if (!transfer->tx_buf)
914 stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); 1447 type = SPI_3WIRE_RX;
915 else if (!transfer->rx_buf) 1448 else
916 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR); 1449 type = SPI_3WIRE_TX;
917 } else { 1450 } else {
918 if (!transfer->tx_buf) 1451 if (!transfer->tx_buf)
919 mode = SPI_SIMPLEX_RX; 1452 type = SPI_SIMPLEX_RX;
920 else if (!transfer->rx_buf) 1453 else if (!transfer->rx_buf)
921 mode = SPI_SIMPLEX_TX; 1454 type = SPI_SIMPLEX_TX;
1455 }
1456
1457 return type;
1458}
1459
1460/**
1461 * stm32f4_spi_set_mode - configure communication mode
1462 * @spi: pointer to the spi controller data structure
1463 * @comm_type: type of communication to configure
1464 */
1465static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
1466{
1467 if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
1468 stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
1469 STM32F4_SPI_CR1_BIDIMODE |
1470 STM32F4_SPI_CR1_BIDIOE);
1471 } else if (comm_type == SPI_FULL_DUPLEX) {
1472 stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
1473 STM32F4_SPI_CR1_BIDIMODE |
1474 STM32F4_SPI_CR1_BIDIOE);
1475 } else {
1476 return -EINVAL;
922 } 1477 }
923 if (spi->cur_comm != mode) {
924 spi->cur_comm = mode;
925 1478
926 cfg2_clrb |= SPI_CFG2_COMM; 1479 return 0;
927 cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM; 1480}
1481
1482/**
1483 * stm32h7_spi_set_mode - configure communication mode
1484 * @spi: pointer to the spi controller data structure
1485 * @comm_type: type of communication to configure
1486 */
1487static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
1488{
1489 u32 mode;
1490 u32 cfg2_clrb = 0, cfg2_setb = 0;
1491
1492 if (comm_type == SPI_3WIRE_RX) {
1493 mode = STM32H7_SPI_HALF_DUPLEX;
1494 stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
1495 } else if (comm_type == SPI_3WIRE_TX) {
1496 mode = STM32H7_SPI_HALF_DUPLEX;
1497 stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
1498 } else if (comm_type == SPI_SIMPLEX_RX) {
1499 mode = STM32H7_SPI_SIMPLEX_RX;
1500 } else if (comm_type == SPI_SIMPLEX_TX) {
1501 mode = STM32H7_SPI_SIMPLEX_TX;
1502 } else {
1503 mode = STM32H7_SPI_FULL_DUPLEX;
928 } 1504 }
929 1505
930 cfg2_clrb |= SPI_CFG2_MIDI; 1506 cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
931 if ((transfer->len > 1) && (spi->cur_midi > 0)) { 1507 cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) &
1508 STM32H7_SPI_CFG2_COMM;
1509
1510 writel_relaxed(
1511 (readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
1512 ~cfg2_clrb) | cfg2_setb,
1513 spi->base + STM32H7_SPI_CFG2);
1514
1515 return 0;
1516}
1517
1518/**
1519 * stm32h7_spi_data_idleness - configure minimum time delay inserted between two
1520 * consecutive data frames in master mode
1521 * @spi: pointer to the spi controller data structure
1522 * @len: transfer len
1523 */
1524static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
1525{
1526 u32 cfg2_clrb = 0, cfg2_setb = 0;
1527
1528 cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
1529 if ((len > 1) && (spi->cur_midi > 0)) {
932 u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed); 1530 u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
933 u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns), 1531 u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
934 (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT); 1532 (u32)STM32H7_SPI_CFG2_MIDI >>
1533 STM32H7_SPI_CFG2_MIDI_SHIFT);
935 1534
936 dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n", 1535 dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
937 sck_period_ns, midi, midi * sck_period_ns); 1536 sck_period_ns, midi, midi * sck_period_ns);
1537 cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) &
1538 STM32H7_SPI_CFG2_MIDI;
1539 }
1540
1541 writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
1542 ~cfg2_clrb) | cfg2_setb,
1543 spi->base + STM32H7_SPI_CFG2);
1544}
1545
1546/**
1547 * stm32h7_spi_number_of_data - configure number of data at current transfer
1548 * @spi: pointer to the spi controller data structure
1549 * @len: transfer length
1550 */
1551static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
1552{
1553 u32 cr2_clrb = 0, cr2_setb = 0;
1554
1555 if (nb_words <= (STM32H7_SPI_CR2_TSIZE >>
1556 STM32H7_SPI_CR2_TSIZE_SHIFT)) {
1557 cr2_clrb |= STM32H7_SPI_CR2_TSIZE;
1558 cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT;
1559 writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) &
1560 ~cr2_clrb) | cr2_setb,
1561 spi->base + STM32H7_SPI_CR2);
1562 } else {
1563 return -EMSGSIZE;
1564 }
1565
1566 return 0;
1567}
1568
1569/**
1570 * stm32_spi_transfer_one_setup - common setup to transfer a single
1571 * spi_transfer either using DMA or
1572 * interrupts.
1573 */
1574static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
1575 struct spi_device *spi_dev,
1576 struct spi_transfer *transfer)
1577{
1578 unsigned long flags;
1579 unsigned int comm_type;
1580 int nb_words, ret = 0;
1581
1582 spin_lock_irqsave(&spi->lock, flags);
1583
1584 if (spi->cur_bpw != transfer->bits_per_word) {
1585 spi->cur_bpw = transfer->bits_per_word;
1586 spi->cfg->set_bpw(spi);
1587 }
938 1588
939 cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI; 1589 if (spi->cur_speed != transfer->speed_hz) {
1590 int mbr;
1591
1592 /* Update spi->cur_speed with real clock speed */
1593 mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
1594 spi->cfg->baud_rate_div_min,
1595 spi->cfg->baud_rate_div_max);
1596 if (mbr < 0) {
1597 ret = mbr;
1598 goto out;
1599 }
1600
1601 transfer->speed_hz = spi->cur_speed;
1602 stm32_spi_set_mbr(spi, mbr);
940 } 1603 }
941 1604
942 if (cfg2_clrb || cfg2_setb) 1605 comm_type = stm32_spi_communication_type(spi_dev, transfer);
943 writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) & 1606 if (spi->cur_comm != comm_type) {
944 ~cfg2_clrb) | cfg2_setb, 1607 ret = spi->cfg->set_mode(spi, comm_type);
945 spi->base + STM32_SPI_CFG2); 1608
1609 if (ret < 0)
1610 goto out;
1611
1612 spi->cur_comm = comm_type;
1613 }
1614
1615 if (spi->cfg->set_data_idleness)
1616 spi->cfg->set_data_idleness(spi, transfer->len);
946 1617
947 if (spi->cur_bpw <= 8) 1618 if (spi->cur_bpw <= 8)
948 nb_words = transfer->len; 1619 nb_words = transfer->len;
@@ -950,13 +1621,11 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
950 nb_words = DIV_ROUND_UP(transfer->len * 8, 16); 1621 nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
951 else 1622 else
952 nb_words = DIV_ROUND_UP(transfer->len * 8, 32); 1623 nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
953 nb_words <<= SPI_CR2_TSIZE_SHIFT;
954 1624
955 if (nb_words <= SPI_CR2_TSIZE) { 1625 if (spi->cfg->set_number_of_data) {
956 writel_relaxed(nb_words, spi->base + STM32_SPI_CR2); 1626 ret = spi->cfg->set_number_of_data(spi, nb_words);
957 } else { 1627 if (ret < 0)
958 ret = -EMSGSIZE; 1628 goto out;
959 goto out;
960 } 1629 }
961 1630
962 spi->cur_xferlen = transfer->len; 1631 spi->cur_xferlen = transfer->len;
@@ -997,7 +1666,7 @@ static int stm32_spi_transfer_one(struct spi_master *master,
997 spi->rx_len = spi->rx_buf ? transfer->len : 0; 1666 spi->rx_len = spi->rx_buf ? transfer->len : 0;
998 1667
999 spi->cur_usedma = (master->can_dma && 1668 spi->cur_usedma = (master->can_dma &&
1000 stm32_spi_can_dma(master, spi_dev, transfer)); 1669 master->can_dma(master, spi_dev, transfer));
1001 1670
1002 ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer); 1671 ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
1003 if (ret) { 1672 if (ret) {
@@ -1008,47 +1677,73 @@ static int stm32_spi_transfer_one(struct spi_master *master,
1008 if (spi->cur_usedma) 1677 if (spi->cur_usedma)
1009 return stm32_spi_transfer_one_dma(spi, transfer); 1678 return stm32_spi_transfer_one_dma(spi, transfer);
1010 else 1679 else
1011 return stm32_spi_transfer_one_irq(spi); 1680 return spi->cfg->transfer_one_irq(spi);
1012} 1681}
1013 1682
1014/** 1683/**
1015 * stm32_spi_unprepare_msg - relax the hardware 1684 * stm32_spi_unprepare_msg - relax the hardware
1016 *
1017 * Normally, if TSIZE has been configured, we should relax the hardware at the
1018 * reception of the EOT interrupt. But in case of error, EOT will not be
1019 * raised. So the subsystem unprepare_message call allows us to properly
1020 * complete the transfer from an hardware point of view.
1021 */ 1685 */
1022static int stm32_spi_unprepare_msg(struct spi_master *master, 1686static int stm32_spi_unprepare_msg(struct spi_master *master,
1023 struct spi_message *msg) 1687 struct spi_message *msg)
1024{ 1688{
1025 struct stm32_spi *spi = spi_master_get_devdata(master); 1689 struct stm32_spi *spi = spi_master_get_devdata(master);
1026 1690
1027 stm32_spi_disable(spi); 1691 spi->cfg->disable(spi);
1692
1693 return 0;
1694}
1695
1696/**
1697 * stm32f4_spi_config - Configure SPI controller as SPI master
1698 */
1699static int stm32f4_spi_config(struct stm32_spi *spi)
1700{
1701 unsigned long flags;
1702
1703 spin_lock_irqsave(&spi->lock, flags);
1704
1705 /* Ensure I2SMOD bit is kept cleared */
1706 stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
1707 STM32F4_SPI_I2SCFGR_I2SMOD);
1708
1709 /*
1710 * - SS input value high
1711 * - transmitter half duplex direction
1712 * - Set the master mode (default Motorola mode)
1713 * - Consider 1 master/n slaves configuration and
1714 * SS input value is determined by the SSI bit
1715 */
1716 stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
1717 STM32F4_SPI_CR1_BIDIOE |
1718 STM32F4_SPI_CR1_MSTR |
1719 STM32F4_SPI_CR1_SSM);
1720
1721 spin_unlock_irqrestore(&spi->lock, flags);
1028 1722
1029 return 0; 1723 return 0;
1030} 1724}
1031 1725
1032/** 1726/**
1033 * stm32_spi_config - Configure SPI controller as SPI master 1727 * stm32h7_spi_config - Configure SPI controller as SPI master
1034 */ 1728 */
1035static int stm32_spi_config(struct stm32_spi *spi) 1729static int stm32h7_spi_config(struct stm32_spi *spi)
1036{ 1730{
1037 unsigned long flags; 1731 unsigned long flags;
1038 1732
1039 spin_lock_irqsave(&spi->lock, flags); 1733 spin_lock_irqsave(&spi->lock, flags);
1040 1734
1041 /* Ensure I2SMOD bit is kept cleared */ 1735 /* Ensure I2SMOD bit is kept cleared */
1042 stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD); 1736 stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
1737 STM32H7_SPI_I2SCFGR_I2SMOD);
1043 1738
1044 /* 1739 /*
1045 * - SS input value high 1740 * - SS input value high
1046 * - transmitter half duplex direction 1741 * - transmitter half duplex direction
1047 * - automatic communication suspend when RX-Fifo is full 1742 * - automatic communication suspend when RX-Fifo is full
1048 */ 1743 */
1049 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI | 1744 stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI |
1050 SPI_CR1_HDDIR | 1745 STM32H7_SPI_CR1_HDDIR |
1051 SPI_CR1_MASRX); 1746 STM32H7_SPI_CR1_MASRX);
1052 1747
1053 /* 1748 /*
1054 * - Set the master mode (default Motorola mode) 1749 * - Set the master mode (default Motorola mode)
@@ -1056,17 +1751,56 @@ static int stm32_spi_config(struct stm32_spi *spi)
1056 * SS input value is determined by the SSI bit 1751 * SS input value is determined by the SSI bit
1057 * - keep control of all associated GPIOs 1752 * - keep control of all associated GPIOs
1058 */ 1753 */
1059 stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER | 1754 stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER |
1060 SPI_CFG2_SSM | 1755 STM32H7_SPI_CFG2_SSM |
1061 SPI_CFG2_AFCNTR); 1756 STM32H7_SPI_CFG2_AFCNTR);
1062 1757
1063 spin_unlock_irqrestore(&spi->lock, flags); 1758 spin_unlock_irqrestore(&spi->lock, flags);
1064 1759
1065 return 0; 1760 return 0;
1066} 1761}
1067 1762
1763static const struct stm32_spi_cfg stm32f4_spi_cfg = {
1764 .regs = &stm32f4_spi_regspec,
1765 .get_bpw_mask = stm32f4_spi_get_bpw_mask,
1766 .disable = stm32f4_spi_disable,
1767 .config = stm32f4_spi_config,
1768 .set_bpw = stm32f4_spi_set_bpw,
1769 .set_mode = stm32f4_spi_set_mode,
1770 .transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
1771 .dma_tx_cb = stm32f4_spi_dma_tx_cb,
1772 .dma_rx_cb = stm32f4_spi_dma_rx_cb,
1773 .transfer_one_irq = stm32f4_spi_transfer_one_irq,
1774 .irq_handler_event = stm32f4_spi_irq_event,
1775 .irq_handler_thread = stm32f4_spi_irq_thread,
1776 .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
1777 .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
1778 .has_fifo = false,
1779};
1780
1781static const struct stm32_spi_cfg stm32h7_spi_cfg = {
1782 .regs = &stm32h7_spi_regspec,
1783 .get_fifo_size = stm32h7_spi_get_fifo_size,
1784 .get_bpw_mask = stm32h7_spi_get_bpw_mask,
1785 .disable = stm32h7_spi_disable,
1786 .config = stm32h7_spi_config,
1787 .set_bpw = stm32h7_spi_set_bpw,
1788 .set_mode = stm32h7_spi_set_mode,
1789 .set_data_idleness = stm32h7_spi_data_idleness,
1790 .set_number_of_data = stm32h7_spi_number_of_data,
1791 .transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
1792 .dma_rx_cb = stm32h7_spi_dma_cb,
1793 .dma_tx_cb = stm32h7_spi_dma_cb,
1794 .transfer_one_irq = stm32h7_spi_transfer_one_irq,
1795 .irq_handler_thread = stm32h7_spi_irq_thread,
1796 .baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
1797 .baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
1798 .has_fifo = true,
1799};
1800
1068static const struct of_device_id stm32_spi_of_match[] = { 1801static const struct of_device_id stm32_spi_of_match[] = {
1069 { .compatible = "st,stm32h7-spi", }, 1802 { .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
1803 { .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
1070 {}, 1804 {},
1071}; 1805};
1072MODULE_DEVICE_TABLE(of, stm32_spi_of_match); 1806MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
@@ -1090,12 +1824,17 @@ static int stm32_spi_probe(struct platform_device *pdev)
1090 spi->master = master; 1824 spi->master = master;
1091 spin_lock_init(&spi->lock); 1825 spin_lock_init(&spi->lock);
1092 1826
1827 spi->cfg = (const struct stm32_spi_cfg *)
1828 of_match_device(pdev->dev.driver->of_match_table,
1829 &pdev->dev)->data;
1830
1093 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1831 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1094 spi->base = devm_ioremap_resource(&pdev->dev, res); 1832 spi->base = devm_ioremap_resource(&pdev->dev, res);
1095 if (IS_ERR(spi->base)) { 1833 if (IS_ERR(spi->base)) {
1096 ret = PTR_ERR(spi->base); 1834 ret = PTR_ERR(spi->base);
1097 goto err_master_put; 1835 goto err_master_put;
1098 } 1836 }
1837
1099 spi->phys_addr = (dma_addr_t)res->start; 1838 spi->phys_addr = (dma_addr_t)res->start;
1100 1839
1101 spi->irq = platform_get_irq(pdev, 0); 1840 spi->irq = platform_get_irq(pdev, 0);
@@ -1104,16 +1843,17 @@ static int stm32_spi_probe(struct platform_device *pdev)
1104 ret = -ENOENT; 1843 ret = -ENOENT;
1105 goto err_master_put; 1844 goto err_master_put;
1106 } 1845 }
1107 ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL, 1846 ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
1108 stm32_spi_irq, IRQF_ONESHOT, 1847 spi->cfg->irq_handler_event,
1109 pdev->name, master); 1848 spi->cfg->irq_handler_thread,
1849 IRQF_ONESHOT, pdev->name, master);
1110 if (ret) { 1850 if (ret) {
1111 dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq, 1851 dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
1112 ret); 1852 ret);
1113 goto err_master_put; 1853 goto err_master_put;
1114 } 1854 }
1115 1855
1116 spi->clk = devm_clk_get(&pdev->dev, 0); 1856 spi->clk = devm_clk_get(&pdev->dev, NULL);
1117 if (IS_ERR(spi->clk)) { 1857 if (IS_ERR(spi->clk)) {
1118 ret = PTR_ERR(spi->clk); 1858 ret = PTR_ERR(spi->clk);
1119 dev_err(&pdev->dev, "clk get failed: %d\n", ret); 1859 dev_err(&pdev->dev, "clk get failed: %d\n", ret);
@@ -1139,9 +1879,10 @@ static int stm32_spi_probe(struct platform_device *pdev)
1139 reset_control_deassert(spi->rst); 1879 reset_control_deassert(spi->rst);
1140 } 1880 }
1141 1881
1142 spi->fifo_size = stm32_spi_get_fifo_size(spi); 1882 if (spi->cfg->has_fifo)
1883 spi->fifo_size = spi->cfg->get_fifo_size(spi);
1143 1884
1144 ret = stm32_spi_config(spi); 1885 ret = spi->cfg->config(spi);
1145 if (ret) { 1886 if (ret) {
1146 dev_err(&pdev->dev, "controller configuration failed: %d\n", 1887 dev_err(&pdev->dev, "controller configuration failed: %d\n",
1147 ret); 1888 ret);
@@ -1151,11 +1892,11 @@ static int stm32_spi_probe(struct platform_device *pdev)
1151 master->dev.of_node = pdev->dev.of_node; 1892 master->dev.of_node = pdev->dev.of_node;
1152 master->auto_runtime_pm = true; 1893 master->auto_runtime_pm = true;
1153 master->bus_num = pdev->id; 1894 master->bus_num = pdev->id;
1154 master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST | 1895 master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
1155 SPI_3WIRE | SPI_LOOP; 1896 SPI_3WIRE;
1156 master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi); 1897 master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
1157 master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN; 1898 master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
1158 master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX; 1899 master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
1159 master->setup = stm32_spi_setup; 1900 master->setup = stm32_spi_setup;
1160 master->prepare_message = stm32_spi_prepare_msg; 1901 master->prepare_message = stm32_spi_prepare_msg;
1161 master->transfer_one = stm32_spi_transfer_one; 1902 master->transfer_one = stm32_spi_transfer_one;
@@ -1233,7 +1974,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
1233 struct spi_master *master = platform_get_drvdata(pdev); 1974 struct spi_master *master = platform_get_drvdata(pdev);
1234 struct stm32_spi *spi = spi_master_get_devdata(master); 1975 struct stm32_spi *spi = spi_master_get_devdata(master);
1235 1976
1236 stm32_spi_disable(spi); 1977 spi->cfg->disable(spi);
1237 1978
1238 if (master->dma_tx) 1979 if (master->dma_tx)
1239 dma_release_channel(master->dma_tx); 1980 dma_release_channel(master->dma_tx);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 9a7def7c3237..93986f879b09 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -19,6 +19,7 @@
19#include <linux/spi/spi.h> 19#include <linux/spi/spi.h>
20#include <linux/spi/spi-mem.h> 20#include <linux/spi/spi-mem.h>
21#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22#include <linux/gpio/consumer.h>
22#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
23#include <linux/pm_domain.h> 24#include <linux/pm_domain.h>
24#include <linux/property.h> 25#include <linux/property.h>
@@ -578,7 +579,10 @@ int spi_add_device(struct spi_device *spi)
578 goto done; 579 goto done;
579 } 580 }
580 581
581 if (ctlr->cs_gpios) 582 /* Descriptors take precedence */
583 if (ctlr->cs_gpiods)
584 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
585 else if (ctlr->cs_gpios)
582 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select]; 586 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
583 587
584 /* Drivers may modify this initial i/o setup, but will 588 /* Drivers may modify this initial i/o setup, but will
@@ -772,10 +776,21 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
772 if (spi->mode & SPI_CS_HIGH) 776 if (spi->mode & SPI_CS_HIGH)
773 enable = !enable; 777 enable = !enable;
774 778
775 if (gpio_is_valid(spi->cs_gpio)) { 779 if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
776 /* Honour the SPI_NO_CS flag */ 780 /*
777 if (!(spi->mode & SPI_NO_CS)) 781 * Honour the SPI_NO_CS flag and invert the enable line, as
778 gpio_set_value(spi->cs_gpio, !enable); 782 * active low is default for SPI. Execution paths that handle
783 * polarity inversion in gpiolib (such as device tree) will
784 * enforce active high using the SPI_CS_HIGH resulting in a
785 * double inversion through the code above.
786 */
787 if (!(spi->mode & SPI_NO_CS)) {
788 if (spi->cs_gpiod)
789 gpiod_set_value_cansleep(spi->cs_gpiod,
790 !enable);
791 else
792 gpio_set_value_cansleep(spi->cs_gpio, !enable);
793 }
779 /* Some SPI masters need both GPIO CS & slave_select */ 794 /* Some SPI masters need both GPIO CS & slave_select */
780 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) && 795 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
781 spi->controller->set_cs) 796 spi->controller->set_cs)
@@ -1615,13 +1630,21 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1615 spi->mode |= SPI_CPHA; 1630 spi->mode |= SPI_CPHA;
1616 if (of_property_read_bool(nc, "spi-cpol")) 1631 if (of_property_read_bool(nc, "spi-cpol"))
1617 spi->mode |= SPI_CPOL; 1632 spi->mode |= SPI_CPOL;
1618 if (of_property_read_bool(nc, "spi-cs-high"))
1619 spi->mode |= SPI_CS_HIGH;
1620 if (of_property_read_bool(nc, "spi-3wire")) 1633 if (of_property_read_bool(nc, "spi-3wire"))
1621 spi->mode |= SPI_3WIRE; 1634 spi->mode |= SPI_3WIRE;
1622 if (of_property_read_bool(nc, "spi-lsb-first")) 1635 if (of_property_read_bool(nc, "spi-lsb-first"))
1623 spi->mode |= SPI_LSB_FIRST; 1636 spi->mode |= SPI_LSB_FIRST;
1624 1637
1638 /*
1639 * For descriptors associated with the device, polarity inversion is
1640 * handled in the gpiolib, so all chip selects are "active high" in
1641 * the logical sense, the gpiolib will invert the line if need be.
1642 */
1643 if (ctlr->use_gpio_descriptors)
1644 spi->mode |= SPI_CS_HIGH;
1645 else if (of_property_read_bool(nc, "spi-cs-high"))
1646 spi->mode |= SPI_CS_HIGH;
1647
1625 /* Device DUAL/QUAD mode */ 1648 /* Device DUAL/QUAD mode */
1626 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1649 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1627 switch (value) { 1650 switch (value) {
@@ -2137,6 +2160,60 @@ static int of_spi_register_master(struct spi_controller *ctlr)
2137} 2160}
2138#endif 2161#endif
2139 2162
2163/**
2164 * spi_get_gpio_descs() - grab chip select GPIOs for the master
2165 * @ctlr: The SPI master to grab GPIO descriptors for
2166 */
2167static int spi_get_gpio_descs(struct spi_controller *ctlr)
2168{
2169 int nb, i;
2170 struct gpio_desc **cs;
2171 struct device *dev = &ctlr->dev;
2172
2173 nb = gpiod_count(dev, "cs");
2174 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2175
2176 /* No GPIOs at all is fine, else return the error */
2177 if (nb == 0 || nb == -ENOENT)
2178 return 0;
2179 else if (nb < 0)
2180 return nb;
2181
2182 cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2183 GFP_KERNEL);
2184 if (!cs)
2185 return -ENOMEM;
2186 ctlr->cs_gpiods = cs;
2187
2188 for (i = 0; i < nb; i++) {
2189 /*
2190 * Most chipselects are active low, the inverted
2191 * semantics are handled by special quirks in gpiolib,
2192 * so initializing them GPIOD_OUT_LOW here means
2193 * "unasserted", in most cases this will drive the physical
2194 * line high.
2195 */
2196 cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2197 GPIOD_OUT_LOW);
2198
2199 if (cs[i]) {
2200 /*
2201 * If we find a CS GPIO, name it after the device and
2202 * chip select line.
2203 */
2204 char *gpioname;
2205
2206 gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2207 dev_name(dev), i);
2208 if (!gpioname)
2209 return -ENOMEM;
2210 gpiod_set_consumer_name(cs[i], gpioname);
2211 }
2212 }
2213
2214 return 0;
2215}
2216
2140static int spi_controller_check_ops(struct spi_controller *ctlr) 2217static int spi_controller_check_ops(struct spi_controller *ctlr)
2141{ 2218{
2142 /* 2219 /*
@@ -2199,9 +2276,21 @@ int spi_register_controller(struct spi_controller *ctlr)
2199 return status; 2276 return status;
2200 2277
2201 if (!spi_controller_is_slave(ctlr)) { 2278 if (!spi_controller_is_slave(ctlr)) {
2202 status = of_spi_register_master(ctlr); 2279 if (ctlr->use_gpio_descriptors) {
2203 if (status) 2280 status = spi_get_gpio_descs(ctlr);
2204 return status; 2281 if (status)
2282 return status;
2283 /*
2284 * A controller using GPIO descriptors always
2285 * supports SPI_CS_HIGH if need be.
2286 */
2287 ctlr->mode_bits |= SPI_CS_HIGH;
2288 } else {
2289 /* Legacy code path for GPIOs from DT */
2290 status = of_spi_register_master(ctlr);
2291 if (status)
2292 return status;
2293 }
2205 } 2294 }
2206 2295
2207 /* even if it's just one always-selected device, there must 2296 /* even if it's just one always-selected device, there must
@@ -2915,6 +3004,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2915 * cs_change is set for each transfer. 3004 * cs_change is set for each transfer.
2916 */ 3005 */
2917 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) || 3006 if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3007 spi->cs_gpiod ||
2918 gpio_is_valid(spi->cs_gpio))) { 3008 gpio_is_valid(spi->cs_gpio))) {
2919 size_t maxsize; 3009 size_t maxsize;
2920 int ret; 3010 int ret;
@@ -2961,6 +3051,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2961 * it is not set for this transfer. 3051 * it is not set for this transfer.
2962 * Set transfer tx_nbits and rx_nbits as single transfer default 3052 * Set transfer tx_nbits and rx_nbits as single transfer default
2963 * (SPI_NBITS_SINGLE) if it is not set for this transfer. 3053 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
3054 * Ensure transfer word_delay is at least as long as that required by
3055 * device itself.
2964 */ 3056 */
2965 message->frame_length = 0; 3057 message->frame_length = 0;
2966 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3058 list_for_each_entry(xfer, &message->transfers, transfer_list) {
@@ -3031,6 +3123,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3031 !(spi->mode & SPI_RX_QUAD)) 3123 !(spi->mode & SPI_RX_QUAD))
3032 return -EINVAL; 3124 return -EINVAL;
3033 } 3125 }
3126
3127 if (xfer->word_delay_usecs < spi->word_delay_usecs)
3128 xfer->word_delay_usecs = spi->word_delay_usecs;
3034 } 3129 }
3035 3130
3036 message->status = -EINPROGRESS; 3131 message->status = -EINPROGRESS;