author	Amelie Delaunay <amelie.delaunay@st.com>	2017-06-21 10:32:06 -0400
committer	Mark Brown <broonie@kernel.org>	2017-06-21 11:15:54 -0400
commit	dcbe0d84dfa5a3e72b8e6ce622cd5ac78abbcab8 (patch)
tree	d7f09d12be8c0ace2f90d66f7753ad3fac5fabec
parent	82a29bf9952acd1be7e76783604686abeb4e5b1d (diff)
spi: add driver for STM32 SPI controller
The STM32 Serial Peripheral Interface (SPI) can be used to communicate with external devices using its dedicated synchronous protocol. It supports half-duplex, full-duplex and simplex synchronous serial communication with external devices, with data frames of 4 up to 16/32 bits per word. It has two embedded 8x/16x 8-bit Rx and Tx FIFOs with DMA capability, and it can operate in master or slave mode.

Signed-off-by: Amelie Delaunay <amelie.delaunay@st.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
-rw-r--r--	drivers/spi/Kconfig	10
-rw-r--r--	drivers/spi/Makefile	1
-rw-r--r--	drivers/spi/spi-stm32.c	1266
3 files changed, 1277 insertions(+), 0 deletions(-)
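For context, a minimal client-side sketch of how a device behind this controller would be driven through the generic Linux SPI API. The function name, payload and 1 MHz speed are made up for illustration; only standard <linux/spi/spi.h> calls are used, and the SPI core ends up routing the transfer to this driver's transfer_one hook.

#include <linux/spi/spi.h>

/* Hypothetical full-duplex transfer issued by a client driver. */
static int example_client_xfer(struct spi_device *dev)
{
	u8 tx[4] = { 0xde, 0xad, 0xbe, 0xef };
	u8 rx[4];
	struct spi_transfer t = {
		.tx_buf		= tx,
		.rx_buf		= rx,
		.len		= sizeof(tx),
		.bits_per_word	= 8,
		.speed_hz	= 1000000,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);

	/* Blocks until the controller signals end of transfer (EOT). */
	return spi_sync(dev, &m);
}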
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1761c9004fc1..36f3f90f07a2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -619,6 +619,16 @@ config SPI_SIRF
 	help
 	  SPI driver for CSR SiRFprimaII SoCs
 
+config SPI_STM32
+	tristate "STMicroelectronics STM32 SPI controller"
+	depends on ARCH_STM32 || COMPILE_TEST
+	help
+	  SPI driver for STMicroelectronics STM32 SoCs.
+
+	  STM32 SPI controller supports DMA and PIO modes. When DMA
+	  is not available, the driver automatically falls back to
+	  PIO mode.
+
 config SPI_ST_SSC4
 	tristate "STMicroelectronics SPI SSC-based driver"
 	depends on ARCH_STI || COMPILE_TEST
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index b375a7a89216..6b0749cc28bf 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
 obj-$(CONFIG_SPI_SH_MSIOF)		+= spi-sh-msiof.o
 obj-$(CONFIG_SPI_SH_SCI)		+= spi-sh-sci.o
 obj-$(CONFIG_SPI_SIRF)			+= spi-sirf.o
+obj-$(CONFIG_SPI_STM32)			+= spi-stm32.o
 obj-$(CONFIG_SPI_ST_SSC4)		+= spi-st-ssc4.o
 obj-$(CONFIG_SPI_SUN4I)			+= spi-sun4i.o
 obj-$(CONFIG_SPI_SUN6I)			+= spi-sun6i.o
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
new file mode 100644
index 000000000000..0553f61ab3b7
--- /dev/null
+++ b/drivers/spi/spi-stm32.c
@@ -0,0 +1,1266 @@
1/*
2 * STMicroelectronics STM32 SPI Controller driver (master mode only)
3 *
4 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
5 * Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
6 *
7 * License terms: GPL V2.0.
8 *
9 * spi_stm32 driver is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * spi_stm32 driver is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
16 * details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * spi_stm32 driver. If not, see <http://www.gnu.org/licenses/>.
20 */
21#include <linux/debugfs.h>
22#include <linux/clk.h>
23#include <linux/delay.h>
24#include <linux/dmaengine.h>
25#include <linux/gpio.h>
26#include <linux/interrupt.h>
27#include <linux/iopoll.h>
28#include <linux/module.h>
29#include <linux/of_platform.h>
30#include <linux/reset.h>
31#include <linux/spi/spi.h>
32
33#define DRIVER_NAME "spi_stm32"
34
35/* STM32 SPI registers */
36#define STM32_SPI_CR1 0x00
37#define STM32_SPI_CR2 0x04
38#define STM32_SPI_CFG1 0x08
39#define STM32_SPI_CFG2 0x0C
40#define STM32_SPI_IER 0x10
41#define STM32_SPI_SR 0x14
42#define STM32_SPI_IFCR 0x18
43#define STM32_SPI_TXDR 0x20
44#define STM32_SPI_RXDR 0x30
45#define STM32_SPI_I2SCFGR 0x50
46
47/* STM32_SPI_CR1 bit fields */
48#define SPI_CR1_SPE BIT(0)
49#define SPI_CR1_MASRX BIT(8)
50#define SPI_CR1_CSTART BIT(9)
51#define SPI_CR1_CSUSP BIT(10)
52#define SPI_CR1_HDDIR BIT(11)
53#define SPI_CR1_SSI BIT(12)
54
55/* STM32_SPI_CR2 bit fields */
56#define SPI_CR2_TSIZE_SHIFT 0
57#define SPI_CR2_TSIZE GENMASK(15, 0)
58
59/* STM32_SPI_CFG1 bit fields */
60#define SPI_CFG1_DSIZE_SHIFT 0
61#define SPI_CFG1_DSIZE GENMASK(4, 0)
62#define SPI_CFG1_FTHLV_SHIFT 5
63#define SPI_CFG1_FTHLV GENMASK(8, 5)
64#define SPI_CFG1_RXDMAEN BIT(14)
65#define SPI_CFG1_TXDMAEN BIT(15)
66#define SPI_CFG1_MBR_SHIFT 28
67#define SPI_CFG1_MBR GENMASK(30, 28)
68#define SPI_CFG1_MBR_MIN 0
69#define SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
70
71/* STM32_SPI_CFG2 bit fields */
72#define SPI_CFG2_MIDI_SHIFT 4
73#define SPI_CFG2_MIDI GENMASK(7, 4)
74#define SPI_CFG2_COMM_SHIFT 17
75#define SPI_CFG2_COMM GENMASK(18, 17)
76#define SPI_CFG2_SP_SHIFT 19
77#define SPI_CFG2_SP GENMASK(21, 19)
78#define SPI_CFG2_MASTER BIT(22)
79#define SPI_CFG2_LSBFRST BIT(23)
80#define SPI_CFG2_CPHA BIT(24)
81#define SPI_CFG2_CPOL BIT(25)
82#define SPI_CFG2_SSM BIT(26)
83#define SPI_CFG2_AFCNTR BIT(31)
84
85/* STM32_SPI_IER bit fields */
86#define SPI_IER_RXPIE BIT(0)
87#define SPI_IER_TXPIE BIT(1)
88#define SPI_IER_DXPIE BIT(2)
89#define SPI_IER_EOTIE BIT(3)
90#define SPI_IER_TXTFIE BIT(4)
91#define SPI_IER_OVRIE BIT(6)
92#define SPI_IER_MODFIE BIT(9)
93#define SPI_IER_ALL GENMASK(10, 0)
94
95/* STM32_SPI_SR bit fields */
96#define SPI_SR_RXP BIT(0)
97#define SPI_SR_TXP BIT(1)
98#define SPI_SR_EOT BIT(3)
99#define SPI_SR_OVR BIT(6)
100#define SPI_SR_MODF BIT(9)
101#define SPI_SR_SUSP BIT(11)
102#define SPI_SR_RXPLVL_SHIFT 13
103#define SPI_SR_RXPLVL GENMASK(14, 13)
104#define SPI_SR_RXWNE BIT(15)
105
106/* STM32_SPI_IFCR bit fields */
107#define SPI_IFCR_ALL GENMASK(11, 3)
108
109/* STM32_SPI_I2SCFGR bit fields */
110#define SPI_I2SCFGR_I2SMOD BIT(0)
111
112/* SPI Master Baud Rate min/max divisor */
113#define SPI_MBR_DIV_MIN (2 << SPI_CFG1_MBR_MIN)
114#define SPI_MBR_DIV_MAX (2 << SPI_CFG1_MBR_MAX)
115
116/* SPI Communication mode */
117#define SPI_FULL_DUPLEX 0
118#define SPI_SIMPLEX_TX 1
119#define SPI_SIMPLEX_RX 2
120#define SPI_HALF_DUPLEX 3
121
122#define SPI_1HZ_NS 1000000000
123
124/**
125 * struct stm32_spi - private data of the SPI controller
126 * @dev: driver model representation of the controller
127 * @master: controller master interface
128 * @base: virtual memory area
129 * @clk: hw kernel clock feeding the SPI clock generator
130 * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
131 * @rst: SPI controller reset line
132 * @lock: prevent I/O concurrent access
133 * @irq: SPI controller interrupt line
134 * @fifo_size: size of the embedded fifo in bytes
135 * @cur_midi: master inter-data idleness in ns
136 * @cur_speed: speed configured in Hz
137 * @cur_bpw: number of bits in a single SPI data frame
138 * @cur_fthlv: fifo threshold level (data frames in a single data packet)
139 * @cur_comm: SPI communication mode
140 * @cur_xferlen: current transfer length in bytes
141 * @cur_usedma: boolean to know if dma is used in current transfer
142 * @tx_buf: data to be written, or NULL
143 * @rx_buf: data to be read, or NULL
144 * @tx_len: number of data to be written in bytes
145 * @rx_len: number of data to be read in bytes
146 * @dma_tx: dma channel for TX transfer
147 * @dma_rx: dma channel for RX transfer
148 * @phys_addr: SPI registers physical base address
149 */
150struct stm32_spi {
151 struct device *dev;
152 struct spi_master *master;
153 void __iomem *base;
154 struct clk *clk;
155 u32 clk_rate;
156 struct reset_control *rst;
157 spinlock_t lock; /* prevent I/O concurrent access */
158 int irq;
159 unsigned int fifo_size;
160
161 unsigned int cur_midi;
162 unsigned int cur_speed;
163 unsigned int cur_bpw;
164 unsigned int cur_fthlv;
165 unsigned int cur_comm;
166 unsigned int cur_xferlen;
167 bool cur_usedma;
168
169 const void *tx_buf;
170 void *rx_buf;
171 int tx_len;
172 int rx_len;
173 struct dma_chan *dma_tx;
174 struct dma_chan *dma_rx;
175 dma_addr_t phys_addr;
176};
177
178static inline void stm32_spi_set_bits(struct stm32_spi *spi,
179 u32 offset, u32 bits)
180{
181 writel_relaxed(readl_relaxed(spi->base + offset) | bits,
182 spi->base + offset);
183}
184
185static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
186 u32 offset, u32 bits)
187{
188 writel_relaxed(readl_relaxed(spi->base + offset) & ~bits,
189 spi->base + offset);
190}
191
192/**
193 * stm32_spi_get_fifo_size - Return fifo size
194 * @spi: pointer to the spi controller data structure
195 */
196static int stm32_spi_get_fifo_size(struct stm32_spi *spi)
197{
198 unsigned long flags;
199 u32 count = 0;
200
201 spin_lock_irqsave(&spi->lock, flags);
202
203 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
204
205 while (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)
206 writeb_relaxed(++count, spi->base + STM32_SPI_TXDR);
207
208 stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
209
210 spin_unlock_irqrestore(&spi->lock, flags);
211
212 dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);
213
214 return count;
215}
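/*
 * Illustration (FIFO sizes quoted from the commit message, not probed
 * here): on an instance with a 16x 8-bit TXFIFO the loop above stops
 * after 16 dummy byte writes, so fifo_size is reported as 16 bytes; an
 * 8x 8-bit instance reports 8 bytes.
 */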
216
217/**
218 * stm32_spi_get_bpw_mask - Return bits per word mask
219 * @spi: pointer to the spi controller data structure
220 */
221static int stm32_spi_get_bpw_mask(struct stm32_spi *spi)
222{
223 unsigned long flags;
224 u32 cfg1, max_bpw;
225
226 spin_lock_irqsave(&spi->lock, flags);
227
228 /*
229 * The most significant bit of the DSIZE bit field is reserved when the
230 * maximum data size of the peripheral instance is limited to 16 bits
231 */
232 stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_DSIZE);
233
234 cfg1 = readl_relaxed(spi->base + STM32_SPI_CFG1);
235 max_bpw = (cfg1 & SPI_CFG1_DSIZE) >> SPI_CFG1_DSIZE_SHIFT;
236 max_bpw += 1;
237
238 spin_unlock_irqrestore(&spi->lock, flags);
239
240 dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
241
242 return SPI_BPW_RANGE_MASK(4, max_bpw);
243}
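/*
 * Illustration of the probing above: after writing all-ones to DSIZE,
 * an instance limited to 16-bit frames reads back DSIZE = 0xF, giving
 * max_bpw = 16, while a 32-bit capable instance reads back 0x1F, giving
 * max_bpw = 32. The returned mask then covers every word length from
 * 4 bits up to that maximum.
 */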
244
245/**
246 * stm32_spi_prepare_mbr - Determine SPI_CFG1.MBR value
247 * @spi: pointer to the spi controller data structure
248 * @speed_hz: requested speed
249 *
250 * Return SPI_CFG1.MBR value in case of success or -EINVAL
251 */
252static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
253{
254 u32 div, mbrdiv;
255
256 div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
257
258 /*
259 * The SPI framework sets xfer->speed_hz to master->max_speed_hz if
260 * xfer->speed_hz is greater than master->max_speed_hz, and it returns
261 * an error when xfer->speed_hz is lower than master->min_speed_hz, so
262 * there is no need to check it here. However, the resulting divisor
263 * must still fit within the supported MBR range for the calculation below.
264 */
265 if ((div < SPI_MBR_DIV_MIN) ||
266 (div > SPI_MBR_DIV_MAX))
267 return -EINVAL;
268
269 /* Determine the first power of 2 greater than or equal to div */
270 mbrdiv = (div & (div - 1)) ? fls(div) : fls(div) - 1;
271
272 spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
273
274 return mbrdiv - 1;
275}
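/*
 * Worked example (illustrative numbers, not taken from a datasheet):
 * with spi->clk_rate = 100 MHz and speed_hz = 7 MHz, div = ceil(100/7)
 * = 15, which is not a power of two, so mbrdiv = fls(15) = 4. The
 * effective speed becomes 100 MHz / 2^4 = 6.25 MHz and the function
 * returns SPI_CFG1.MBR = mbrdiv - 1 = 3.
 */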
276
277/**
278 * stm32_spi_prepare_fthlv - Determine FIFO threshold level
279 * @spi: pointer to the spi controller data structure
280 */
281static u32 stm32_spi_prepare_fthlv(struct stm32_spi *spi)
282{
283 u32 fthlv, half_fifo;
284
285 /* data packet should not exceed 1/2 of fifo space */
286 half_fifo = (spi->fifo_size / 2);
287
288 fthlv = (spi->cur_bpw <= 8) ? half_fifo :
289 (spi->cur_bpw <= 16) ? (half_fifo / 2) :
290 (half_fifo / 4);
291
292 /* align packet size with data registers access */
293 if (spi->cur_bpw > 8)
294 fthlv -= (fthlv % 2); /* multiple of 2 */
295 else
296 fthlv -= (fthlv % 4); /* multiple of 4 */
297
298 return fthlv;
299}
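/*
 * Worked example (assuming a 16-byte FIFO): half_fifo = 8, so a data
 * packet is 8 frames at 8 bits per word, 4 frames at 16 bits per word
 * or 2 frames at 32 bits per word - always 8 bytes, half the FIFO.
 * The caller programs SPI_CFG1.FTHLV with this value minus one.
 */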
300
301/**
302 * stm32_spi_write_txfifo - Write bytes in Transmit Data Register
303 * @spi: pointer to the spi controller data structure
304 *
305 * The access width used to read from tx_buf depends on the remaining
306 * bytes, so the function never reads beyond the end of tx_buf.
307 */
308static void stm32_spi_write_txfifo(struct stm32_spi *spi)
309{
310 while ((spi->tx_len > 0) &&
311 (readl_relaxed(spi->base + STM32_SPI_SR) & SPI_SR_TXP)) {
312 u32 offs = spi->cur_xferlen - spi->tx_len;
313
314 if (spi->tx_len >= sizeof(u32)) {
315 const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
316
317 writel_relaxed(*tx_buf32, spi->base + STM32_SPI_TXDR);
318 spi->tx_len -= sizeof(u32);
319 } else if (spi->tx_len >= sizeof(u16)) {
320 const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
321
322 writew_relaxed(*tx_buf16, spi->base + STM32_SPI_TXDR);
323 spi->tx_len -= sizeof(u16);
324 } else {
325 const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
326
327 writeb_relaxed(*tx_buf8, spi->base + STM32_SPI_TXDR);
328 spi->tx_len -= sizeof(u8);
329 }
330 }
331
332 dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
333}
334
335/**
336 * stm32_spi_read_rxfifo - Read bytes in Receive Data Register
337 * @spi: pointer to the spi controller data structure
338 *
339 * The access width used to write into rx_buf depends on the remaining
340 * bytes, so the function never writes beyond the end of rx_buf.
341 */
342static void stm32_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
343{
344 u32 sr = readl_relaxed(spi->base + STM32_SPI_SR);
345 u32 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
346
347 while ((spi->rx_len > 0) &&
348 ((sr & SPI_SR_RXP) ||
349 (flush && ((sr & SPI_SR_RXWNE) || (rxplvl > 0))))) {
350 u32 offs = spi->cur_xferlen - spi->rx_len;
351
352 if ((spi->rx_len >= sizeof(u32)) ||
353 (flush && (sr & SPI_SR_RXWNE))) {
354 u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);
355
356 *rx_buf32 = readl_relaxed(spi->base + STM32_SPI_RXDR);
357 spi->rx_len -= sizeof(u32);
358 } else if ((spi->rx_len >= sizeof(u16)) ||
359 (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
360 u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
361
362 *rx_buf16 = readw_relaxed(spi->base + STM32_SPI_RXDR);
363 spi->rx_len -= sizeof(u16);
364 } else {
365 u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
366
367 *rx_buf8 = readb_relaxed(spi->base + STM32_SPI_RXDR);
368 spi->rx_len -= sizeof(u8);
369 }
370
371 sr = readl_relaxed(spi->base + STM32_SPI_SR);
372 rxplvl = (sr & SPI_SR_RXPLVL) >> SPI_SR_RXPLVL_SHIFT;
373 }
374
375 dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
376 flush ? "(flush)" : "", spi->rx_len);
377}
378
379/**
380 * stm32_spi_enable - Enable SPI controller
381 * @spi: pointer to the spi controller data structure
382 *
383 * SPI data transfer is enabled but spi_ker_ck is idle.
384 * SPI_CFG1 and SPI_CFG2 are now write protected.
385 */
386static void stm32_spi_enable(struct stm32_spi *spi)
387{
388 dev_dbg(spi->dev, "enable controller\n");
389
390 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
391}
392
393/**
394 * stm32_spi_disable - Disable SPI controller
395 * @spi: pointer to the spi controller data structure
396 *
397 * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
398 * loss, use stm32_spi_read_rxfifo(flush) to read the remaining bytes in
399 * RX-Fifo.
400 */
401static void stm32_spi_disable(struct stm32_spi *spi)
402{
403 unsigned long flags;
404 u32 cr1, sr;
405
406 dev_dbg(spi->dev, "disable controller\n");
407
408 spin_lock_irqsave(&spi->lock, flags);
409
410 cr1 = readl_relaxed(spi->base + STM32_SPI_CR1);
411
412 if (!(cr1 & SPI_CR1_SPE)) {
413 spin_unlock_irqrestore(&spi->lock, flags);
414 return;
415 }
416
417 /* Wait on EOT or suspend the flow */
418 if (readl_relaxed_poll_timeout_atomic(spi->base + STM32_SPI_SR,
419 sr, !(sr & SPI_SR_EOT),
420 10, 100000) < 0) {
421 if (cr1 & SPI_CR1_CSTART) {
422 writel_relaxed(cr1 | SPI_CR1_CSUSP,
423 spi->base + STM32_SPI_CR1);
424 if (readl_relaxed_poll_timeout_atomic(
425 spi->base + STM32_SPI_SR,
426 sr, !(sr & SPI_SR_SUSP),
427 10, 100000) < 0)
428 dev_warn(spi->dev,
429 "Suspend request timeout\n");
430 }
431 }
432
433 if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
434 stm32_spi_read_rxfifo(spi, true);
435
436 if (spi->cur_usedma && spi->tx_buf)
437 dmaengine_terminate_all(spi->dma_tx);
438 if (spi->cur_usedma && spi->rx_buf)
439 dmaengine_terminate_all(spi->dma_rx);
440
441 stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_SPE);
442
443 stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN |
444 SPI_CFG1_RXDMAEN);
445
446 /* Disable interrupts and clear status flags */
447 writel_relaxed(0, spi->base + STM32_SPI_IER);
448 writel_relaxed(SPI_IFCR_ALL, spi->base + STM32_SPI_IFCR);
449
450 spin_unlock_irqrestore(&spi->lock, flags);
451}
452
453/**
454 * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
455 *
456 * If the current transfer size is greater than fifo size, use DMA.
457 */
458static bool stm32_spi_can_dma(struct spi_master *master,
459 struct spi_device *spi_dev,
460 struct spi_transfer *transfer)
461{
462 struct stm32_spi *spi = spi_master_get_devdata(master);
463
464 dev_dbg(spi->dev, "%s: %s\n", __func__,
465 (!!(transfer->len > spi->fifo_size)) ? "true" : "false");
466
467 return !!(transfer->len > spi->fifo_size);
468}
469
470/**
471 * stm32_spi_irq - Interrupt handler for SPI controller events
472 * @irq: interrupt line
473 * @dev_id: SPI controller master interface
474 */
475static irqreturn_t stm32_spi_irq(int irq, void *dev_id)
476{
477 struct spi_master *master = dev_id;
478 struct stm32_spi *spi = spi_master_get_devdata(master);
479 u32 sr, ier, mask;
480 unsigned long flags;
481 bool end = false;
482
483 spin_lock_irqsave(&spi->lock, flags);
484
485 sr = readl_relaxed(spi->base + STM32_SPI_SR);
486 ier = readl_relaxed(spi->base + STM32_SPI_IER);
487
488 mask = ier;
489 /* EOTIE is triggered on EOT, SUSP and TXC events. */
490 mask |= SPI_SR_SUSP;
491 /*
492 * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
493 * Full-Duplex, need to poll RXP event to know if there are remaining
494 * data, before disabling SPI.
495 */
496 mask |= ((spi->rx_buf && !spi->cur_usedma) ? SPI_SR_RXP : 0);
497
498 if (!(sr & mask)) {
499 dev_dbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
500 sr, ier);
501 spin_unlock_irqrestore(&spi->lock, flags);
502 return IRQ_NONE;
503 }
504
505 if (sr & SPI_SR_SUSP) {
506 dev_warn(spi->dev, "Communication suspended\n");
507 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
508 stm32_spi_read_rxfifo(spi, false);
509 }
510
511 if (sr & SPI_SR_MODF) {
512 dev_warn(spi->dev, "Mode fault: transfer aborted\n");
513 end = true;
514 }
515
516 if (sr & SPI_SR_OVR) {
517 dev_warn(spi->dev, "Overrun: received value discarded\n");
518 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
519 stm32_spi_read_rxfifo(spi, false);
520 }
521
522 if (sr & SPI_SR_EOT) {
523 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
524 stm32_spi_read_rxfifo(spi, true);
525 end = true;
526 }
527
528 if (sr & SPI_SR_TXP)
529 if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
530 stm32_spi_write_txfifo(spi);
531
532 if (sr & SPI_SR_RXP)
533 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
534 stm32_spi_read_rxfifo(spi, false);
535
536 writel_relaxed(mask, spi->base + STM32_SPI_IFCR);
537
538 spin_unlock_irqrestore(&spi->lock, flags);
539
540 if (end) {
541 spi_finalize_current_transfer(master);
542 stm32_spi_disable(spi);
543 }
544
545 return IRQ_HANDLED;
546}
547
548/**
549 * stm32_spi_setup - setup device chip select
550 */
551static int stm32_spi_setup(struct spi_device *spi_dev)
552{
553 int ret = 0;
554
555 if (!gpio_is_valid(spi_dev->cs_gpio)) {
556 dev_err(&spi_dev->dev, "%d is not a valid gpio\n",
557 spi_dev->cs_gpio);
558 return -EINVAL;
559 }
560
561 dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__,
562 spi_dev->cs_gpio,
563 (spi_dev->mode & SPI_CS_HIGH) ? "low" : "high");
564
565 ret = gpio_direction_output(spi_dev->cs_gpio,
566 !(spi_dev->mode & SPI_CS_HIGH));
567
568 return ret;
569}
570
571/**
572 * stm32_spi_prepare_msg - set up the controller to transfer a single message
573 */
574static int stm32_spi_prepare_msg(struct spi_master *master,
575 struct spi_message *msg)
576{
577 struct stm32_spi *spi = spi_master_get_devdata(master);
578 struct spi_device *spi_dev = msg->spi;
579 struct device_node *np = spi_dev->dev.of_node;
580 unsigned long flags;
581 u32 cfg2_clrb = 0, cfg2_setb = 0;
582
583 /* SPI slave device may need time between data frames */
584 spi->cur_midi = 0;
585 if (np && !of_property_read_u32(np, "st,spi-midi", &spi->cur_midi))
586 dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
587
588 if (spi_dev->mode & SPI_CPOL)
589 cfg2_setb |= SPI_CFG2_CPOL;
590 else
591 cfg2_clrb |= SPI_CFG2_CPOL;
592
593 if (spi_dev->mode & SPI_CPHA)
594 cfg2_setb |= SPI_CFG2_CPHA;
595 else
596 cfg2_clrb |= SPI_CFG2_CPHA;
597
598 if (spi_dev->mode & SPI_LSB_FIRST)
599 cfg2_setb |= SPI_CFG2_LSBFRST;
600 else
601 cfg2_clrb |= SPI_CFG2_LSBFRST;
602
603 dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
604 spi_dev->mode & SPI_CPOL,
605 spi_dev->mode & SPI_CPHA,
606 spi_dev->mode & SPI_LSB_FIRST,
607 spi_dev->mode & SPI_CS_HIGH);
608
609 spin_lock_irqsave(&spi->lock, flags);
610
611 if (cfg2_clrb || cfg2_setb)
612 writel_relaxed(
613 (readl_relaxed(spi->base + STM32_SPI_CFG2) &
614 ~cfg2_clrb) | cfg2_setb,
615 spi->base + STM32_SPI_CFG2);
616
617 spin_unlock_irqrestore(&spi->lock, flags);
618
619 return 0;
620}
621
622/**
623 * stm32_spi_dma_cb - dma callback
624 *
625 * DMA callback is called when the transfer is complete or when an error
626 * occurs. If the transfer is complete, EOT flag is raised.
627 */
628static void stm32_spi_dma_cb(void *data)
629{
630 struct stm32_spi *spi = data;
631 unsigned long flags;
632 u32 sr;
633
634 spin_lock_irqsave(&spi->lock, flags);
635
636 sr = readl_relaxed(spi->base + STM32_SPI_SR);
637
638 spin_unlock_irqrestore(&spi->lock, flags);
639
640 if (!(sr & SPI_SR_EOT)) {
641 dev_warn(spi->dev, "DMA callback (sr=0x%08x)\n", sr);
642
643 spi_finalize_current_transfer(spi->master);
644 stm32_spi_disable(spi);
645 }
646}
647
648/**
649 * stm32_spi_dma_config - configure dma slave channel depending on current
650 * transfer bits_per_word.
651 */
652static void stm32_spi_dma_config(struct stm32_spi *spi,
653 struct dma_slave_config *dma_conf,
654 enum dma_transfer_direction dir)
655{
656 enum dma_slave_buswidth buswidth;
657 u32 maxburst;
658
659 buswidth = (spi->cur_bpw <= 8) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
660 (spi->cur_bpw <= 16) ? DMA_SLAVE_BUSWIDTH_2_BYTES :
661 DMA_SLAVE_BUSWIDTH_4_BYTES;
662
663 /* Valid for DMA Half or Full Fifo threshold */
664 maxburst = (spi->cur_fthlv == 2) ? 1 : spi->cur_fthlv;
665
666 memset(dma_conf, 0, sizeof(struct dma_slave_config));
667 dma_conf->direction = dir;
668 if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
669 dma_conf->src_addr = spi->phys_addr + STM32_SPI_RXDR;
670 dma_conf->src_addr_width = buswidth;
671 dma_conf->src_maxburst = maxburst;
672
673 dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
674 buswidth, maxburst);
675 } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
676 dma_conf->dst_addr = spi->phys_addr + STM32_SPI_TXDR;
677 dma_conf->dst_addr_width = buswidth;
678 dma_conf->dst_maxburst = maxburst;
679
680 dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
681 buswidth, maxburst);
682 }
683}
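/*
 * Illustration: with cur_bpw = 16 and cur_fthlv = 4, the slave channel
 * is configured for DMA_SLAVE_BUSWIDTH_2_BYTES accesses with a maximum
 * burst of 4 beats; with cur_fthlv = 2 the burst length drops to 1.
 */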
684
685/**
686 * stm32_spi_transfer_one_irq - transfer a single spi_transfer using
687 * interrupts
688 *
689 * It must return 0 if the transfer is finished or 1 if the transfer is still
690 * in progress.
691 */
692static int stm32_spi_transfer_one_irq(struct stm32_spi *spi)
693{
694 unsigned long flags;
695 u32 ier = 0;
696
697 /* Enable the interrupts relative to the current communication mode */
698 if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
699 ier |= SPI_IER_DXPIE;
700 else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
701 ier |= SPI_IER_TXPIE;
702 else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
703 ier |= SPI_IER_RXPIE;
704
705 /* Enable the interrupts relative to the end of transfer */
706 ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
707
708 spin_lock_irqsave(&spi->lock, flags);
709
710 stm32_spi_enable(spi);
711
712 /* Be sure to have data in fifo before starting data transfer */
713 if (spi->tx_buf)
714 stm32_spi_write_txfifo(spi);
715
716 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
717
718 writel_relaxed(ier, spi->base + STM32_SPI_IER);
719
720 spin_unlock_irqrestore(&spi->lock, flags);
721
722 return 1;
723}
724
725/**
726 * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
727 *
728 * It must return 0 if the transfer is finished or 1 if the transfer is still
729 * in progress.
730 */
731static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
732 struct spi_transfer *xfer)
733{
734 struct dma_slave_config tx_dma_conf, rx_dma_conf;
735 struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
736 unsigned long flags;
737 u32 ier = 0;
738
739 spin_lock_irqsave(&spi->lock, flags);
740
741 rx_dma_desc = NULL;
742 if (spi->rx_buf) {
743 stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
744 dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);
745
746 /* Enable Rx DMA request */
747 stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
748
749 rx_dma_desc = dmaengine_prep_slave_sg(
750 spi->dma_rx, xfer->rx_sg.sgl,
751 xfer->rx_sg.nents,
752 rx_dma_conf.direction,
753 DMA_PREP_INTERRUPT);
754
755 rx_dma_desc->callback = stm32_spi_dma_cb;
756 rx_dma_desc->callback_param = spi;
757 }
758
759 tx_dma_desc = NULL;
760 if (spi->tx_buf) {
761 stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
762 dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);
763
764 tx_dma_desc = dmaengine_prep_slave_sg(
765 spi->dma_tx, xfer->tx_sg.sgl,
766 xfer->tx_sg.nents,
767 tx_dma_conf.direction,
768 DMA_PREP_INTERRUPT);
769
770 if (spi->cur_comm == SPI_SIMPLEX_TX) {
771 tx_dma_desc->callback = stm32_spi_dma_cb;
772 tx_dma_desc->callback_param = spi;
773 }
774 }
775
776 if ((spi->tx_buf && !tx_dma_desc) ||
777 (spi->rx_buf && !rx_dma_desc))
778 goto dma_desc_error;
779
780 if (rx_dma_desc) {
781 if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
782 dev_err(spi->dev, "Rx DMA submit failed\n");
783 goto dma_desc_error;
784 }
785 /* Enable Rx DMA channel */
786 dma_async_issue_pending(spi->dma_rx);
787 }
788
789 if (tx_dma_desc) {
790 if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
791 dev_err(spi->dev, "Tx DMA submit failed\n");
792 goto dma_submit_error;
793 }
794 /* Enable Tx DMA channel */
795 dma_async_issue_pending(spi->dma_tx);
796
797 /* Enable Tx DMA request */
798 stm32_spi_set_bits(spi, STM32_SPI_CFG1, SPI_CFG1_TXDMAEN);
799 }
800
801 /* Enable the interrupts relative to the end of transfer */
802 ier |= SPI_IER_EOTIE | SPI_IER_TXTFIE | SPI_IER_OVRIE | SPI_IER_MODFIE;
803 writel_relaxed(ier, spi->base + STM32_SPI_IER);
804
805 stm32_spi_enable(spi);
806
807 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_CSTART);
808
809 spin_unlock_irqrestore(&spi->lock, flags);
810
811 return 1;
812
813dma_submit_error:
814 if (spi->rx_buf)
815 dmaengine_terminate_all(spi->dma_rx);
816
817dma_desc_error:
818 stm32_spi_clr_bits(spi, STM32_SPI_CFG1, SPI_CFG1_RXDMAEN);
819
820 spin_unlock_irqrestore(&spi->lock, flags);
821
822 dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");
823
824 return stm32_spi_transfer_one_irq(spi);
825}
826
827/**
828 * stm32_spi_transfer_one_setup - common setup to transfer a single
829 * spi_transfer either using DMA or
830 * interrupts.
831 */
832static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
833 struct spi_device *spi_dev,
834 struct spi_transfer *transfer)
835{
836 unsigned long flags;
837 u32 cfg1_clrb = 0, cfg1_setb = 0, cfg2_clrb = 0, cfg2_setb = 0;
838 u32 mode, nb_words;
839 int ret = 0;
840
841 spin_lock_irqsave(&spi->lock, flags);
842
843 if (spi->cur_bpw != transfer->bits_per_word) {
844 u32 bpw, fthlv;
845
846 spi->cur_bpw = transfer->bits_per_word;
847 bpw = spi->cur_bpw - 1;
848
849 cfg1_clrb |= SPI_CFG1_DSIZE;
850 cfg1_setb |= (bpw << SPI_CFG1_DSIZE_SHIFT) & SPI_CFG1_DSIZE;
851
852 spi->cur_fthlv = stm32_spi_prepare_fthlv(spi);
853 fthlv = spi->cur_fthlv - 1;
854
855 cfg1_clrb |= SPI_CFG1_FTHLV;
856 cfg1_setb |= (fthlv << SPI_CFG1_FTHLV_SHIFT) & SPI_CFG1_FTHLV;
857 }
858
859 if (spi->cur_speed != transfer->speed_hz) {
860 int mbr;
861
862 /* Update spi->cur_speed with real clock speed */
863 mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz);
864 if (mbr < 0) {
865 ret = mbr;
866 goto out;
867 }
868
869 transfer->speed_hz = spi->cur_speed;
870
871 cfg1_clrb |= SPI_CFG1_MBR;
872 cfg1_setb |= (mbr << SPI_CFG1_MBR_SHIFT) & SPI_CFG1_MBR;
873 }
874
875 if (cfg1_clrb || cfg1_setb)
876 writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG1) &
877 ~cfg1_clrb) | cfg1_setb,
878 spi->base + STM32_SPI_CFG1);
879
880 mode = SPI_FULL_DUPLEX;
881 if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
882 /*
883 * SPI_3WIRE with both xfer->tx_buf and xfer->rx_buf non-NULL is
884 * forbidden and invalidated by the SPI subsystem, so depending on
885 * which buffer is valid, we can determine the direction of the
886 * transfer.
887 */
888 mode = SPI_HALF_DUPLEX;
889 if (!transfer->tx_buf)
890 stm32_spi_clr_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
891 else if (!transfer->rx_buf)
892 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_HDDIR);
893 } else {
894 if (!transfer->tx_buf)
895 mode = SPI_SIMPLEX_RX;
896 else if (!transfer->rx_buf)
897 mode = SPI_SIMPLEX_TX;
898 }
899 if (spi->cur_comm != mode) {
900 spi->cur_comm = mode;
901
902 cfg2_clrb |= SPI_CFG2_COMM;
903 cfg2_setb |= (mode << SPI_CFG2_COMM_SHIFT) & SPI_CFG2_COMM;
904 }
905
906 cfg2_clrb |= SPI_CFG2_MIDI;
907 if ((transfer->len > 1) && (spi->cur_midi > 0)) {
908 u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
909 u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
910 (u32)SPI_CFG2_MIDI >> SPI_CFG2_MIDI_SHIFT);
911
912 dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
913 sck_period_ns, midi, midi * sck_period_ns);
914
915 cfg2_setb |= (midi << SPI_CFG2_MIDI_SHIFT) & SPI_CFG2_MIDI;
916 }
917
918 if (cfg2_clrb || cfg2_setb)
919 writel_relaxed((readl_relaxed(spi->base + STM32_SPI_CFG2) &
920 ~cfg2_clrb) | cfg2_setb,
921 spi->base + STM32_SPI_CFG2);
922
923 nb_words = DIV_ROUND_UP(transfer->len * 8,
924 (spi->cur_bpw <= 8) ? 8 :
925 (spi->cur_bpw <= 16) ? 16 : 32);
926 nb_words <<= SPI_CR2_TSIZE_SHIFT;
927
928 if (nb_words <= SPI_CR2_TSIZE) {
929 writel_relaxed(nb_words, spi->base + STM32_SPI_CR2);
930 } else {
931 ret = -EMSGSIZE;
932 goto out;
933 }
934
935 spi->cur_xferlen = transfer->len;
936
937 dev_dbg(spi->dev, "transfer communication mode set to %d\n",
938 spi->cur_comm);
939 dev_dbg(spi->dev,
940 "data frame of %d-bit, data packet of %d data frames\n",
941 spi->cur_bpw, spi->cur_fthlv);
942 dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
943 dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
944 spi->cur_xferlen, nb_words);
945 dev_dbg(spi->dev, "dma %s\n",
946 (spi->cur_usedma) ? "enabled" : "disabled");
947
948out:
949 spin_unlock_irqrestore(&spi->lock, flags);
950
951 return ret;
952}
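/*
 * Worked example (illustrative numbers): a 10-byte transfer at 16 bits
 * per word gives nb_words = DIV_ROUND_UP(80, 16) = 5 data frames, which
 * is written to SPI_CR2.TSIZE. With cur_speed = 10 MHz the SCK period
 * is 100 ns, so an "st,spi-midi" of 250 ns translates to midi = 3 SCK
 * cycles, clamped to the 15-cycle maximum of the 4-bit MIDI field.
 */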
953
954/**
955 * stm32_spi_transfer_one - transfer a single spi_transfer
956 *
957 * It must return 0 if the transfer is finished or 1 if the transfer is still
958 * in progress.
959 */
960static int stm32_spi_transfer_one(struct spi_master *master,
961 struct spi_device *spi_dev,
962 struct spi_transfer *transfer)
963{
964 struct stm32_spi *spi = spi_master_get_devdata(master);
965 int ret;
966
967 spi->tx_buf = transfer->tx_buf;
968 spi->rx_buf = transfer->rx_buf;
969 spi->tx_len = spi->tx_buf ? transfer->len : 0;
970 spi->rx_len = spi->rx_buf ? transfer->len : 0;
971
972 spi->cur_usedma = stm32_spi_can_dma(master, spi_dev, transfer);
973
974 ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
975 if (ret) {
976 dev_err(spi->dev, "SPI transfer setup failed\n");
977 return ret;
978 }
979
980 if (spi->cur_usedma)
981 return stm32_spi_transfer_one_dma(spi, transfer);
982 else
983 return stm32_spi_transfer_one_irq(spi);
984}
985
986/**
987 * stm32_spi_unprepare_msg - relax the hardware
988 *
989 * Normally, if TSIZE has been configured, we should relax the hardware at the
990 * reception of the EOT interrupt. But in case of error, EOT will not be
991 * raised. So the subsystem's unprepare_message call allows us to properly
992 * complete the transfer from a hardware point of view.
993 */
994static int stm32_spi_unprepare_msg(struct spi_master *master,
995 struct spi_message *msg)
996{
997 struct stm32_spi *spi = spi_master_get_devdata(master);
998
999 stm32_spi_disable(spi);
1000
1001 return 0;
1002}
1003
1004/**
1005 * stm32_spi_config - Configure SPI controller as SPI master
1006 */
1007static int stm32_spi_config(struct stm32_spi *spi)
1008{
1009 unsigned long flags;
1010
1011 spin_lock_irqsave(&spi->lock, flags);
1012
1013 /* Ensure I2SMOD bit is kept cleared */
1014 stm32_spi_clr_bits(spi, STM32_SPI_I2SCFGR, SPI_I2SCFGR_I2SMOD);
1015
1016 /*
1017 * - SS input value high
1018 * - transmitter half duplex direction
1019 * - automatic communication suspend when RX-Fifo is full
1020 */
1021 stm32_spi_set_bits(spi, STM32_SPI_CR1, SPI_CR1_SSI |
1022 SPI_CR1_HDDIR |
1023 SPI_CR1_MASRX);
1024
1025 /*
1026 * - Set the master mode (default Motorola mode)
1027 * - Consider 1 master/n slaves configuration and
1028 * SS input value is determined by the SSI bit
1029 * - keep control of all associated GPIOs
1030 */
1031 stm32_spi_set_bits(spi, STM32_SPI_CFG2, SPI_CFG2_MASTER |
1032 SPI_CFG2_SSM |
1033 SPI_CFG2_AFCNTR);
1034
1035 spin_unlock_irqrestore(&spi->lock, flags);
1036
1037 return 0;
1038}
1039
1040static const struct of_device_id stm32_spi_of_match[] = {
1041 { .compatible = "st,stm32-spi", },
1042 {},
1043};
1044MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
1045
1046static int stm32_spi_probe(struct platform_device *pdev)
1047{
1048 struct spi_master *master;
1049 struct stm32_spi *spi;
1050 struct resource *res;
1051 int i, ret;
1052
1053 master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
1054 if (!master) {
1055 dev_err(&pdev->dev, "spi master allocation failed\n");
1056 return -ENOMEM;
1057 }
1058 platform_set_drvdata(pdev, master);
1059
1060 spi = spi_master_get_devdata(master);
1061 spi->dev = &pdev->dev;
1062 spi->master = master;
1063 spin_lock_init(&spi->lock);
1064
1065 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1066 spi->base = devm_ioremap_resource(&pdev->dev, res);
1067 if (IS_ERR(spi->base)) {
1068 ret = PTR_ERR(spi->base);
1069 goto err_master_put;
1070 }
1071 spi->phys_addr = (dma_addr_t)res->start;
1072
1073 spi->irq = platform_get_irq(pdev, 0);
1074 if (spi->irq <= 0) {
1075 dev_err(&pdev->dev, "no irq: %d\n", spi->irq);
1076 ret = -ENOENT;
1077 goto err_master_put;
1078 }
1079 ret = devm_request_threaded_irq(&pdev->dev, spi->irq, NULL,
1080 stm32_spi_irq, IRQF_ONESHOT,
1081 pdev->name, master);
1082 if (ret) {
1083 dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
1084 ret);
1085 goto err_master_put;
1086 }
1087
1088 spi->clk = devm_clk_get(&pdev->dev, NULL);
1089 if (IS_ERR(spi->clk)) {
1090 ret = PTR_ERR(spi->clk);
1091 dev_err(&pdev->dev, "clk get failed: %d\n", ret);
1092 goto err_master_put;
1093 }
1094
1095 ret = clk_prepare_enable(spi->clk);
1096 if (ret) {
1097 dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
1098 goto err_master_put;
1099 }
1100 spi->clk_rate = clk_get_rate(spi->clk);
1101 if (!spi->clk_rate) {
1102 dev_err(&pdev->dev, "clk rate = 0\n");
1103 ret = -EINVAL;
1104 goto err_master_put;
1105 }
1106
1107 spi->rst = devm_reset_control_get(&pdev->dev, NULL);
1108 if (!IS_ERR(spi->rst)) {
1109 reset_control_assert(spi->rst);
1110 udelay(2);
1111 reset_control_deassert(spi->rst);
1112 }
1113
1114 spi->fifo_size = stm32_spi_get_fifo_size(spi);
1115
1116 ret = stm32_spi_config(spi);
1117 if (ret) {
1118 dev_err(&pdev->dev, "controller configuration failed: %d\n",
1119 ret);
1120 goto err_clk_disable;
1121 }
1122
1123 master->dev.of_node = pdev->dev.of_node;
1124 master->auto_runtime_pm = true;
1125 master->bus_num = pdev->id;
1126 master->mode_bits = SPI_MODE_3 | SPI_CS_HIGH | SPI_LSB_FIRST |
1127 SPI_3WIRE | SPI_LOOP;
1128 master->bits_per_word_mask = stm32_spi_get_bpw_mask(spi);
1129 master->max_speed_hz = spi->clk_rate / SPI_MBR_DIV_MIN;
1130 master->min_speed_hz = spi->clk_rate / SPI_MBR_DIV_MAX;
1131 master->setup = stm32_spi_setup;
1132 master->prepare_message = stm32_spi_prepare_msg;
1133 master->transfer_one = stm32_spi_transfer_one;
1134 master->unprepare_message = stm32_spi_unprepare_msg;
1135
1136 spi->dma_tx = dma_request_slave_channel(spi->dev, "tx");
1137 if (!spi->dma_tx)
1138 dev_warn(&pdev->dev, "failed to request tx dma channel\n");
1139 else
1140 master->dma_tx = spi->dma_tx;
1141
1142 spi->dma_rx = dma_request_slave_channel(spi->dev, "rx");
1143 if (!spi->dma_rx)
1144 dev_warn(&pdev->dev, "failed to request rx dma channel\n");
1145 else
1146 master->dma_rx = spi->dma_rx;
1147
1148 if (spi->dma_tx || spi->dma_rx)
1149 master->can_dma = stm32_spi_can_dma;
1150
1151 ret = devm_spi_register_master(&pdev->dev, master);
1152 if (ret) {
1153 dev_err(&pdev->dev, "spi master registration failed: %d\n",
1154 ret);
1155 goto err_dma_release;
1156 }
1157
1158 if (!master->cs_gpios) {
1159 dev_err(&pdev->dev, "no CS gpios available\n");
1160 ret = -EINVAL;
1161 goto err_dma_release;
1162 }
1163
1164 for (i = 0; i < master->num_chipselect; i++) {
1165 if (!gpio_is_valid(master->cs_gpios[i])) {
1166 dev_err(&pdev->dev, "%i is not a valid gpio\n",
1167 master->cs_gpios[i]);
1168 ret = -EINVAL;
1169 goto err_dma_release;
1170 }
1171
1172 ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
1173 DRIVER_NAME);
1174 if (ret) {
1175 dev_err(&pdev->dev, "can't get CS gpio %i\n",
1176 master->cs_gpios[i]);
1177 goto err_dma_release;
1178 }
1179 }
1180
1181 dev_info(&pdev->dev, "driver initialized\n");
1182
1183 return 0;
1184
1185err_dma_release:
1186 if (spi->dma_tx)
1187 dma_release_channel(spi->dma_tx);
1188 if (spi->dma_rx)
1189 dma_release_channel(spi->dma_rx);
1190err_clk_disable:
1191 clk_disable_unprepare(spi->clk);
1192err_master_put:
1193 spi_master_put(master);
1194
1195 return ret;
1196}
1197
1198static int stm32_spi_remove(struct platform_device *pdev)
1199{
1200 struct spi_master *master = platform_get_drvdata(pdev);
1201 struct stm32_spi *spi = spi_master_get_devdata(master);
1202
1203 stm32_spi_disable(spi);
1204
1205 if (master->dma_tx)
1206 dma_release_channel(master->dma_tx);
1207 if (master->dma_rx)
1208 dma_release_channel(master->dma_rx);
1209
1210 clk_disable_unprepare(spi->clk);
1211
1212 return 0;
1213}
1214
1215#ifdef CONFIG_PM_SLEEP
1216static int stm32_spi_suspend(struct device *dev)
1217{
1218 struct spi_master *master = dev_get_drvdata(dev);
1219 struct stm32_spi *spi = spi_master_get_devdata(master);
1220 int ret;
1221
1222 ret = spi_master_suspend(master);
1223 if (ret)
1224 return ret;
1225
1226 clk_disable_unprepare(spi->clk);
1227
1228 return ret;
1229}
1230
1231static int stm32_spi_resume(struct device *dev)
1232{
1233 struct spi_master *master = dev_get_drvdata(dev);
1234 struct stm32_spi *spi = spi_master_get_devdata(master);
1235 int ret;
1236
1237 ret = clk_prepare_enable(spi->clk);
1238 if (ret)
1239 return ret;
1240 ret = spi_master_resume(master);
1241 if (ret)
1242 clk_disable_unprepare(spi->clk);
1243
1244 return ret;
1245}
1246#endif
1247
1248static SIMPLE_DEV_PM_OPS(stm32_spi_pm_ops,
1249 stm32_spi_suspend, stm32_spi_resume);
1250
1251static struct platform_driver stm32_spi_driver = {
1252 .probe = stm32_spi_probe,
1253 .remove = stm32_spi_remove,
1254 .driver = {
1255 .name = DRIVER_NAME,
1256 .pm = &stm32_spi_pm_ops,
1257 .of_match_table = stm32_spi_of_match,
1258 },
1259};
1260
1261module_platform_driver(stm32_spi_driver);
1262
1263MODULE_ALIAS("platform:" DRIVER_NAME);
1264MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
1265MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
1266MODULE_LICENSE("GPL v2");