author	Girish Mahadevan <girishm@codeaurora.org>	2018-10-03 09:44:25 -0400
committer	Mark Brown <broonie@kernel.org>	2018-10-11 10:28:02 -0400
commit	561de45f72bd5f9b3f166bdd6151d4d0fe9e6534 (patch)
tree	6e13bed954d5fcb0281172ece14bbbd72891d9ed /drivers/spi
parent	4b8ce2f707778f436b8f976cecfdcd984d9d546e (diff)
spi: spi-geni-qcom: Add SPI driver support for GENI based QUP
This driver supports the GENI based SPI controller in Qualcomm SoCs. The
Qualcomm Generic Interface (GENI) is a programmable module supporting a
wide range of serial interfaces, including SPI. This driver supports SPI
operations using the FIFO mode of transfer.

Signed-off-by: Girish Mahadevan <girishm@codeaurora.org>
Signed-off-by: Dilip Kota <dkota@codeaurora.org>
Signed-off-by: Alok Chauhan <alokc@codeaurora.org>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Tested-by: Douglas Anderson <dianders@chromium.org>
Reviewed-by: Stephen Boyd <swboyd@chromium.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/Kconfig	 12
-rw-r--r--	drivers/spi/Makefile	  1
-rw-r--r--	drivers/spi/spi-geni-qcom.c	703
3 files changed, 716 insertions, 0 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 881d499c69b2..7feab4eb9857 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -539,6 +539,18 @@ config SPI_QUP
 	  This driver can also be built as a module. If so, the module
 	  will be called spi_qup.
 
+config SPI_QCOM_GENI
+	tristate "Qualcomm GENI based SPI controller"
+	depends on QCOM_GENI_SE
+	help
+	  This driver supports GENI serial engine based SPI controller in
+	  master mode on the Qualcomm Technologies Inc.'s SoCs. If you say
+	  yes to this option, support will be included for the built-in SPI
+	  interface on the Qualcomm Technologies Inc.'s SoCs.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called spi-geni-qcom.
+
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
 	depends on ARCH_S3C24XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0eb8b185d8e5..7e7df53fc73a 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
 spi-pxa2xx-platform-objs		:= spi-pxa2xx.o spi-pxa2xx-dma.o
 obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QCOM_GENI)		+= spi-geni-qcom.o
 obj-$(CONFIG_SPI_QCOM_QSPI)		+= spi-qcom-qspi.o
 obj-$(CONFIG_SPI_QUP)			+= spi-qup.o
 obj-$(CONFIG_SPI_ROCKCHIP)		+= spi-rockchip.o
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
new file mode 100644
index 000000000000..6432ecc4e2ca
--- /dev/null
+++ b/drivers/spi/spi-geni-qcom.c
@@ -0,0 +1,703 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>

/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA		0x224
#define CPHA			BIT(0)

#define SE_SPI_LOOPBACK		0x22c
#define LOOPBACK_ENABLE		0x1
#define NORMAL_MODE		0x0
#define LOOPBACK_MSK		GENMASK(1, 0)

#define SE_SPI_CPOL		0x230
#define CPOL			BIT(2)

#define SE_SPI_DEMUX_OUTPUT_INV	0x24c
#define CS_DEMUX_OUTPUT_INV_MSK	GENMASK(3, 0)

#define SE_SPI_DEMUX_SEL	0x250
#define CS_DEMUX_OUTPUT_SEL	GENMASK(3, 0)

#define SE_SPI_TRANS_CFG	0x25c
#define CS_TOGGLE		BIT(0)

#define SE_SPI_WORD_LEN		0x268
#define WORD_LEN_MSK		GENMASK(9, 0)
#define MIN_WORD_LEN		4

#define SE_SPI_TX_TRANS_LEN	0x26c
#define SE_SPI_RX_TRANS_LEN	0x270
#define TRANS_LEN_MSK		GENMASK(23, 0)

#define SE_SPI_PRE_POST_CMD_DLY	0x274

#define SE_SPI_DELAY_COUNTERS	0x278
#define SPI_INTER_WORDS_DELAY_MSK	GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK		GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT		10

/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY		1
#define SPI_RX_ONLY		2
#define SPI_FULL_DUPLEX		3
#define SPI_TX_RX		7
#define SPI_CS_ASSERT		8
#define SPI_CS_DEASSERT		9
#define SPI_SCK_ONLY		10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY	BIT(0)
#define TIMESTAMP_BEFORE	BIT(1)
#define FRAGMENTATION		BIT(2)
#define TIMESTAMP_AFTER		BIT(3)
#define POST_CMD_DELAY		BIT(4)

/* SPI M_COMMAND OPCODE */
enum spi_mcmd_code {
	CMD_NONE,
	CMD_XFER,
	CMD_CS,
	CMD_CANCEL,
};


struct spi_geni_master {
	struct geni_se se;
	struct device *dev;
	u32 tx_fifo_depth;
	u32 fifo_width_bits;
	u32 tx_wm;
	unsigned long cur_speed_hz;
	unsigned int cur_bits_per_word;
	unsigned int tx_rem_bytes;
	unsigned int rx_rem_bytes;
	const struct spi_transfer *cur_xfer;
	struct completion xfer_done;
	unsigned int oversampling;
	spinlock_t lock;
	unsigned int cur_mcmd;
	int irq;
};

static void handle_fifo_timeout(struct spi_master *spi,
				struct spi_message *msg);

static int get_spi_clk_cfg(unsigned int speed_hz,
			struct spi_geni_master *mas,
			unsigned int *clk_idx,
			unsigned int *clk_div)
{
	unsigned long sclk_freq;
	unsigned int actual_hz;
	struct geni_se *se = &mas->se;
	int ret;

	ret = geni_se_clk_freq_match(&mas->se,
				speed_hz * mas->oversampling,
				clk_idx, &sclk_freq, false);
	if (ret) {
		dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
							ret, speed_hz);
		return ret;
	}

	*clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
	actual_hz = sclk_freq / (mas->oversampling * *clk_div);

	dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
				actual_hz, sclk_freq, *clk_idx, *clk_div);
	ret = clk_set_rate(se->clk, sclk_freq);
	if (ret)
		dev_err(mas->dev, "clk_set_rate failed %d\n", ret);
	return ret;
}

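/*
 * Illustrative example of get_spi_clk_cfg()'s divider math (the numbers
 * below are assumed, not taken from any particular board): with
 * oversampling = 1, a requested speed_hz of 1 MHz and a matched source
 * clock of 19.2 MHz, clk_div = DIV_ROUND_UP(19200000, 1000000) = 20 and
 * the SCLK actually delivered to the bus is 19200000 / 20 = 960 kHz.
 */
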
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
	struct spi_master *spi = dev_get_drvdata(mas->dev);
	struct geni_se *se = &mas->se;
	unsigned long timeout;

	reinit_completion(&mas->xfer_done);
	pm_runtime_get_sync(mas->dev);
	if (!(slv->mode & SPI_CS_HIGH))
		set_flag = !set_flag;

	mas->cur_mcmd = CMD_CS;
	if (set_flag)
		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
	else
		geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);

	timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (!timeout)
		handle_fifo_timeout(spi, NULL);

	pm_runtime_put(mas->dev);
}

static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
					unsigned int bits_per_word)
{
	unsigned int pack_words;
	bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
	struct geni_se *se = &mas->se;
	u32 word_len;

	word_len = readl(se->base + SE_SPI_WORD_LEN);

	/*
	 * If bits_per_word isn't a byte aligned value, set the packing to be
	 * 1 SPI word per FIFO word.
	 */
	if (!(mas->fifo_width_bits % bits_per_word))
		pack_words = mas->fifo_width_bits / bits_per_word;
	else
		pack_words = 1;
	word_len &= ~WORD_LEN_MSK;
	word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
	geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
								true, true);
	writel(word_len, se->base + SE_SPI_WORD_LEN);
}

static int setup_fifo_params(struct spi_device *spi_slv,
					struct spi_master *spi)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u32 loopback_cfg, cpol, cpha, demux_output_inv;
	u32 demux_sel, clk_sel, m_clk_cfg, idx, div;
	int ret;

	loopback_cfg = readl(se->base + SE_SPI_LOOPBACK);
	cpol = readl(se->base + SE_SPI_CPOL);
	cpha = readl(se->base + SE_SPI_CPHA);
	demux_output_inv = 0;
	loopback_cfg &= ~LOOPBACK_MSK;
	cpol &= ~CPOL;
	cpha &= ~CPHA;

	if (spi_slv->mode & SPI_LOOP)
		loopback_cfg |= LOOPBACK_ENABLE;

	if (spi_slv->mode & SPI_CPOL)
		cpol |= CPOL;

	if (spi_slv->mode & SPI_CPHA)
		cpha |= CPHA;

	if (spi_slv->mode & SPI_CS_HIGH)
		demux_output_inv = BIT(spi_slv->chip_select);

	demux_sel = spi_slv->chip_select;
	mas->cur_speed_hz = spi_slv->max_speed_hz;
	mas->cur_bits_per_word = spi_slv->bits_per_word;

	ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
	if (ret) {
		dev_err(mas->dev, "Err setting clks ret(%d) for %ld\n",
							ret, mas->cur_speed_hz);
		return ret;
	}

	clk_sel = idx & CLK_SEL_MSK;
	m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
	spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
	writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
	writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
	writel(cpha, se->base + SE_SPI_CPHA);
	writel(cpol, se->base + SE_SPI_CPOL);
	writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
	writel(clk_sel, se->base + SE_GENI_CLK_SEL);
	writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
	return 0;
}

static int spi_geni_prepare_message(struct spi_master *spi,
					struct spi_message *spi_msg)
{
	int ret;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct geni_se *se = &mas->se;

	geni_se_select_mode(se, GENI_SE_FIFO);
	reinit_completion(&mas->xfer_done);
	ret = setup_fifo_params(spi_msg->spi, spi);
	if (ret)
		dev_err(mas->dev, "Couldn't select mode %d\n", ret);
	return ret;
}

static int spi_geni_init(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	unsigned int proto, major, minor, ver;

	pm_runtime_get_sync(mas->dev);

	proto = geni_se_read_proto(se);
	if (proto != GENI_SE_SPI) {
		dev_err(mas->dev, "Invalid proto %d\n", proto);
		pm_runtime_put(mas->dev);
		return -ENXIO;
	}
	mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);

	/* Width of Tx and Rx FIFO is same */
	mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);

	/*
	 * Hardware programming guide suggests to configure
	 * RX FIFO RFR level to fifo_depth-2.
	 */
	geni_se_init(se, 0x0, mas->tx_fifo_depth - 2);
	/* Transmit an entire FIFO worth of data per IRQ */
	mas->tx_wm = 1;
	ver = geni_se_get_qup_hw_version(se);
	major = GENI_SE_VERSION_MAJOR(ver);
	minor = GENI_SE_VERSION_MINOR(ver);

	if (major == 1 && minor == 0)
		mas->oversampling = 2;
	else
		mas->oversampling = 1;

	pm_runtime_put(mas->dev);
	return 0;
}

static void setup_fifo_xfer(struct spi_transfer *xfer,
				struct spi_geni_master *mas,
				u16 mode, struct spi_master *spi)
{
	u32 m_cmd = 0;
	u32 spi_tx_cfg, len;
	struct geni_se *se = &mas->se;

	spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
	if (xfer->bits_per_word != mas->cur_bits_per_word) {
		spi_setup_word_len(mas, mode, xfer->bits_per_word);
		mas->cur_bits_per_word = xfer->bits_per_word;
	}

	/* Speed and bits per word can be overridden per transfer */
	if (xfer->speed_hz != mas->cur_speed_hz) {
		int ret;
		u32 clk_sel, m_clk_cfg;
		unsigned int idx, div;

		ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
		if (ret) {
			dev_err(mas->dev, "Err setting clks:%d\n", ret);
			return;
		}
		/*
		 * SPI core clock gets configured with the requested frequency
		 * or the frequency closer to the requested frequency.
		 * For that reason requested frequency is stored in the
		 * cur_speed_hz and referred in the consecutive transfer instead
		 * of calling clk_get_rate() API.
		 */
		mas->cur_speed_hz = xfer->speed_hz;
		clk_sel = idx & CLK_SEL_MSK;
		m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
		writel(clk_sel, se->base + SE_GENI_CLK_SEL);
		writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
	}

	mas->tx_rem_bytes = 0;
	mas->rx_rem_bytes = 0;
	if (xfer->tx_buf && xfer->rx_buf)
		m_cmd = SPI_FULL_DUPLEX;
	else if (xfer->tx_buf)
		m_cmd = SPI_TX_ONLY;
	else if (xfer->rx_buf)
		m_cmd = SPI_RX_ONLY;

	spi_tx_cfg &= ~CS_TOGGLE;

	if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
		len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
	else
		len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
	len &= TRANS_LEN_MSK;

	mas->cur_xfer = xfer;
	if (m_cmd & SPI_TX_ONLY) {
		mas->tx_rem_bytes = xfer->len;
		writel(len, se->base + SE_SPI_TX_TRANS_LEN);
	}

	if (m_cmd & SPI_RX_ONLY) {
		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
		mas->rx_rem_bytes = xfer->len;
	}
	writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
	mas->cur_mcmd = CMD_XFER;
	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);

	/*
	 * TX_WATERMARK_REG should be set after SPI configuration and
	 * setting up GENI SE engine, as driver starts data transfer
	 * for the watermark interrupt.
	 */
	if (m_cmd & SPI_TX_ONLY)
		writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}

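/*
 * Illustrative example of the transfer-length math in setup_fifo_xfer()
 * (assumed numbers): the TX/RX_TRANS_LEN registers are programmed in SPI
 * words rather than bytes, so an xfer->len of 100 bytes at 8 bits per
 * word programs 100 * 8 / 8 = 100 words, while the same 100 bytes at
 * 16 bits per word programs 100 * 8 / 16 = 50 words.
 */
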
static void handle_fifo_timeout(struct spi_master *spi,
				struct spi_message *msg)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	unsigned long time_left, flags;
	struct geni_se *se = &mas->se;

	spin_lock_irqsave(&mas->lock, flags);
	reinit_completion(&mas->xfer_done);
	mas->cur_mcmd = CMD_CANCEL;
	geni_se_cancel_m_cmd(se);
	writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
	spin_unlock_irqrestore(&mas->lock, flags);
	time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (time_left)
		return;

	spin_lock_irqsave(&mas->lock, flags);
	reinit_completion(&mas->xfer_done);
	geni_se_abort_m_cmd(se);
	spin_unlock_irqrestore(&mas->lock, flags);
	time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
	if (!time_left)
		dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
}

static int spi_geni_transfer_one(struct spi_master *spi,
				struct spi_device *slv,
				struct spi_transfer *xfer)
{
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	/* Terminate and return success for 0 byte length transfer */
	if (!xfer->len)
		return 0;

	setup_fifo_xfer(xfer, mas, slv->mode, spi);
	return 1;
}

static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
	/*
	 * Calculate how many bytes we'll put in each FIFO word. If the
	 * transfer words don't pack cleanly into a FIFO word we'll just put
	 * one transfer word in each FIFO word. If they do pack we'll pack 'em.
	 */
	if (mas->fifo_width_bits % mas->cur_bits_per_word)
		return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
						       BITS_PER_BYTE));

	return mas->fifo_width_bits / BITS_PER_BYTE;
}

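/*
 * Illustrative example of geni_byte_per_fifo_word() (assumed numbers):
 * with a 32-bit wide FIFO and 8 bits per word the SPI words pack cleanly,
 * so each FIFO word carries 32 / 8 = 4 bytes; at 12 bits per word (which
 * does not divide 32) each FIFO word carries a single SPI word occupying
 * roundup_pow_of_two(DIV_ROUND_UP(12, 8)) = 2 bytes.
 */
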
static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	unsigned int max_bytes;
	const u8 *tx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
	if (mas->tx_rem_bytes < max_bytes)
		max_bytes = mas->tx_rem_bytes;

	tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
	while (i < max_bytes) {
		unsigned int j;
		unsigned int bytes_to_write;
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;

		bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
		for (j = 0; j < bytes_to_write; j++)
			fifo_byte[j] = tx_buf[i++];
		iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
	}
	mas->tx_rem_bytes -= max_bytes;
	if (!mas->tx_rem_bytes)
		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
}

static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
	struct geni_se *se = &mas->se;
	u32 rx_fifo_status;
	unsigned int rx_bytes;
	unsigned int rx_last_byte_valid;
	u8 *rx_buf;
	unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
	unsigned int i = 0;

	rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
	rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
	if (rx_fifo_status & RX_LAST) {
		rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
		rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
		if (rx_last_byte_valid && rx_last_byte_valid < 4)
			rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
	}
	if (mas->rx_rem_bytes < rx_bytes)
		rx_bytes = mas->rx_rem_bytes;

	rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
	while (i < rx_bytes) {
		u32 fifo_word = 0;
		u8 *fifo_byte = (u8 *)&fifo_word;
		unsigned int bytes_to_read;
		unsigned int j;

		bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
		ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
		for (j = 0; j < bytes_to_read; j++)
			rx_buf[i++] = fifo_byte[j];
	}
	mas->rx_rem_bytes -= rx_bytes;
}

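/*
 * Illustrative example of the RX byte accounting in geni_spi_handle_rx()
 * (assumed numbers): with 4 bytes per FIFO word, a status word reporting
 * 3 FIFO words with RX_LAST set and a last-byte-valid field of 2 yields
 * 3 * 4 - (4 - 2) = 10 valid bytes to copy into rx_buf.
 */
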
static irqreturn_t geni_spi_isr(int irq, void *data)
{
	struct spi_master *spi = data;
	struct spi_geni_master *mas = spi_master_get_devdata(spi);
	struct geni_se *se = &mas->se;
	u32 m_irq;
	unsigned long flags;
	irqreturn_t ret = IRQ_HANDLED;

	if (mas->cur_mcmd == CMD_NONE)
		return IRQ_NONE;

	spin_lock_irqsave(&mas->lock, flags);
	m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);

	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
		geni_spi_handle_rx(mas);

	if (m_irq & M_TX_FIFO_WATERMARK_EN)
		geni_spi_handle_tx(mas);

	if (m_irq & M_CMD_DONE_EN) {
		if (mas->cur_mcmd == CMD_XFER)
			spi_finalize_current_transfer(spi);
		else if (mas->cur_mcmd == CMD_CS)
			complete(&mas->xfer_done);
		mas->cur_mcmd = CMD_NONE;
		/*
		 * If this happens, then a CMD_DONE came before all the Tx
		 * buffer bytes were sent out. This is unusual, log this
		 * condition and disable the WM interrupt to prevent the
		 * system from stalling due an interrupt storm.
		 * If this happens when all Rx bytes haven't been received, log
		 * the condition.
		 * The only known time this can happen is if bits_per_word != 8
		 * and some registers that expect xfer lengths in num spi_words
		 * weren't written correctly.
		 */
		if (mas->tx_rem_bytes) {
			writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
			dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
				mas->tx_rem_bytes, mas->cur_bits_per_word);
		}
		if (mas->rx_rem_bytes)
			dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
				mas->rx_rem_bytes, mas->cur_bits_per_word);
	}

	if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN)) {
		mas->cur_mcmd = CMD_NONE;
		complete(&mas->xfer_done);
	}

	writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
	spin_unlock_irqrestore(&mas->lock, flags);
	return ret;
}

static int spi_geni_probe(struct platform_device *pdev)
{
	int ret;
	struct spi_master *spi;
	struct spi_geni_master *mas;
	struct resource *res;
	struct geni_se *se;

	spi = spi_alloc_master(&pdev->dev, sizeof(*mas));
	if (!spi)
		return -ENOMEM;

	platform_set_drvdata(pdev, spi);
	mas = spi_master_get_devdata(spi);
	mas->dev = &pdev->dev;
	mas->se.dev = &pdev->dev;
	mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
	se = &mas->se;

	spi->bus_num = -1;
	spi->dev.of_node = pdev->dev.of_node;
	mas->se.clk = devm_clk_get(&pdev->dev, "se");
	if (IS_ERR(mas->se.clk)) {
		ret = PTR_ERR(mas->se.clk);
		dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
		goto spi_geni_probe_err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	se->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(se->base)) {
		ret = PTR_ERR(se->base);
		goto spi_geni_probe_err;
	}

	spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
	spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
	spi->num_chipselect = 4;
	spi->max_speed_hz = 50000000;
	spi->prepare_message = spi_geni_prepare_message;
	spi->transfer_one = spi_geni_transfer_one;
	spi->auto_runtime_pm = true;
	spi->handle_err = handle_fifo_timeout;
	spi->set_cs = spi_geni_set_cs;

	init_completion(&mas->xfer_done);
	spin_lock_init(&mas->lock);
	pm_runtime_enable(&pdev->dev);

	ret = spi_geni_init(mas);
	if (ret)
		goto spi_geni_probe_runtime_disable;

	mas->irq = platform_get_irq(pdev, 0);
	if (mas->irq < 0) {
		ret = mas->irq;
		dev_err(&pdev->dev, "Err getting IRQ %d\n", ret);
		goto spi_geni_probe_runtime_disable;
	}

	ret = request_irq(mas->irq, geni_spi_isr,
			IRQF_TRIGGER_HIGH, "spi_geni", spi);
	if (ret)
		goto spi_geni_probe_runtime_disable;

	ret = spi_register_master(spi);
	if (ret)
		goto spi_geni_probe_free_irq;

	return 0;
spi_geni_probe_free_irq:
	free_irq(mas->irq, spi);
spi_geni_probe_runtime_disable:
	pm_runtime_disable(&pdev->dev);
spi_geni_probe_err:
	spi_master_put(spi);
	return ret;
}

static int spi_geni_remove(struct platform_device *pdev)
{
	struct spi_master *spi = platform_get_drvdata(pdev);
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	/* Unregister _before_ disabling pm_runtime() so we stop transfers */
	spi_unregister_master(spi);

	free_irq(mas->irq, spi);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	return geni_se_resources_off(&mas->se);
}

static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	struct spi_geni_master *mas = spi_master_get_devdata(spi);

	return geni_se_resources_on(&mas->se);
}

static int __maybe_unused spi_geni_suspend(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(spi);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret)
		spi_master_resume(spi);

	return ret;
}

static int __maybe_unused spi_geni_resume(struct device *dev)
{
	struct spi_master *spi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	ret = spi_master_resume(spi);
	if (ret)
		pm_runtime_force_suspend(dev);

	return ret;
}

static const struct dev_pm_ops spi_geni_pm_ops = {
	SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
					spi_geni_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};

static const struct of_device_id spi_geni_dt_match[] = {
	{ .compatible = "qcom,geni-spi" },
	{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);

static struct platform_driver spi_geni_driver = {
	.probe = spi_geni_probe,
	.remove = spi_geni_remove,
	.driver = {
		.name = "geni_spi",
		.pm = &spi_geni_pm_ops,
		.of_match_table = spi_geni_dt_match,
	},
};
module_platform_driver(spi_geni_driver);

MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");