author      Purna Chandra Mandal <purna.mandal@microchip.com>   2016-04-15 07:27:19 -0400
committer   Mark Brown <broonie@kernel.org>                     2016-04-18 12:52:46 -0400
commit      3270ac230f660843a7f7d631746ee2c8ee63f347 (patch)
tree        86baf5cd304e50247ca4ac4bb1cf392f6515a92e /drivers
parent      0a4afaae989c47fd93b73cc83d2c4a46b55aa1b7 (diff)
spi: pic32-sqi: add SPI driver for PIC32 SQI controller.
This driver implements an SPI master interface for the PIC32 Quad SPI
(SQI) controller, specifically for accessing quad SPI flash. It uses
descriptor-based DMA transfers and supports half-duplex communication
for single-, dual- and quad-lane SPI transactions.
Signed-off-by: Purna Chandra Mandal <purna.mandal@microchip.com>
Cc: Mark Brown <broonie@kernel.org>
Signed-off-by: Mark Brown <broonie@kernel.org>
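Editor's note (not part of the patch): the half-duplex single/dual/quad behaviour described above is selected purely through the standard spi_transfer tx_nbits/rx_nbits hints; the driver then maps each DMA-mapped scatterlist entry of a transfer onto one hardware buffer descriptor. A minimal, hypothetical client-side sketch is shown below. The 0x6b quad-output read opcode, the 3-byte address plus one dummy byte, and the helper name are illustrative assumptions; real flash access normally goes through the spi-nor/m25p80 layer.

```c
/*
 * Hypothetical client sketch (not part of this patch): issue a
 * quad-output fast read as a half-duplex spi_message. Assumes the
 * device was probed with spi->mode including SPI_RX_QUAD, and that
 * opcode 0x6b with a 3-byte address and one dummy byte matches the
 * attached flash.
 */
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static int example_quad_read(struct spi_device *spi, u32 addr,
                             void *buf, size_t len)
{
        u8 cmd[5] = { 0x6b, addr >> 16, addr >> 8, addr, 0x00 };
        struct spi_transfer xfers[2] = {
                {
                        /* opcode + address + dummy clocked out on one lane */
                        .tx_buf = cmd,
                        .len = sizeof(cmd),
                        .tx_nbits = SPI_NBITS_SINGLE,
                }, {
                        /* data phase clocked in on all four lanes */
                        .rx_buf = buf,
                        .len = len,
                        .rx_nbits = SPI_NBITS_QUAD,
                },
        };
        struct spi_message msg;

        spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
        return spi_sync(spi, &msg);
}
```

Because the controller is registered with SPI_MASTER_HALF_DUPLEX, each spi_transfer carries either tx_buf or rx_buf but never both, which is exactly the rx_buf test pic32_sqi_one_transfer() uses to pick the transfer direction and lane count.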
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/spi/Kconfig           |   6
-rw-r--r--   drivers/spi/Makefile          |   1
-rw-r--r--   drivers/spi/spi-pic32-sqi.c   | 768
3 files changed, 775 insertions, 0 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8a8ff5051c64..281ed5da4832 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -442,6 +442,12 @@ config SPI_PIC32
 	help
 	  SPI driver for Microchip PIC32 SPI master controller.
 
+config SPI_PIC32_SQI
+	tristate "Microchip PIC32 Quad SPI driver"
+	depends on MACH_PIC32 || COMPILE_TEST
+	help
+	  SPI driver for PIC32 Quad SPI controller.
+
 config SPI_PL022
 	tristate "ARM AMBA PL022 SSP controller"
 	depends on ARM_AMBA
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 06019ed11fac..3c74d003535b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
 obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
 obj-$(CONFIG_SPI_ORION) += spi-orion.o
 obj-$(CONFIG_SPI_PIC32) += spi-pic32.o
+obj-$(CONFIG_SPI_PIC32_SQI) += spi-pic32-sqi.o
 obj-$(CONFIG_SPI_PL022) += spi-pl022.o
 obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
 spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
diff --git a/drivers/spi/spi-pic32-sqi.c b/drivers/spi/spi-pic32-sqi.c
new file mode 100644
index 000000000000..b21534782ada
--- /dev/null
+++ b/drivers/spi/spi-pic32-sqi.c
@@ -0,0 +1,768 @@
/*
 * PIC32 Quad SPI controller driver.
 *
 * Purna Chandra Mandal <purna.mandal@microchip.com>
 * Copyright (c) 2016, Microchip Technology Inc.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/* SQI registers */
#define PESQI_XIP_CONF1_REG 0x00
#define PESQI_XIP_CONF2_REG 0x04
#define PESQI_CONF_REG 0x08
#define PESQI_CTRL_REG 0x0C
#define PESQI_CLK_CTRL_REG 0x10
#define PESQI_CMD_THRES_REG 0x14
#define PESQI_INT_THRES_REG 0x18
#define PESQI_INT_ENABLE_REG 0x1C
#define PESQI_INT_STAT_REG 0x20
#define PESQI_TX_DATA_REG 0x24
#define PESQI_RX_DATA_REG 0x28
#define PESQI_STAT1_REG 0x2C
#define PESQI_STAT2_REG 0x30
#define PESQI_BD_CTRL_REG 0x34
#define PESQI_BD_CUR_ADDR_REG 0x38
#define PESQI_BD_BASE_ADDR_REG 0x40
#define PESQI_BD_STAT_REG 0x44
#define PESQI_BD_POLL_CTRL_REG 0x48
#define PESQI_BD_TX_DMA_STAT_REG 0x4C
#define PESQI_BD_RX_DMA_STAT_REG 0x50
#define PESQI_THRES_REG 0x54
#define PESQI_INT_SIGEN_REG 0x58

/* PESQI_CONF_REG fields */
#define PESQI_MODE 0x7
#define PESQI_MODE_BOOT 0
#define PESQI_MODE_PIO 1
#define PESQI_MODE_DMA 2
#define PESQI_MODE_XIP 3
#define PESQI_MODE_SHIFT 0
#define PESQI_CPHA BIT(3)
#define PESQI_CPOL BIT(4)
#define PESQI_LSBF BIT(5)
#define PESQI_RXLATCH BIT(7)
#define PESQI_SERMODE BIT(8)
#define PESQI_WP_EN BIT(9)
#define PESQI_HOLD_EN BIT(10)
#define PESQI_BURST_EN BIT(12)
#define PESQI_CS_CTRL_HW BIT(15)
#define PESQI_SOFT_RESET BIT(16)
#define PESQI_LANES_SHIFT 20
#define PESQI_SINGLE_LANE 0
#define PESQI_DUAL_LANE 1
#define PESQI_QUAD_LANE 2
#define PESQI_CSEN_SHIFT 24
#define PESQI_EN BIT(23)

/* PESQI_CLK_CTRL_REG fields */
#define PESQI_CLK_EN BIT(0)
#define PESQI_CLK_STABLE BIT(1)
#define PESQI_CLKDIV_SHIFT 8
#define PESQI_CLKDIV 0xff

/* PESQI_INT_THR/CMD_THR_REG */
#define PESQI_TXTHR_MASK 0x1f
#define PESQI_TXTHR_SHIFT 8
#define PESQI_RXTHR_MASK 0x1f
#define PESQI_RXTHR_SHIFT 0

/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
#define PESQI_TXEMPTY BIT(0)
#define PESQI_TXFULL BIT(1)
#define PESQI_TXTHR BIT(2)
#define PESQI_RXEMPTY BIT(3)
#define PESQI_RXFULL BIT(4)
#define PESQI_RXTHR BIT(5)
#define PESQI_BDDONE BIT(9)  /* BD processing complete */
#define PESQI_PKTCOMP BIT(10) /* packet processing complete */
#define PESQI_DMAERR BIT(11) /* error */

/* PESQI_BD_CTRL_REG */
#define PESQI_DMA_EN BIT(0) /* enable DMA engine */
#define PESQI_POLL_EN BIT(1) /* enable polling */
#define PESQI_BDP_START BIT(2) /* start BD processor */

/* PESQI controller buffer descriptor */
struct buf_desc {
        u32 bd_ctrl;    /* control */
        u32 bd_status;  /* reserved */
        u32 bd_addr;    /* DMA buffer addr */
        u32 bd_nextp;   /* next item in chain */
};

/* bd_ctrl */
#define BD_BUFLEN 0x1ff
#define BD_CBD_INT_EN BIT(16) /* Current BD is processed */
#define BD_PKT_INT_EN BIT(17) /* All BDs of PKT processed */
#define BD_LIFM BIT(18) /* last data of pkt */
#define BD_LAST BIT(19) /* end of list */
#define BD_DATA_RECV BIT(20) /* receive data */
#define BD_DDR BIT(21) /* DDR mode */
#define BD_DUAL BIT(22) /* Dual SPI */
#define BD_QUAD BIT(23) /* Quad SPI */
#define BD_LSBF BIT(25) /* LSB First */
#define BD_STAT_CHECK BIT(27) /* Status poll */
#define BD_DEVSEL_SHIFT 28 /* CS */
#define BD_CS_DEASSERT BIT(30) /* de-assert CS after current BD */
#define BD_EN BIT(31) /* BD owned by H/W */

/**
 * struct ring_desc - Representation of SQI ring descriptor
 * @list: list element to add to free or used list.
 * @bd: PESQI controller buffer descriptor
 * @bd_dma: DMA address of PESQI controller buffer descriptor
 * @xfer_len: transfer length
 */
struct ring_desc {
        struct list_head list;
        struct buf_desc *bd;
        dma_addr_t bd_dma;
        u32 xfer_len;
};

/* Global constants */
#define PESQI_BD_BUF_LEN_MAX 256
#define PESQI_BD_COUNT 256 /* max 64KB data per spi message */

struct pic32_sqi {
        void __iomem *regs;
        struct clk *sys_clk;
        struct clk *base_clk; /* drives spi clock */
        struct spi_master *master;
        int irq;
        struct completion xfer_done;
        struct ring_desc *ring;
        void *bd;
        dma_addr_t bd_dma;
        struct list_head bd_list_free; /* free */
        struct list_head bd_list_used; /* allocated */
        struct spi_device *cur_spi;
        u32 cur_speed;
        u8 cur_mode;
};

static inline void pic32_setbits(void __iomem *reg, u32 set)
{
        writel(readl(reg) | set, reg);
}

static inline void pic32_clrbits(void __iomem *reg, u32 clr)
{
        writel(readl(reg) & ~clr, reg);
}

static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
{
        u32 val, div;

        /* div = base_clk / (2 * spi_clk) */
        div = clk_get_rate(sqi->base_clk) / (2 * sck);
        div &= PESQI_CLKDIV;

        val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
        /* apply new divider */
        val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
        val |= div << PESQI_CLKDIV_SHIFT;
        writel(val, sqi->regs + PESQI_CLK_CTRL_REG);

        /* wait for stability */
        return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
                                  val & PESQI_CLK_STABLE, 1, 5000);
}

static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
{
        u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;

        writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
        /* INT_SIGEN works as interrupt-gate to INTR line */
        writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
}

static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
{
        writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
        writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
}

static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
        struct pic32_sqi *sqi = dev_id;
        u32 enable, status;

        enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
        status = readl(sqi->regs + PESQI_INT_STAT_REG);

        /* check spurious interrupt */
        if (!status)
                return IRQ_NONE;

        if (status & PESQI_DMAERR) {
                enable = 0;
                goto irq_done;
        }

        if (status & PESQI_TXTHR)
                enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

        if (status & PESQI_RXTHR)
                enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

        if (status & PESQI_BDDONE)
                enable &= ~PESQI_BDDONE;

        /* packet processing completed */
        if (status & PESQI_PKTCOMP) {
                /* mask all interrupts */
                enable = 0;
                /* complete transaction */
                complete(&sqi->xfer_done);
        }

irq_done:
        /* interrupts are sticky, so mask when handled */
        writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

        return IRQ_HANDLED;
}

static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
{
        struct ring_desc *rdesc;

        if (list_empty(&sqi->bd_list_free))
                return NULL;

        rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
        list_del(&rdesc->list);
        list_add_tail(&rdesc->list, &sqi->bd_list_used);
        return rdesc;
}

static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
        list_del(&rdesc->list);
        list_add(&rdesc->list, &sqi->bd_list_free);
}

static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
                                  struct spi_message *mesg,
                                  struct spi_transfer *xfer)
{
        struct spi_device *spi = mesg->spi;
        struct scatterlist *sg, *sgl;
        struct ring_desc *rdesc;
        struct buf_desc *bd;
        int nents, i;
        u32 bd_ctrl;
        u32 nbits;

        /* Device selection */
        bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;

        /* half-duplex: select transfer buffer, direction and lane */
        if (xfer->rx_buf) {
                bd_ctrl |= BD_DATA_RECV;
                nbits = xfer->rx_nbits;
                sgl = xfer->rx_sg.sgl;
                nents = xfer->rx_sg.nents;
        } else {
                nbits = xfer->tx_nbits;
                sgl = xfer->tx_sg.sgl;
                nents = xfer->tx_sg.nents;
        }

        if (nbits & SPI_NBITS_QUAD)
                bd_ctrl |= BD_QUAD;
        else if (nbits & SPI_NBITS_DUAL)
                bd_ctrl |= BD_DUAL;

        /* LSB first */
        if (spi->mode & SPI_LSB_FIRST)
                bd_ctrl |= BD_LSBF;

        /* ownership to hardware */
        bd_ctrl |= BD_EN;

        for_each_sg(sgl, sg, nents, i) {
                /* get ring descriptor */
                rdesc = ring_desc_get(sqi);
                if (!rdesc)
                        break;

                bd = rdesc->bd;

                /* BD CTRL: length */
                rdesc->xfer_len = sg_dma_len(sg);
                bd->bd_ctrl = bd_ctrl;
                bd->bd_ctrl |= rdesc->xfer_len;

                /* BD STAT */
                bd->bd_status = 0;

                /* BD BUFFER ADDRESS */
                bd->bd_addr = sg->dma_address;
        }

        return 0;
}

static int pic32_sqi_prepare_hardware(struct spi_master *master)
{
        struct pic32_sqi *sqi = spi_master_get_devdata(master);

        /* enable spi interface */
        pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
        /* enable spi clk */
        pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);

        return 0;
}

static bool pic32_sqi_can_dma(struct spi_master *master,
                              struct spi_device *spi,
                              struct spi_transfer *x)
{
        /* Do DMA irrespective of transfer size */
        return true;
}

static int pic32_sqi_one_message(struct spi_master *master,
                                 struct spi_message *msg)
{
        struct spi_device *spi = msg->spi;
        struct ring_desc *rdesc, *next;
        struct spi_transfer *xfer;
        struct pic32_sqi *sqi;
        int ret = 0, mode;
        u32 val;

        sqi = spi_master_get_devdata(master);

        reinit_completion(&sqi->xfer_done);
        msg->actual_length = 0;

        /* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
         * and "delay_usecs". But spi_device specific speed and mode change
         * can be handled at best during spi chip-select switch.
         */
        if (sqi->cur_spi != spi) {
                /* set spi speed */
                if (sqi->cur_speed != spi->max_speed_hz) {
                        sqi->cur_speed = spi->max_speed_hz;
                        ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
                        if (ret)
                                dev_warn(&spi->dev, "set_clk, %d\n", ret);
                }

                /* set spi mode */
                mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
                if (sqi->cur_mode != mode) {
                        val = readl(sqi->regs + PESQI_CONF_REG);
                        val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
                        if (mode & SPI_CPOL)
                                val |= PESQI_CPOL;
                        if (mode & SPI_LSB_FIRST)
                                val |= PESQI_LSBF;
                        val |= PESQI_CPHA;
                        writel(val, sqi->regs + PESQI_CONF_REG);

                        sqi->cur_mode = mode;
                }
                sqi->cur_spi = spi;
        }

        /* prepare hardware desc-list(BD) for transfer(s) */
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                ret = pic32_sqi_one_transfer(sqi, msg, xfer);
                if (ret) {
                        dev_err(&spi->dev, "xfer %p err\n", xfer);
                        goto xfer_out;
                }
        }

        /* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
         * element of the list.
         */
        rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
        rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
                              BD_LIFM | BD_PKT_INT_EN;

        /* set base address BD list for DMA engine */
        rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
        writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

        /* enable interrupt */
        pic32_sqi_enable_int(sqi);

        /* enable DMA engine */
        val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
        writel(val, sqi->regs + PESQI_BD_CTRL_REG);

        /* wait for xfer completion */
        ret = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
        if (ret <= 0) {
                dev_err(&sqi->master->dev, "wait timed out/interrupted\n");
                ret = -EIO;
                msg->status = ret;
        } else {
                /* success */
                msg->status = 0;
                ret = 0;
        }

        /* disable DMA */
        writel(0, sqi->regs + PESQI_BD_CTRL_REG);

        pic32_sqi_disable_int(sqi);

xfer_out:
        list_for_each_entry_safe_reverse(rdesc, next,
                                         &sqi->bd_list_used, list) {
                /* Update total byte transferred */
                msg->actual_length += rdesc->xfer_len;
                /* release ring descr */
                ring_desc_put(sqi, rdesc);
        }
        spi_finalize_current_message(spi->master);

        return ret;
}

static int pic32_sqi_unprepare_hardware(struct spi_master *master)
{
        struct pic32_sqi *sqi = spi_master_get_devdata(master);

        /* disable clk */
        pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
        /* disable spi */
        pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);

        return 0;
}

/* This may be called twice for each spi dev */
static int pic32_sqi_setup(struct spi_device *spi)
{
        struct pic32_sqi *sqi;

        if (spi_get_ctldata(spi)) {
                dev_err(&spi->dev, "is already associated\n");
                return -EBUSY;
        }

        /* check word size */
        if (!spi->bits_per_word) {
                dev_err(&spi->dev, "No bits_per_word defined\n");
                return -EINVAL;
        }

        /* check maximum SPI clk rate */
        if (!spi->max_speed_hz) {
                dev_err(&spi->dev, "No max speed HZ parameter\n");
                return -EINVAL;
        }

        if (spi->master->max_speed_hz < spi->max_speed_hz) {
                dev_err(&spi->dev, "max speed %u HZ is too high\n",
                        spi->max_speed_hz);
                return -EINVAL;
        }

        sqi = spi_master_get_devdata(spi->master);
        spi_set_ctldata(spi, (void *)sqi);

        return 0;
}

static void pic32_sqi_cleanup(struct spi_device *spi)
{
        spi_set_ctldata(spi, (void *)NULL);
}

static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
        struct ring_desc *rdesc;
        struct buf_desc *bd;
        int i;

        /* allocate coherent DMAable memory for hardware buffer descriptors. */
        sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
                                      sizeof(*bd) * PESQI_BD_COUNT,
                                      &sqi->bd_dma, GFP_DMA32);
        if (!sqi->bd) {
                dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
                return -ENOMEM;
        }

        /* allocate software ring descriptors */
        sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
        if (!sqi->ring) {
                dma_free_coherent(&sqi->master->dev,
                                  sizeof(*bd) * PESQI_BD_COUNT,
                                  sqi->bd, sqi->bd_dma);
                return -ENOMEM;
        }

        bd = (struct buf_desc *)sqi->bd;

        INIT_LIST_HEAD(&sqi->bd_list_free);
        INIT_LIST_HEAD(&sqi->bd_list_used);

        /* initialize ring-desc */
        for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
                INIT_LIST_HEAD(&rdesc->list);
                rdesc->bd = &bd[i];
                rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
                list_add_tail(&rdesc->list, &sqi->bd_list_free);
        }

        /* Prepare BD: chain to next BD(s) */
        for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++)
                bd[i].bd_nextp = rdesc[i + 1].bd_dma;
        bd[PESQI_BD_COUNT - 1].bd_nextp = 0;

        return 0;
}

static void ring_desc_ring_free(struct pic32_sqi *sqi)
{
        dma_free_coherent(&sqi->master->dev,
                          sizeof(struct buf_desc) * PESQI_BD_COUNT,
                          sqi->bd, sqi->bd_dma);
        kfree(sqi->ring);
}

static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
        unsigned long flags;
        u32 val;

        /* Soft-reset of PESQI controller triggers interrupt.
         * We are not yet ready to handle them so disable CPU
         * interrupt for the time being.
         */
        local_irq_save(flags);

        /* assert soft-reset */
        writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

        /* wait until clear */
        readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
                                  !(val & PESQI_SOFT_RESET), 1, 5000);

        /* disable all interrupts */
        pic32_sqi_disable_int(sqi);

        /* Now it is safe to enable back CPU interrupt */
        local_irq_restore(flags);

        /* tx and rx fifo interrupt threshold */
        val = readl(sqi->regs + PESQI_CMD_THRES_REG);
        val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
        val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
        val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
        writel(val, sqi->regs + PESQI_CMD_THRES_REG);

        val = readl(sqi->regs + PESQI_INT_THRES_REG);
        val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
        val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
        val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
        writel(val, sqi->regs + PESQI_INT_THRES_REG);

        /* default configuration */
        val = readl(sqi->regs + PESQI_CONF_REG);

        /* set mode: DMA */
        val &= ~PESQI_MODE;
        val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
        writel(val, sqi->regs + PESQI_CONF_REG);

        /* DATAEN - SQIID0-ID3 */
        val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;

        /* burst/INCR4 enable */
        val |= PESQI_BURST_EN;

        /* CSEN - all CS */
        val |= 3U << PESQI_CSEN_SHIFT;
        writel(val, sqi->regs + PESQI_CONF_REG);

        /* write poll count */
        writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

        sqi->cur_speed = 0;
        sqi->cur_mode = -1;
}

static int pic32_sqi_probe(struct platform_device *pdev)
{
        struct spi_master *master;
        struct pic32_sqi *sqi;
        struct resource *reg;
        int ret;

        master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
        if (!master)
                return -ENOMEM;

        sqi = spi_master_get_devdata(master);
        sqi->master = master;

        reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
        if (IS_ERR(sqi->regs)) {
                ret = PTR_ERR(sqi->regs);
                goto err_free_master;
        }

        /* irq */
        sqi->irq = platform_get_irq(pdev, 0);
        if (sqi->irq < 0) {
                dev_err(&pdev->dev, "no irq found\n");
                ret = sqi->irq;
                goto err_free_master;
        }

        /* clocks */
        sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
        if (IS_ERR(sqi->sys_clk)) {
                ret = PTR_ERR(sqi->sys_clk);
                dev_err(&pdev->dev, "no sys_clk ?\n");
                goto err_free_master;
        }

        sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
        if (IS_ERR(sqi->base_clk)) {
                ret = PTR_ERR(sqi->base_clk);
                dev_err(&pdev->dev, "no base clk ?\n");
                goto err_free_master;
        }

        ret = clk_prepare_enable(sqi->sys_clk);
        if (ret) {
                dev_err(&pdev->dev, "sys clk enable failed\n");
                goto err_free_master;
        }

        ret = clk_prepare_enable(sqi->base_clk);
        if (ret) {
                dev_err(&pdev->dev, "base clk enable failed\n");
                clk_disable_unprepare(sqi->sys_clk);
                goto err_free_master;
        }

        init_completion(&sqi->xfer_done);

        /* initialize hardware */
        pic32_sqi_hw_init(sqi);

        /* allocate buffers & descriptors */
        ret = ring_desc_ring_alloc(sqi);
        if (ret) {
                dev_err(&pdev->dev, "ring alloc failed\n");
                goto err_disable_clk;
        }

        /* install irq handlers */
        ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
                          dev_name(&pdev->dev), sqi);
        if (ret < 0) {
                dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
                goto err_free_ring;
        }

        /* register master */
        master->num_chipselect = 2;
        master->max_speed_hz = clk_get_rate(sqi->base_clk);
        master->dma_alignment = 32;
        master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
        master->dev.of_node = of_node_get(pdev->dev.of_node);
        master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
                            SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
        master->flags = SPI_MASTER_HALF_DUPLEX;
        master->setup = pic32_sqi_setup;
        master->cleanup = pic32_sqi_cleanup;
        master->can_dma = pic32_sqi_can_dma;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
        master->transfer_one_message = pic32_sqi_one_message;
        master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
        master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;

        ret = devm_spi_register_master(&pdev->dev, master);
        if (ret) {
                dev_err(&master->dev, "failed registering spi master\n");
                free_irq(sqi->irq, sqi);
                goto err_free_ring;
        }

        platform_set_drvdata(pdev, sqi);

        return 0;

err_free_ring:
        ring_desc_ring_free(sqi);

err_disable_clk:
        clk_disable_unprepare(sqi->base_clk);
        clk_disable_unprepare(sqi->sys_clk);

err_free_master:
        spi_master_put(master);
        return ret;
}

static int pic32_sqi_remove(struct platform_device *pdev)
{
        struct pic32_sqi *sqi = platform_get_drvdata(pdev);

        /* release resources */
        free_irq(sqi->irq, sqi);
        ring_desc_ring_free(sqi);

        /* disable clk */
        clk_disable_unprepare(sqi->base_clk);
        clk_disable_unprepare(sqi->sys_clk);

        return 0;
}

static const struct of_device_id pic32_sqi_of_ids[] = {
        {.compatible = "microchip,pic32mzda-sqi",},
        {},
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);

static struct platform_driver pic32_sqi_driver = {
        .driver = {
                .name = "sqi-pic32",
                .of_match_table = of_match_ptr(pic32_sqi_of_ids),
        },
        .probe = pic32_sqi_probe,
        .remove = pic32_sqi_remove,
};

module_platform_driver(pic32_sqi_driver);

MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
MODULE_LICENSE("GPL v2");