Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/spi/Kconfig | 23
-rw-r--r-- | drivers/spi/Makefile | 3
-rw-r--r-- | drivers/spi/coldfire_qspi.c | 640
-rw-r--r-- | drivers/spi/davinci_spi.c | 1255
-rw-r--r-- | drivers/spi/dw_spi.c | 111
-rw-r--r-- | drivers/spi/dw_spi_mmio.c | 147
-rw-r--r-- | drivers/spi/dw_spi_pci.c | 2
-rw-r--r-- | drivers/spi/mpc52xx_psc_spi.c | 2
-rw-r--r-- | drivers/spi/mpc52xx_spi.c | 2
-rw-r--r-- | drivers/spi/spi_imx.c | 2
-rw-r--r-- | drivers/spi/spi_mpc8xxx.c | 8
-rw-r--r-- | drivers/spi/spi_ppc4xx.c | 2
-rw-r--r-- | drivers/spi/spi_s3c64xx.c | 89
-rw-r--r-- | drivers/spi/spi_sh_msiof.c | 2
-rw-r--r-- | drivers/spi/spi_stmp.c | 2
-rw-r--r-- | drivers/spi/xilinx_spi.c | 28
-rw-r--r-- | drivers/spi/xilinx_spi_of.c | 2
17 files changed, 2220 insertions, 100 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f55eb0107336..0fee95cd9a49 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -100,6 +100,23 @@ config SPI_BUTTERFLY | |||
100 | inexpensive battery powered microcontroller evaluation board. | 100 | inexpensive battery powered microcontroller evaluation board. |
101 | This same cable can be used to flash new firmware. | 101 | This same cable can be used to flash new firmware. |
102 | 102 | ||
103 | config SPI_COLDFIRE_QSPI | ||
104 | tristate "Freescale Coldfire QSPI controller" | ||
105 | depends on (M520x || M523x || M5249 || M527x || M528x || M532x) | ||
106 | help | ||
107 | This enables support for the Coldfire QSPI controller in master | ||
108 | mode. | ||
109 | |||
110 | This driver can also be built as a module. If so, the module | ||
111 | will be called coldfire_qspi. | ||
112 | |||
113 | config SPI_DAVINCI | ||
114 | tristate "SPI controller driver for DaVinci/DA8xx SoCs" | ||
115 | depends on SPI_MASTER && ARCH_DAVINCI | ||
116 | select SPI_BITBANG | ||
117 | help | ||
118 | SPI master controller for DaVinci and DA8xx SPI modules. | ||
119 | |||
103 | config SPI_GPIO | 120 | config SPI_GPIO |
104 | tristate "GPIO-based bitbanging SPI Master" | 121 | tristate "GPIO-based bitbanging SPI Master" |
105 | depends on GENERIC_GPIO | 122 | depends on GENERIC_GPIO |
@@ -308,7 +325,7 @@ config SPI_NUC900 | |||
308 | # | 325 | # |
309 | 326 | ||
310 | config SPI_DESIGNWARE | 327 | config SPI_DESIGNWARE |
311 | bool "DesignWare SPI controller core support" | 328 | tristate "DesignWare SPI controller core support" |
312 | depends on SPI_MASTER | 329 | depends on SPI_MASTER |
313 | help | 330 | help |
314 | general driver for SPI controller core from DesignWare | 331 | general driver for SPI controller core from DesignWare |
@@ -317,6 +334,10 @@ config SPI_DW_PCI | |||
317 | tristate "PCI interface driver for DW SPI core" | 334 | tristate "PCI interface driver for DW SPI core" |
318 | depends on SPI_DESIGNWARE && PCI | 335 | depends on SPI_DESIGNWARE && PCI |
319 | 336 | ||
337 | config SPI_DW_MMIO | ||
338 | tristate "Memory-mapped io interface driver for DW SPI core" | ||
339 | depends on SPI_DESIGNWARE && HAVE_CLK | ||
340 | |||
320 | # | 341 | # |
321 | # There are lots of SPI device types, with sensors and memory | 342 | # There are lots of SPI device types, with sensors and memory |
322 | # being probably the most widely used ones. | 343 | # being probably the most widely used ones. |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f3d2810ba11c..d7d0f89b797b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,8 +16,11 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o | |||
16 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o | 16 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o |
17 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o | 17 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o |
18 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o | 18 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o |
19 | obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o | ||
20 | obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o | ||
19 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o | 21 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o |
20 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o | 22 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o |
23 | obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o | ||
21 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o | 24 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o |
22 | obj-$(CONFIG_SPI_IMX) += spi_imx.o | 25 | obj-$(CONFIG_SPI_IMX) += spi_imx.o |
23 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o | 26 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o |
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
new file mode 100644
index 000000000000..59be3efe0636
--- /dev/null
+++ b/drivers/spi/coldfire_qspi.c
@@ -0,0 +1,640 @@ | |||
1 | /* | ||
2 | * Freescale/Motorola Coldfire Queued SPI driver | ||
3 | * | ||
4 | * Copyright 2010 Steven King <sfking@fdwdc.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | * | ||
20 | */ | ||
21 | |||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/errno.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/workqueue.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/clk.h> | ||
31 | #include <linux/err.h> | ||
32 | #include <linux/spi/spi.h> | ||
33 | |||
34 | #include <asm/coldfire.h> | ||
35 | #include <asm/mcfqspi.h> | ||
36 | |||
37 | #define DRIVER_NAME "mcfqspi" | ||
38 | |||
39 | #define MCFQSPI_BUSCLK (MCF_BUSCLK / 2) | ||
40 | |||
41 | #define MCFQSPI_QMR 0x00 | ||
42 | #define MCFQSPI_QMR_MSTR 0x8000 | ||
43 | #define MCFQSPI_QMR_CPOL 0x0200 | ||
44 | #define MCFQSPI_QMR_CPHA 0x0100 | ||
45 | #define MCFQSPI_QDLYR 0x04 | ||
46 | #define MCFQSPI_QDLYR_SPE 0x8000 | ||
47 | #define MCFQSPI_QWR 0x08 | ||
48 | #define MCFQSPI_QWR_HALT 0x8000 | ||
49 | #define MCFQSPI_QWR_WREN 0x4000 | ||
50 | #define MCFQSPI_QWR_CSIV 0x1000 | ||
51 | #define MCFQSPI_QIR 0x0C | ||
52 | #define MCFQSPI_QIR_WCEFB 0x8000 | ||
53 | #define MCFQSPI_QIR_ABRTB 0x4000 | ||
54 | #define MCFQSPI_QIR_ABRTL 0x1000 | ||
55 | #define MCFQSPI_QIR_WCEFE 0x0800 | ||
56 | #define MCFQSPI_QIR_ABRTE 0x0400 | ||
57 | #define MCFQSPI_QIR_SPIFE 0x0100 | ||
58 | #define MCFQSPI_QIR_WCEF 0x0008 | ||
59 | #define MCFQSPI_QIR_ABRT 0x0004 | ||
60 | #define MCFQSPI_QIR_SPIF 0x0001 | ||
61 | #define MCFQSPI_QAR 0x010 | ||
62 | #define MCFQSPI_QAR_TXBUF 0x00 | ||
63 | #define MCFQSPI_QAR_RXBUF 0x10 | ||
64 | #define MCFQSPI_QAR_CMDBUF 0x20 | ||
65 | #define MCFQSPI_QDR 0x014 | ||
66 | #define MCFQSPI_QCR 0x014 | ||
67 | #define MCFQSPI_QCR_CONT 0x8000 | ||
68 | #define MCFQSPI_QCR_BITSE 0x4000 | ||
69 | #define MCFQSPI_QCR_DT 0x2000 | ||
70 | |||
71 | struct mcfqspi { | ||
72 | void __iomem *iobase; | ||
73 | int irq; | ||
74 | struct clk *clk; | ||
75 | struct mcfqspi_cs_control *cs_control; | ||
76 | |||
77 | wait_queue_head_t waitq; | ||
78 | |||
79 | struct work_struct work; | ||
80 | struct workqueue_struct *workq; | ||
81 | spinlock_t lock; | ||
82 | struct list_head msgq; | ||
83 | }; | ||
84 | |||
85 | static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val) | ||
86 | { | ||
87 | writew(val, mcfqspi->iobase + MCFQSPI_QMR); | ||
88 | } | ||
89 | |||
90 | static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val) | ||
91 | { | ||
92 | writew(val, mcfqspi->iobase + MCFQSPI_QDLYR); | ||
93 | } | ||
94 | |||
95 | static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi) | ||
96 | { | ||
97 | return readw(mcfqspi->iobase + MCFQSPI_QDLYR); | ||
98 | } | ||
99 | |||
100 | static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val) | ||
101 | { | ||
102 | writew(val, mcfqspi->iobase + MCFQSPI_QWR); | ||
103 | } | ||
104 | |||
105 | static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val) | ||
106 | { | ||
107 | writew(val, mcfqspi->iobase + MCFQSPI_QIR); | ||
108 | } | ||
109 | |||
110 | static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val) | ||
111 | { | ||
112 | writew(val, mcfqspi->iobase + MCFQSPI_QAR); | ||
113 | } | ||
114 | |||
115 | static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val) | ||
116 | { | ||
117 | writew(val, mcfqspi->iobase + MCFQSPI_QDR); | ||
118 | } | ||
119 | |||
120 | static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi) | ||
121 | { | ||
122 | return readw(mcfqspi->iobase + MCFQSPI_QDR); | ||
123 | } | ||
124 | |||
125 | static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select, | ||
126 | bool cs_high) | ||
127 | { | ||
128 | mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high); | ||
129 | } | ||
130 | |||
131 | static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select, | ||
132 | bool cs_high) | ||
133 | { | ||
134 | mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high); | ||
135 | } | ||
136 | |||
137 | static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi) | ||
138 | { | ||
139 | return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ? | ||
140 | mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0; | ||
141 | } | ||
142 | |||
143 | static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi) | ||
144 | { | ||
145 | if (mcfqspi->cs_control && mcfqspi->cs_control->teardown) | ||
146 | mcfqspi->cs_control->teardown(mcfqspi->cs_control); | ||
147 | } | ||
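The four cs_* helpers above simply delegate chip-select handling to board code through the mcfqspi_cs_control callbacks passed in via platform data (see probe below). As a rough illustration of that contract only — the GPIO number and function names here are invented, and the callback signatures are the ones implied by the calls above, declared in <asm/mcfqspi.h>:

#include <linux/gpio.h>
#include <asm/mcfqspi.h>

#define MYBOARD_QSPI_CS_GPIO    17      /* hypothetical chip-select GPIO */

static int myboard_cs_setup(struct mcfqspi_cs_control *cs)
{
        int err;

        err = gpio_request(MYBOARD_QSPI_CS_GPIO, "qspi-cs");
        if (err)
                return err;
        /* park the line high: deasserted for the usual active-low slave */
        return gpio_direction_output(MYBOARD_QSPI_CS_GPIO, 1);
}

static void myboard_cs_teardown(struct mcfqspi_cs_control *cs)
{
        gpio_free(MYBOARD_QSPI_CS_GPIO);
}

/* chip_select is ignored here because this example board has a single slave */
static void myboard_cs_select(struct mcfqspi_cs_control *cs, u8 chip_select,
                              bool cs_high)
{
        gpio_set_value(MYBOARD_QSPI_CS_GPIO, cs_high);
}

static void myboard_cs_deselect(struct mcfqspi_cs_control *cs, u8 chip_select,
                                bool cs_high)
{
        gpio_set_value(MYBOARD_QSPI_CS_GPIO, !cs_high);
}

static struct mcfqspi_cs_control myboard_cs_control = {
        .setup          = myboard_cs_setup,
        .teardown       = myboard_cs_teardown,
        .select         = myboard_cs_select,
        .deselect       = myboard_cs_deselect,
};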
148 | |||
149 | static u8 mcfqspi_qmr_baud(u32 speed_hz) | ||
150 | { | ||
151 | return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u); | ||
152 | } | ||
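mcfqspi_qmr_baud() rounds the divider up and clamps it to the 2-255 range the QMR baud field accepts, so the achieved clock can end up below the requested one (mcfqspi_transfer() below reports this via dev_dbg). A stand-alone sketch of the same arithmetic, assuming a 40 MHz bus-clock value purely for the example:

#include <stdio.h>

#define EXAMPLE_BUSCLK  40000000u       /* assumed value of MCF_BUSCLK / 2 */

static unsigned int example_qmr_baud(unsigned int speed_hz)
{
        /* divide rounding up, then clamp to the hardware limits 2..255 */
        unsigned int baud = (EXAMPLE_BUSCLK + speed_hz - 1) / speed_hz;

        if (baud < 2)
                baud = 2;
        else if (baud > 255)
                baud = 255;
        return baud;
}

int main(void)
{
        unsigned int requested = 7000000;       /* 7 MHz requested */
        unsigned int baud = example_qmr_baud(requested);

        /* prints: baud 6, actual 6666666 Hz (the 7 MHz request is rounded down) */
        printf("baud %u, actual %u Hz\n", baud, EXAMPLE_BUSCLK / baud);
        return 0;
}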
153 | |||
154 | static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi) | ||
155 | { | ||
156 | return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE; | ||
157 | } | ||
158 | |||
159 | static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id) | ||
160 | { | ||
161 | struct mcfqspi *mcfqspi = dev_id; | ||
162 | |||
163 | /* clear interrupt */ | ||
164 | mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF); | ||
165 | wake_up(&mcfqspi->waitq); | ||
166 | |||
167 | return IRQ_HANDLED; | ||
168 | } | ||
169 | |||
170 | static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count, | ||
171 | const u8 *txbuf, u8 *rxbuf) | ||
172 | { | ||
173 | unsigned i, n, offset = 0; | ||
174 | |||
175 | n = min(count, 16u); | ||
176 | |||
177 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); | ||
178 | for (i = 0; i < n; ++i) | ||
179 | mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); | ||
180 | |||
181 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); | ||
182 | if (txbuf) | ||
183 | for (i = 0; i < n; ++i) | ||
184 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
185 | else | ||
186 | for (i = 0; i < count; ++i) | ||
187 | mcfqspi_wr_qdr(mcfqspi, 0); | ||
188 | |||
189 | count -= n; | ||
190 | if (count) { | ||
191 | u16 qwr = 0xf08; | ||
192 | mcfqspi_wr_qwr(mcfqspi, 0x700); | ||
193 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
194 | |||
195 | do { | ||
196 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
197 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
198 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
199 | if (rxbuf) { | ||
200 | mcfqspi_wr_qar(mcfqspi, | ||
201 | MCFQSPI_QAR_RXBUF + offset); | ||
202 | for (i = 0; i < 8; ++i) | ||
203 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
204 | } | ||
205 | n = min(count, 8u); | ||
206 | if (txbuf) { | ||
207 | mcfqspi_wr_qar(mcfqspi, | ||
208 | MCFQSPI_QAR_TXBUF + offset); | ||
209 | for (i = 0; i < n; ++i) | ||
210 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
211 | } | ||
212 | qwr = (offset ? 0x808 : 0) + ((n - 1) << 8); | ||
213 | offset ^= 8; | ||
214 | count -= n; | ||
215 | } while (count); | ||
216 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
217 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
218 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
219 | if (rxbuf) { | ||
220 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
221 | for (i = 0; i < 8; ++i) | ||
222 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
223 | offset ^= 8; | ||
224 | } | ||
225 | } else { | ||
226 | mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); | ||
227 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
228 | } | ||
229 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
230 | if (rxbuf) { | ||
231 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
232 | for (i = 0; i < n; ++i) | ||
233 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
234 | } | ||
235 | } | ||
236 | |||
237 | static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count, | ||
238 | const u16 *txbuf, u16 *rxbuf) | ||
239 | { | ||
240 | unsigned i, n, offset = 0; | ||
241 | |||
242 | n = min(count, 16u); | ||
243 | |||
244 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF); | ||
245 | for (i = 0; i < n; ++i) | ||
246 | mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE); | ||
247 | |||
248 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF); | ||
249 | if (txbuf) | ||
250 | for (i = 0; i < n; ++i) | ||
251 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
252 | else | ||
253 | for (i = 0; i < count; ++i) | ||
254 | mcfqspi_wr_qdr(mcfqspi, 0); | ||
255 | |||
256 | count -= n; | ||
257 | if (count) { | ||
258 | u16 qwr = 0xf08; | ||
259 | mcfqspi_wr_qwr(mcfqspi, 0x700); | ||
260 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
261 | |||
262 | do { | ||
263 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
264 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
265 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
266 | if (rxbuf) { | ||
267 | mcfqspi_wr_qar(mcfqspi, | ||
268 | MCFQSPI_QAR_RXBUF + offset); | ||
269 | for (i = 0; i < 8; ++i) | ||
270 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
271 | } | ||
272 | n = min(count, 8u); | ||
273 | if (txbuf) { | ||
274 | mcfqspi_wr_qar(mcfqspi, | ||
275 | MCFQSPI_QAR_TXBUF + offset); | ||
276 | for (i = 0; i < n; ++i) | ||
277 | mcfqspi_wr_qdr(mcfqspi, *txbuf++); | ||
278 | } | ||
279 | qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8); | ||
280 | offset ^= 8; | ||
281 | count -= n; | ||
282 | } while (count); | ||
283 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
284 | mcfqspi_wr_qwr(mcfqspi, qwr); | ||
285 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
286 | if (rxbuf) { | ||
287 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
288 | for (i = 0; i < 8; ++i) | ||
289 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
290 | offset ^= 8; | ||
291 | } | ||
292 | } else { | ||
293 | mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8); | ||
294 | mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE); | ||
295 | } | ||
296 | wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi)); | ||
297 | if (rxbuf) { | ||
298 | mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset); | ||
299 | for (i = 0; i < n; ++i) | ||
300 | *rxbuf++ = mcfqspi_rd_qdr(mcfqspi); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | static void mcfqspi_work(struct work_struct *work) | ||
305 | { | ||
306 | struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work); | ||
307 | unsigned long flags; | ||
308 | |||
309 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
310 | while (!list_empty(&mcfqspi->msgq)) { | ||
311 | struct spi_message *msg; | ||
312 | struct spi_device *spi; | ||
313 | struct spi_transfer *xfer; | ||
314 | int status = 0; | ||
315 | |||
316 | msg = container_of(mcfqspi->msgq.next, struct spi_message, | ||
317 | queue); | ||
318 | |||
319 | list_del_init(&mcfqspi->msgq); | ||
320 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
321 | |||
322 | spi = msg->spi; | ||
323 | |||
324 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
325 | bool cs_high = spi->mode & SPI_CS_HIGH; | ||
326 | u16 qmr = MCFQSPI_QMR_MSTR; | ||
327 | |||
328 | if (xfer->bits_per_word) | ||
329 | qmr |= xfer->bits_per_word << 10; | ||
330 | else | ||
331 | qmr |= spi->bits_per_word << 10; | ||
332 | if (spi->mode & SPI_CPHA) | ||
333 | qmr |= MCFQSPI_QMR_CPHA; | ||
334 | if (spi->mode & SPI_CPOL) | ||
335 | qmr |= MCFQSPI_QMR_CPOL; | ||
336 | if (xfer->speed_hz) | ||
337 | qmr |= mcfqspi_qmr_baud(xfer->speed_hz); | ||
338 | else | ||
339 | qmr |= mcfqspi_qmr_baud(spi->max_speed_hz); | ||
340 | mcfqspi_wr_qmr(mcfqspi, qmr); | ||
341 | |||
342 | mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high); | ||
343 | |||
344 | mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE); | ||
345 | if ((xfer->bits_per_word ? xfer->bits_per_word : | ||
346 | spi->bits_per_word) == 8) | ||
347 | mcfqspi_transfer_msg8(mcfqspi, xfer->len, | ||
348 | xfer->tx_buf, | ||
349 | xfer->rx_buf); | ||
350 | else | ||
351 | mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2, | ||
352 | xfer->tx_buf, | ||
353 | xfer->rx_buf); | ||
354 | mcfqspi_wr_qir(mcfqspi, 0); | ||
355 | |||
356 | if (xfer->delay_usecs) | ||
357 | udelay(xfer->delay_usecs); | ||
358 | if (xfer->cs_change) { | ||
359 | if (!list_is_last(&xfer->transfer_list, | ||
360 | &msg->transfers)) | ||
361 | mcfqspi_cs_deselect(mcfqspi, | ||
362 | spi->chip_select, | ||
363 | cs_high); | ||
364 | } else { | ||
365 | if (list_is_last(&xfer->transfer_list, | ||
366 | &msg->transfers)) | ||
367 | mcfqspi_cs_deselect(mcfqspi, | ||
368 | spi->chip_select, | ||
369 | cs_high); | ||
370 | } | ||
371 | msg->actual_length += xfer->len; | ||
372 | } | ||
373 | msg->status = status; | ||
374 | msg->complete(msg->context); | ||
375 | |||
376 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
377 | } | ||
378 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
379 | } | ||
380 | |||
381 | static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg) | ||
382 | { | ||
383 | struct mcfqspi *mcfqspi; | ||
384 | struct spi_transfer *xfer; | ||
385 | unsigned long flags; | ||
386 | |||
387 | mcfqspi = spi_master_get_devdata(spi->master); | ||
388 | |||
389 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
390 | if (xfer->bits_per_word && ((xfer->bits_per_word < 8) | ||
391 | || (xfer->bits_per_word > 16))) { | ||
392 | dev_dbg(&spi->dev, | ||
393 | "%d bits per word is not supported\n", | ||
394 | xfer->bits_per_word); | ||
395 | goto fail; | ||
396 | } | ||
397 | if (xfer->speed_hz) { | ||
398 | u32 real_speed = MCFQSPI_BUSCLK / | ||
399 | mcfqspi_qmr_baud(xfer->speed_hz); | ||
400 | if (real_speed != xfer->speed_hz) | ||
401 | dev_dbg(&spi->dev, | ||
402 | "using speed %d instead of %d\n", | ||
403 | real_speed, xfer->speed_hz); | ||
404 | } | ||
405 | } | ||
406 | msg->status = -EINPROGRESS; | ||
407 | msg->actual_length = 0; | ||
408 | |||
409 | spin_lock_irqsave(&mcfqspi->lock, flags); | ||
410 | list_add_tail(&msg->queue, &mcfqspi->msgq); | ||
411 | queue_work(mcfqspi->workq, &mcfqspi->work); | ||
412 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | ||
413 | |||
414 | return 0; | ||
415 | fail: | ||
416 | msg->status = -EINVAL; | ||
417 | return -EINVAL; | ||
418 | } | ||
419 | |||
420 | static int mcfqspi_setup(struct spi_device *spi) | ||
421 | { | ||
422 | if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) { | ||
423 | dev_dbg(&spi->dev, "%d bits per word is not supported\n", | ||
424 | spi->bits_per_word); | ||
425 | return -EINVAL; | ||
426 | } | ||
427 | if (spi->chip_select >= spi->master->num_chipselect) { | ||
428 | dev_dbg(&spi->dev, "%d chip select is out of range\n", | ||
429 | spi->chip_select); | ||
430 | return -EINVAL; | ||
431 | } | ||
432 | |||
433 | mcfqspi_cs_deselect(spi_master_get_devdata(spi->master), | ||
434 | spi->chip_select, spi->mode & SPI_CS_HIGH); | ||
435 | |||
436 | dev_dbg(&spi->dev, | ||
437 | "bits per word %d, chip select %d, speed %d KHz\n", | ||
438 | spi->bits_per_word, spi->chip_select, | ||
439 | (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz)) | ||
440 | / 1000); | ||
441 | |||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | static int __devinit mcfqspi_probe(struct platform_device *pdev) | ||
446 | { | ||
447 | struct spi_master *master; | ||
448 | struct mcfqspi *mcfqspi; | ||
449 | struct resource *res; | ||
450 | struct mcfqspi_platform_data *pdata; | ||
451 | int status; | ||
452 | |||
453 | master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi)); | ||
454 | if (master == NULL) { | ||
455 | dev_dbg(&pdev->dev, "spi_alloc_master failed\n"); | ||
456 | return -ENOMEM; | ||
457 | } | ||
458 | |||
459 | mcfqspi = spi_master_get_devdata(master); | ||
460 | |||
461 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
462 | if (!res) { | ||
463 | dev_dbg(&pdev->dev, "platform_get_resource failed\n"); | ||
464 | status = -ENXIO; | ||
465 | goto fail0; | ||
466 | } | ||
467 | |||
468 | if (!request_mem_region(res->start, resource_size(res), pdev->name)) { | ||
469 | dev_dbg(&pdev->dev, "request_mem_region failed\n"); | ||
470 | status = -EBUSY; | ||
471 | goto fail0; | ||
472 | } | ||
473 | |||
474 | mcfqspi->iobase = ioremap(res->start, resource_size(res)); | ||
475 | if (!mcfqspi->iobase) { | ||
476 | dev_dbg(&pdev->dev, "ioremap failed\n"); | ||
477 | status = -ENOMEM; | ||
478 | goto fail1; | ||
479 | } | ||
480 | |||
481 | mcfqspi->irq = platform_get_irq(pdev, 0); | ||
482 | if (mcfqspi->irq < 0) { | ||
483 | dev_dbg(&pdev->dev, "platform_get_irq failed\n"); | ||
484 | status = -ENXIO; | ||
485 | goto fail2; | ||
486 | } | ||
487 | |||
488 | status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED, | ||
489 | pdev->name, mcfqspi); | ||
490 | if (status) { | ||
491 | dev_dbg(&pdev->dev, "request_irq failed\n"); | ||
492 | goto fail2; | ||
493 | } | ||
494 | |||
495 | mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk"); | ||
496 | if (IS_ERR(mcfqspi->clk)) { | ||
497 | dev_dbg(&pdev->dev, "clk_get failed\n"); | ||
498 | status = PTR_ERR(mcfqspi->clk); | ||
499 | goto fail3; | ||
500 | } | ||
501 | clk_enable(mcfqspi->clk); | ||
502 | |||
503 | mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent)); | ||
504 | if (!mcfqspi->workq) { | ||
505 | dev_dbg(&pdev->dev, "create_workqueue failed\n"); | ||
506 | status = -ENOMEM; | ||
507 | goto fail4; | ||
508 | } | ||
509 | INIT_WORK(&mcfqspi->work, mcfqspi_work); | ||
510 | spin_lock_init(&mcfqspi->lock); | ||
511 | INIT_LIST_HEAD(&mcfqspi->msgq); | ||
512 | init_waitqueue_head(&mcfqspi->waitq); | ||
513 | |||
514 | pdata = pdev->dev.platform_data; | ||
515 | if (!pdata) { | ||
516 | dev_dbg(&pdev->dev, "platform data is missing\n"); | ||
517 | goto fail5; | ||
518 | } | ||
519 | master->bus_num = pdata->bus_num; | ||
520 | master->num_chipselect = pdata->num_chipselect; | ||
521 | |||
522 | mcfqspi->cs_control = pdata->cs_control; | ||
523 | status = mcfqspi_cs_setup(mcfqspi); | ||
524 | if (status) { | ||
525 | dev_dbg(&pdev->dev, "error initializing cs_control\n"); | ||
526 | goto fail5; | ||
527 | } | ||
528 | |||
529 | master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA; | ||
530 | master->setup = mcfqspi_setup; | ||
531 | master->transfer = mcfqspi_transfer; | ||
532 | |||
533 | platform_set_drvdata(pdev, master); | ||
534 | |||
535 | status = spi_register_master(master); | ||
536 | if (status) { | ||
537 | dev_dbg(&pdev->dev, "spi_register_master failed\n"); | ||
538 | goto fail6; | ||
539 | } | ||
540 | dev_info(&pdev->dev, "Coldfire QSPI bus driver\n"); | ||
541 | |||
542 | return 0; | ||
543 | |||
544 | fail6: | ||
545 | mcfqspi_cs_teardown(mcfqspi); | ||
546 | fail5: | ||
547 | destroy_workqueue(mcfqspi->workq); | ||
548 | fail4: | ||
549 | clk_disable(mcfqspi->clk); | ||
550 | clk_put(mcfqspi->clk); | ||
551 | fail3: | ||
552 | free_irq(mcfqspi->irq, mcfqspi); | ||
553 | fail2: | ||
554 | iounmap(mcfqspi->iobase); | ||
555 | fail1: | ||
556 | release_mem_region(res->start, resource_size(res)); | ||
557 | fail0: | ||
558 | spi_master_put(master); | ||
559 | |||
560 | dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n"); | ||
561 | |||
562 | return status; | ||
563 | } | ||
564 | |||
565 | static int __devexit mcfqspi_remove(struct platform_device *pdev) | ||
566 | { | ||
567 | struct spi_master *master = platform_get_drvdata(pdev); | ||
568 | struct mcfqspi *mcfqspi = spi_master_get_devdata(master); | ||
569 | struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
570 | |||
571 | /* disable the hardware (set the baud rate to 0) */ | ||
572 | mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR); | ||
573 | |||
574 | platform_set_drvdata(pdev, NULL); | ||
575 | mcfqspi_cs_teardown(mcfqspi); | ||
576 | destroy_workqueue(mcfqspi->workq); | ||
577 | clk_disable(mcfqspi->clk); | ||
578 | clk_put(mcfqspi->clk); | ||
579 | free_irq(mcfqspi->irq, mcfqspi); | ||
580 | iounmap(mcfqspi->iobase); | ||
581 | release_mem_region(res->start, resource_size(res)); | ||
582 | spi_unregister_master(master); | ||
583 | spi_master_put(master); | ||
584 | |||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | #ifdef CONFIG_PM | ||
589 | |||
590 | static int mcfqspi_suspend(struct device *dev) | ||
591 | { | ||
592 | struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); | ||
593 | |||
594 | clk_disable(mcfqspi->clk); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static int mcfqspi_resume(struct device *dev) | ||
600 | { | ||
601 | struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev)); | ||
602 | |||
603 | clk_enable(mcfqspi->clk); | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | |||
608 | static struct dev_pm_ops mcfqspi_dev_pm_ops = { | ||
609 | .suspend = mcfqspi_suspend, | ||
610 | .resume = mcfqspi_resume, | ||
611 | }; | ||
612 | |||
613 | #define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops) | ||
614 | #else | ||
615 | #define MCFQSPI_DEV_PM_OPS NULL | ||
616 | #endif | ||
617 | |||
618 | static struct platform_driver mcfqspi_driver = { | ||
619 | .driver.name = DRIVER_NAME, | ||
620 | .driver.owner = THIS_MODULE, | ||
621 | .driver.pm = MCFQSPI_DEV_PM_OPS, | ||
622 | .remove = __devexit_p(mcfqspi_remove), | ||
623 | }; | ||
624 | |||
625 | static int __init mcfqspi_init(void) | ||
626 | { | ||
627 | return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe); | ||
628 | } | ||
629 | module_init(mcfqspi_init); | ||
630 | |||
631 | static void __exit mcfqspi_exit(void) | ||
632 | { | ||
633 | platform_driver_unregister(&mcfqspi_driver); | ||
634 | } | ||
635 | module_exit(mcfqspi_exit); | ||
636 | |||
637 | MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); | ||
638 | MODULE_DESCRIPTION("Coldfire QSPI Controller Driver"); | ||
639 | MODULE_LICENSE("GPL"); | ||
640 | MODULE_ALIAS("platform:" DRIVER_NAME); | ||
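Since the driver registers as a platform driver under the "platform:mcfqspi" alias, a board file supplies the device side: an MMIO/IRQ resource pair, an mcfqspi_platform_data carrying bus_num, num_chipselect and the cs_control hooks, plus spi_board_info entries for the slaves. A sketch along those lines — the register base, IRQ number and the mmc_spi slave are invented for illustration, not taken from this patch:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <asm/mcfqspi.h>

static struct resource myboard_qspi_resources[] = {
        {
                .start  = 0xfc058000,                   /* example register base */
                .end    = 0xfc058000 + 0x40 - 1,
                .flags  = IORESOURCE_MEM,
        },
        {
                .start  = 31,                           /* example QSPI interrupt */
                .end    = 31,
                .flags  = IORESOURCE_IRQ,
        },
};

static struct mcfqspi_platform_data myboard_qspi_data = {
        .bus_num        = 0,
        .num_chipselect = 1,
        .cs_control     = &myboard_cs_control,          /* from the earlier sketch */
};

static struct platform_device myboard_qspi_device = {
        .name           = "mcfqspi",                    /* must match DRIVER_NAME */
        .id             = 0,
        .num_resources  = ARRAY_SIZE(myboard_qspi_resources),
        .resource       = myboard_qspi_resources,
        .dev            = {
                .platform_data  = &myboard_qspi_data,
        },
};

static struct spi_board_info myboard_spi_board_info[] __initdata = {
        {
                .modalias       = "mmc_spi",            /* example slave driver */
                .max_speed_hz   = 5000000,
                .bus_num        = 0,
                .chip_select    = 0,
                .mode           = SPI_MODE_0,
        },
};

static int __init myboard_qspi_init(void)
{
        int err;

        err = spi_register_board_info(myboard_spi_board_info,
                                      ARRAY_SIZE(myboard_spi_board_info));
        if (err)
                return err;
        return platform_device_register(&myboard_qspi_device);
}
/* the real board code would hook this up via an initcall */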
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
new file mode 100644
index 000000000000..225ab60b02c4
--- /dev/null
+++ b/drivers/spi/davinci_spi.c
@@ -0,0 +1,1255 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Texas Instruments. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/err.h> | ||
26 | #include <linux/clk.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/spi/spi.h> | ||
29 | #include <linux/spi/spi_bitbang.h> | ||
30 | |||
31 | #include <mach/spi.h> | ||
32 | #include <mach/edma.h> | ||
33 | |||
34 | #define SPI_NO_RESOURCE ((resource_size_t)-1) | ||
35 | |||
36 | #define SPI_MAX_CHIPSELECT 2 | ||
37 | |||
38 | #define CS_DEFAULT 0xFF | ||
39 | |||
40 | #define SPI_BUFSIZ (SMP_CACHE_BYTES + 1) | ||
41 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | ||
42 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | ||
43 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | ||
44 | |||
45 | #define SPIFMT_PHASE_MASK BIT(16) | ||
46 | #define SPIFMT_POLARITY_MASK BIT(17) | ||
47 | #define SPIFMT_DISTIMER_MASK BIT(18) | ||
48 | #define SPIFMT_SHIFTDIR_MASK BIT(20) | ||
49 | #define SPIFMT_WAITENA_MASK BIT(21) | ||
50 | #define SPIFMT_PARITYENA_MASK BIT(22) | ||
51 | #define SPIFMT_ODD_PARITY_MASK BIT(23) | ||
52 | #define SPIFMT_WDELAY_MASK 0x3f000000u | ||
53 | #define SPIFMT_WDELAY_SHIFT 24 | ||
54 | #define SPIFMT_CHARLEN_MASK 0x0000001Fu | ||
55 | |||
56 | /* SPIGCR1 */ | ||
57 | #define SPIGCR1_SPIENA_MASK 0x01000000u | ||
58 | |||
59 | /* SPIPC0 */ | ||
60 | #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ | ||
61 | #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ | ||
62 | #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ | ||
63 | #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ | ||
64 | #define SPIPC0_EN1FUN_MASK BIT(1) | ||
65 | #define SPIPC0_EN0FUN_MASK BIT(0) | ||
66 | |||
67 | #define SPIINT_MASKALL 0x0101035F | ||
68 | #define SPI_INTLVL_1 0x000001FFu | ||
69 | #define SPI_INTLVL_0 0x00000000u | ||
70 | |||
71 | /* SPIDAT1 */ | ||
72 | #define SPIDAT1_CSHOLD_SHIFT 28 | ||
73 | #define SPIDAT1_CSNR_SHIFT 16 | ||
74 | #define SPIGCR1_CLKMOD_MASK BIT(1) | ||
75 | #define SPIGCR1_MASTER_MASK BIT(0) | ||
76 | #define SPIGCR1_LOOPBACK_MASK BIT(16) | ||
77 | |||
78 | /* SPIBUF */ | ||
79 | #define SPIBUF_TXFULL_MASK BIT(29) | ||
80 | #define SPIBUF_RXEMPTY_MASK BIT(31) | ||
81 | |||
82 | /* Error Masks */ | ||
83 | #define SPIFLG_DLEN_ERR_MASK BIT(0) | ||
84 | #define SPIFLG_TIMEOUT_MASK BIT(1) | ||
85 | #define SPIFLG_PARERR_MASK BIT(2) | ||
86 | #define SPIFLG_DESYNC_MASK BIT(3) | ||
87 | #define SPIFLG_BITERR_MASK BIT(4) | ||
88 | #define SPIFLG_OVRRUN_MASK BIT(6) | ||
89 | #define SPIFLG_RX_INTR_MASK BIT(8) | ||
90 | #define SPIFLG_TX_INTR_MASK BIT(9) | ||
91 | #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) | ||
92 | #define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ | ||
93 | | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ | ||
94 | | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ | ||
95 | | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \ | ||
96 | | SPIFLG_TX_INTR_MASK \ | ||
97 | | SPIFLG_BUF_INIT_ACTIVE_MASK) | ||
98 | |||
99 | #define SPIINT_DLEN_ERR_INTR BIT(0) | ||
100 | #define SPIINT_TIMEOUT_INTR BIT(1) | ||
101 | #define SPIINT_PARERR_INTR BIT(2) | ||
102 | #define SPIINT_DESYNC_INTR BIT(3) | ||
103 | #define SPIINT_BITERR_INTR BIT(4) | ||
104 | #define SPIINT_OVRRUN_INTR BIT(6) | ||
105 | #define SPIINT_RX_INTR BIT(8) | ||
106 | #define SPIINT_TX_INTR BIT(9) | ||
107 | #define SPIINT_DMA_REQ_EN BIT(16) | ||
108 | #define SPIINT_ENABLE_HIGHZ BIT(24) | ||
109 | |||
110 | #define SPI_T2CDELAY_SHIFT 16 | ||
111 | #define SPI_C2TDELAY_SHIFT 24 | ||
112 | |||
113 | /* SPI Controller registers */ | ||
114 | #define SPIGCR0 0x00 | ||
115 | #define SPIGCR1 0x04 | ||
116 | #define SPIINT 0x08 | ||
117 | #define SPILVL 0x0c | ||
118 | #define SPIFLG 0x10 | ||
119 | #define SPIPC0 0x14 | ||
120 | #define SPIPC1 0x18 | ||
121 | #define SPIPC2 0x1c | ||
122 | #define SPIPC3 0x20 | ||
123 | #define SPIPC4 0x24 | ||
124 | #define SPIPC5 0x28 | ||
125 | #define SPIPC6 0x2c | ||
126 | #define SPIPC7 0x30 | ||
127 | #define SPIPC8 0x34 | ||
128 | #define SPIDAT0 0x38 | ||
129 | #define SPIDAT1 0x3c | ||
130 | #define SPIBUF 0x40 | ||
131 | #define SPIEMU 0x44 | ||
132 | #define SPIDELAY 0x48 | ||
133 | #define SPIDEF 0x4c | ||
134 | #define SPIFMT0 0x50 | ||
135 | #define SPIFMT1 0x54 | ||
136 | #define SPIFMT2 0x58 | ||
137 | #define SPIFMT3 0x5c | ||
138 | #define TGINTVEC0 0x60 | ||
139 | #define TGINTVEC1 0x64 | ||
140 | |||
141 | struct davinci_spi_slave { | ||
142 | u32 cmd_to_write; | ||
143 | u32 clk_ctrl_to_write; | ||
144 | u32 bytes_per_word; | ||
145 | u8 active_cs; | ||
146 | }; | ||
147 | |||
148 | /* We have 2 DMA channels per CS, one for RX and one for TX */ | ||
149 | struct davinci_spi_dma { | ||
150 | int dma_tx_channel; | ||
151 | int dma_rx_channel; | ||
152 | int dma_tx_sync_dev; | ||
153 | int dma_rx_sync_dev; | ||
154 | enum dma_event_q eventq; | ||
155 | |||
156 | struct completion dma_tx_completion; | ||
157 | struct completion dma_rx_completion; | ||
158 | }; | ||
159 | |||
160 | /* SPI Controller driver's private data. */ | ||
161 | struct davinci_spi { | ||
162 | struct spi_bitbang bitbang; | ||
163 | struct clk *clk; | ||
164 | |||
165 | u8 version; | ||
166 | resource_size_t pbase; | ||
167 | void __iomem *base; | ||
168 | size_t region_size; | ||
169 | u32 irq; | ||
170 | struct completion done; | ||
171 | |||
172 | const void *tx; | ||
173 | void *rx; | ||
174 | u8 *tmp_buf; | ||
175 | int count; | ||
176 | struct davinci_spi_dma *dma_channels; | ||
177 | struct davinci_spi_platform_data *pdata; | ||
178 | |||
179 | void (*get_rx)(u32 rx_data, struct davinci_spi *); | ||
180 | u32 (*get_tx)(struct davinci_spi *); | ||
181 | |||
182 | struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; | ||
183 | }; | ||
184 | |||
185 | static unsigned use_dma; | ||
186 | |||
187 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) | ||
188 | { | ||
189 | u8 *rx = davinci_spi->rx; | ||
190 | |||
191 | *rx++ = (u8)data; | ||
192 | davinci_spi->rx = rx; | ||
193 | } | ||
194 | |||
195 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) | ||
196 | { | ||
197 | u16 *rx = davinci_spi->rx; | ||
198 | |||
199 | *rx++ = (u16)data; | ||
200 | davinci_spi->rx = rx; | ||
201 | } | ||
202 | |||
203 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) | ||
204 | { | ||
205 | u32 data; | ||
206 | const u8 *tx = davinci_spi->tx; | ||
207 | |||
208 | data = *tx++; | ||
209 | davinci_spi->tx = tx; | ||
210 | return data; | ||
211 | } | ||
212 | |||
213 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) | ||
214 | { | ||
215 | u32 data; | ||
216 | const u16 *tx = davinci_spi->tx; | ||
217 | |||
218 | data = *tx++; | ||
219 | davinci_spi->tx = tx; | ||
220 | return data; | ||
221 | } | ||
222 | |||
223 | static inline void set_io_bits(void __iomem *addr, u32 bits) | ||
224 | { | ||
225 | u32 v = ioread32(addr); | ||
226 | |||
227 | v |= bits; | ||
228 | iowrite32(v, addr); | ||
229 | } | ||
230 | |||
231 | static inline void clear_io_bits(void __iomem *addr, u32 bits) | ||
232 | { | ||
233 | u32 v = ioread32(addr); | ||
234 | |||
235 | v &= ~bits; | ||
236 | iowrite32(v, addr); | ||
237 | } | ||
238 | |||
239 | static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
240 | { | ||
241 | set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
242 | } | ||
243 | |||
244 | static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
245 | { | ||
246 | clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
247 | } | ||
248 | |||
249 | static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) | ||
250 | { | ||
251 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
252 | |||
253 | if (enable) | ||
254 | set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
255 | else | ||
256 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * Interface to control the chip select signal | ||
261 | */ | ||
262 | static void davinci_spi_chipselect(struct spi_device *spi, int value) | ||
263 | { | ||
264 | struct davinci_spi *davinci_spi; | ||
265 | struct davinci_spi_platform_data *pdata; | ||
266 | u32 data1_reg_val = 0; | ||
267 | |||
268 | davinci_spi = spi_master_get_devdata(spi->master); | ||
269 | pdata = davinci_spi->pdata; | ||
270 | |||
271 | /* | ||
272 | * Board specific chip select logic decides the polarity and cs | ||
273 | * line for the controller | ||
274 | */ | ||
275 | if (value == BITBANG_CS_INACTIVE) { | ||
276 | set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); | ||
277 | |||
278 | data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; | ||
279 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
280 | |||
281 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
282 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
283 | cpu_relax(); | ||
284 | } | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * davinci_spi_setup_transfer - This function determines the transfer method | ||
289 | * @spi: spi device on which data transfer to be done | ||
290 | * @t: spi transfer in which transfer info is filled | ||
291 | * | ||
292 | * This function determines data transfer method (8/16/32 bit transfer). | ||
293 | * It will also set the SPI Clock Control register according to | ||
294 | * SPI slave device freq. | ||
295 | */ | ||
296 | static int davinci_spi_setup_transfer(struct spi_device *spi, | ||
297 | struct spi_transfer *t) | ||
298 | { | ||
299 | |||
300 | struct davinci_spi *davinci_spi; | ||
301 | struct davinci_spi_platform_data *pdata; | ||
302 | u8 bits_per_word = 0; | ||
303 | u32 hz = 0, prescale; | ||
304 | |||
305 | davinci_spi = spi_master_get_devdata(spi->master); | ||
306 | pdata = davinci_spi->pdata; | ||
307 | |||
308 | if (t) { | ||
309 | bits_per_word = t->bits_per_word; | ||
310 | hz = t->speed_hz; | ||
311 | } | ||
312 | |||
313 | /* if bits_per_word is not set then use the default */ | ||
314 | if (!bits_per_word) | ||
315 | bits_per_word = spi->bits_per_word; | ||
316 | |||
317 | /* | ||
318 | * Assign function pointer to appropriate transfer method | ||
319 | * 8bit, 16bit or 32bit transfer | ||
320 | */ | ||
321 | if (bits_per_word <= 8 && bits_per_word >= 2) { | ||
322 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | ||
323 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | ||
324 | davinci_spi->slave[spi->chip_select].bytes_per_word = 1; | ||
325 | } else if (bits_per_word <= 16 && bits_per_word >= 2) { | ||
326 | davinci_spi->get_rx = davinci_spi_rx_buf_u16; | ||
327 | davinci_spi->get_tx = davinci_spi_tx_buf_u16; | ||
328 | davinci_spi->slave[spi->chip_select].bytes_per_word = 2; | ||
329 | } else | ||
330 | return -EINVAL; | ||
331 | |||
332 | if (!hz) | ||
333 | hz = spi->max_speed_hz; | ||
334 | |||
335 | clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, | ||
336 | spi->chip_select); | ||
337 | set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, | ||
338 | spi->chip_select); | ||
339 | |||
340 | prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff; | ||
341 | |||
342 | clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); | ||
343 | set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select); | ||
344 | |||
345 | return 0; | ||
346 | } | ||
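davinci_spi_setup_transfer() programs the per-chip-select prescaler as (module clock / requested rate) - 1 into bits 8-15 of SPIFMTn; the hardware divides the module clock by (prescale + 1). A stand-alone sketch of that relationship, with the 150 MHz clock rate assumed only for the example:

#include <stdio.h>

int main(void)
{
        unsigned long clk_rate = 150000000;     /* assumed SPI module clock */
        unsigned int hz = 10000000;             /* requested 10 MHz */
        unsigned int prescale = ((clk_rate / hz) - 1) & 0xff;

        /* the controller runs SCLK at clk_rate / (prescale + 1) */
        printf("prescale %u -> SCLK %lu Hz\n",
               prescale, clk_rate / (prescale + 1));    /* 14 -> 10000000 Hz */
        return 0;
}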
347 | |||
348 | static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) | ||
349 | { | ||
350 | struct spi_device *spi = (struct spi_device *)data; | ||
351 | struct davinci_spi *davinci_spi; | ||
352 | struct davinci_spi_dma *davinci_spi_dma; | ||
353 | struct davinci_spi_platform_data *pdata; | ||
354 | |||
355 | davinci_spi = spi_master_get_devdata(spi->master); | ||
356 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | ||
357 | pdata = davinci_spi->pdata; | ||
358 | |||
359 | if (ch_status == DMA_COMPLETE) | ||
360 | edma_stop(davinci_spi_dma->dma_rx_channel); | ||
361 | else | ||
362 | edma_clean_channel(davinci_spi_dma->dma_rx_channel); | ||
363 | |||
364 | complete(&davinci_spi_dma->dma_rx_completion); | ||
365 | /* We must disable the DMA RX request */ | ||
366 | davinci_spi_set_dma_req(spi, 0); | ||
367 | } | ||
368 | |||
369 | static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) | ||
370 | { | ||
371 | struct spi_device *spi = (struct spi_device *)data; | ||
372 | struct davinci_spi *davinci_spi; | ||
373 | struct davinci_spi_dma *davinci_spi_dma; | ||
374 | struct davinci_spi_platform_data *pdata; | ||
375 | |||
376 | davinci_spi = spi_master_get_devdata(spi->master); | ||
377 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | ||
378 | pdata = davinci_spi->pdata; | ||
379 | |||
380 | if (ch_status == DMA_COMPLETE) | ||
381 | edma_stop(davinci_spi_dma->dma_tx_channel); | ||
382 | else | ||
383 | edma_clean_channel(davinci_spi_dma->dma_tx_channel); | ||
384 | |||
385 | complete(&davinci_spi_dma->dma_tx_completion); | ||
386 | /* We must disable the DMA TX request */ | ||
387 | davinci_spi_set_dma_req(spi, 0); | ||
388 | } | ||
389 | |||
390 | static int davinci_spi_request_dma(struct spi_device *spi) | ||
391 | { | ||
392 | struct davinci_spi *davinci_spi; | ||
393 | struct davinci_spi_dma *davinci_spi_dma; | ||
394 | struct davinci_spi_platform_data *pdata; | ||
395 | struct device *sdev; | ||
396 | int r; | ||
397 | |||
398 | davinci_spi = spi_master_get_devdata(spi->master); | ||
399 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
400 | pdata = davinci_spi->pdata; | ||
401 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
402 | |||
403 | r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, | ||
404 | davinci_spi_dma_rx_callback, spi, | ||
405 | davinci_spi_dma->eventq); | ||
406 | if (r < 0) { | ||
407 | dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); | ||
408 | return -EAGAIN; | ||
409 | } | ||
410 | davinci_spi_dma->dma_rx_channel = r; | ||
411 | r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, | ||
412 | davinci_spi_dma_tx_callback, spi, | ||
413 | davinci_spi_dma->eventq); | ||
414 | if (r < 0) { | ||
415 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
416 | davinci_spi_dma->dma_rx_channel = -1; | ||
417 | dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); | ||
418 | return -EAGAIN; | ||
419 | } | ||
420 | davinci_spi_dma->dma_tx_channel = r; | ||
421 | |||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | /** | ||
426 | * davinci_spi_setup - This function sets the default transfer method | ||
427 | * @spi: spi device on which data transfer to be done | ||
428 | * | ||
429 | * This function sets the default transfer method. | ||
430 | */ | ||
431 | |||
432 | static int davinci_spi_setup(struct spi_device *spi) | ||
433 | { | ||
434 | int retval; | ||
435 | struct davinci_spi *davinci_spi; | ||
436 | struct davinci_spi_dma *davinci_spi_dma; | ||
437 | struct device *sdev; | ||
438 | |||
439 | davinci_spi = spi_master_get_devdata(spi->master); | ||
440 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
441 | |||
442 | /* if bits per word is zero then default to 8 */ | ||
443 | if (!spi->bits_per_word) | ||
444 | spi->bits_per_word = 8; | ||
445 | |||
446 | davinci_spi->slave[spi->chip_select].cmd_to_write = 0; | ||
447 | |||
448 | if (use_dma && davinci_spi->dma_channels) { | ||
449 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
450 | |||
451 | if ((davinci_spi_dma->dma_rx_channel == -1) | ||
452 | || (davinci_spi_dma->dma_tx_channel == -1)) { | ||
453 | retval = davinci_spi_request_dma(spi); | ||
454 | if (retval < 0) | ||
455 | return retval; | ||
456 | } | ||
457 | } | ||
458 | |||
459 | /* | ||
460 | * The SPI controller on DaVinci and DA8xx operates between | ||
461 | * 600 kHz and 50 MHz | ||
462 | */ | ||
463 | if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { | ||
464 | dev_dbg(sdev, "Operating frequency is not in acceptable " | ||
465 | "range\n"); | ||
466 | return -EINVAL; | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * Set up SPIFMTn register, unique to this chipselect. | ||
471 | * | ||
472 | * NOTE: we could do all of these with one write. Also, some | ||
473 | * of the "version 2" features are found in chips that don't | ||
474 | * support all of them... | ||
475 | */ | ||
476 | if (spi->mode & SPI_LSB_FIRST) | ||
477 | set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
478 | spi->chip_select); | ||
479 | else | ||
480 | clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
481 | spi->chip_select); | ||
482 | |||
483 | if (spi->mode & SPI_CPOL) | ||
484 | set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
485 | spi->chip_select); | ||
486 | else | ||
487 | clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
488 | spi->chip_select); | ||
489 | |||
490 | if (!(spi->mode & SPI_CPHA)) | ||
491 | set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
492 | spi->chip_select); | ||
493 | else | ||
494 | clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
495 | spi->chip_select); | ||
496 | |||
497 | /* | ||
498 | * Version 1 hardware supports two basic SPI modes: | ||
499 | * - Standard SPI mode uses 4 pins, with chipselect | ||
500 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) | ||
501 | * (distinct from SPI_3WIRE, with just one data wire; | ||
502 | * or similar variants without MOSI or without MISO) | ||
503 | * | ||
504 | * Version 2 hardware supports an optional handshaking signal, | ||
505 | * so it can support two more modes: | ||
506 | * - 5 pin SPI variant is standard SPI plus SPI_READY | ||
507 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) | ||
508 | */ | ||
509 | |||
510 | if (davinci_spi->version == SPI_VERSION_2) { | ||
511 | clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, | ||
512 | spi->chip_select); | ||
513 | set_fmt_bits(davinci_spi->base, | ||
514 | (davinci_spi->pdata->wdelay | ||
515 | << SPIFMT_WDELAY_SHIFT) | ||
516 | & SPIFMT_WDELAY_MASK, | ||
517 | spi->chip_select); | ||
518 | |||
519 | if (davinci_spi->pdata->odd_parity) | ||
520 | set_fmt_bits(davinci_spi->base, | ||
521 | SPIFMT_ODD_PARITY_MASK, | ||
522 | spi->chip_select); | ||
523 | else | ||
524 | clear_fmt_bits(davinci_spi->base, | ||
525 | SPIFMT_ODD_PARITY_MASK, | ||
526 | spi->chip_select); | ||
527 | |||
528 | if (davinci_spi->pdata->parity_enable) | ||
529 | set_fmt_bits(davinci_spi->base, | ||
530 | SPIFMT_PARITYENA_MASK, | ||
531 | spi->chip_select); | ||
532 | else | ||
533 | clear_fmt_bits(davinci_spi->base, | ||
534 | SPIFMT_PARITYENA_MASK, | ||
535 | spi->chip_select); | ||
536 | |||
537 | if (davinci_spi->pdata->wait_enable) | ||
538 | set_fmt_bits(davinci_spi->base, | ||
539 | SPIFMT_WAITENA_MASK, | ||
540 | spi->chip_select); | ||
541 | else | ||
542 | clear_fmt_bits(davinci_spi->base, | ||
543 | SPIFMT_WAITENA_MASK, | ||
544 | spi->chip_select); | ||
545 | |||
546 | if (davinci_spi->pdata->timer_disable) | ||
547 | set_fmt_bits(davinci_spi->base, | ||
548 | SPIFMT_DISTIMER_MASK, | ||
549 | spi->chip_select); | ||
550 | else | ||
551 | clear_fmt_bits(davinci_spi->base, | ||
552 | SPIFMT_DISTIMER_MASK, | ||
553 | spi->chip_select); | ||
554 | } | ||
555 | |||
556 | retval = davinci_spi_setup_transfer(spi, NULL); | ||
557 | |||
558 | return retval; | ||
559 | } | ||
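davinci_spi_setup() applies the per-device mode bits (SPI_CPOL/SPI_CPHA/SPI_LSB_FIRST, plus the version-2 options driven by platform data) and insists on a clock between 600 kHz and 50 MHz. From the board side a slave on this controller is therefore declared in the usual way; the modalias and rate below are illustrative only:

#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info myboard_davinci_spi_info[] __initdata = {
        {
                .modalias       = "ads7846",    /* example slave driver */
                .max_speed_hz   = 3000000,      /* inside the 600 kHz - 50 MHz window */
                .bus_num        = 0,
                .chip_select    = 0,
                .mode           = SPI_MODE_3,   /* CPOL = 1, CPHA = 1 */
        },
};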
560 | |||
561 | static void davinci_spi_cleanup(struct spi_device *spi) | ||
562 | { | ||
563 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
564 | struct davinci_spi_dma *davinci_spi_dma; | ||
565 | |||
566 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
567 | |||
568 | if (use_dma && davinci_spi->dma_channels) { | ||
569 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
570 | |||
571 | if ((davinci_spi_dma->dma_rx_channel != -1) | ||
572 | && (davinci_spi_dma->dma_tx_channel != -1)) { | ||
573 | edma_free_channel(davinci_spi_dma->dma_tx_channel); | ||
574 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
575 | } | ||
576 | } | ||
577 | } | ||
578 | |||
579 | static int davinci_spi_bufs_prep(struct spi_device *spi, | ||
580 | struct davinci_spi *davinci_spi) | ||
581 | { | ||
582 | int op_mode = 0; | ||
583 | |||
584 | /* | ||
585 | * REVISIT unless devices disagree about SPI_LOOP or | ||
586 | * SPI_READY (SPI_NO_CS only allows one device!), this | ||
587 | * should not need to be done before each message... | ||
588 | * optimize for both flags staying cleared. | ||
589 | */ | ||
590 | |||
591 | op_mode = SPIPC0_DIFUN_MASK | ||
592 | | SPIPC0_DOFUN_MASK | ||
593 | | SPIPC0_CLKFUN_MASK; | ||
594 | if (!(spi->mode & SPI_NO_CS)) | ||
595 | op_mode |= 1 << spi->chip_select; | ||
596 | if (spi->mode & SPI_READY) | ||
597 | op_mode |= SPIPC0_SPIENA_MASK; | ||
598 | |||
599 | iowrite32(op_mode, davinci_spi->base + SPIPC0); | ||
600 | |||
601 | if (spi->mode & SPI_LOOP) | ||
602 | set_io_bits(davinci_spi->base + SPIGCR1, | ||
603 | SPIGCR1_LOOPBACK_MASK); | ||
604 | else | ||
605 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
606 | SPIGCR1_LOOPBACK_MASK); | ||
607 | |||
608 | return 0; | ||
609 | } | ||
610 | |||
611 | static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | ||
612 | int int_status) | ||
613 | { | ||
614 | struct device *sdev = davinci_spi->bitbang.master->dev.parent; | ||
615 | |||
616 | if (int_status & SPIFLG_TIMEOUT_MASK) { | ||
617 | dev_dbg(sdev, "SPI Time-out Error\n"); | ||
618 | return -ETIMEDOUT; | ||
619 | } | ||
620 | if (int_status & SPIFLG_DESYNC_MASK) { | ||
621 | dev_dbg(sdev, "SPI Desynchronization Error\n"); | ||
622 | return -EIO; | ||
623 | } | ||
624 | if (int_status & SPIFLG_BITERR_MASK) { | ||
625 | dev_dbg(sdev, "SPI Bit error\n"); | ||
626 | return -EIO; | ||
627 | } | ||
628 | |||
629 | if (davinci_spi->version == SPI_VERSION_2) { | ||
630 | if (int_status & SPIFLG_DLEN_ERR_MASK) { | ||
631 | dev_dbg(sdev, "SPI Data Length Error\n"); | ||
632 | return -EIO; | ||
633 | } | ||
634 | if (int_status & SPIFLG_PARERR_MASK) { | ||
635 | dev_dbg(sdev, "SPI Parity Error\n"); | ||
636 | return -EIO; | ||
637 | } | ||
638 | if (int_status & SPIFLG_OVRRUN_MASK) { | ||
639 | dev_dbg(sdev, "SPI Data Overrun error\n"); | ||
640 | return -EIO; | ||
641 | } | ||
642 | if (int_status & SPIFLG_TX_INTR_MASK) { | ||
643 | dev_dbg(sdev, "SPI TX intr bit set\n"); | ||
644 | return -EIO; | ||
645 | } | ||
646 | if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { | ||
647 | dev_dbg(sdev, "SPI Buffer Init Active\n"); | ||
648 | return -EBUSY; | ||
649 | } | ||
650 | } | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * davinci_spi_bufs - function which handles the data transfer | ||
657 | * @spi: spi device on which data transfer to be done | ||
658 | * @t: spi transfer in which transfer info is filled | ||
659 | * | ||
660 | * This function puts the data to be transferred into the data register | ||
661 | * of the SPI controller and then waits until the completion is marked | ||
662 | * by the IRQ handler. | ||
663 | */ | ||
664 | static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) | ||
665 | { | ||
666 | struct davinci_spi *davinci_spi; | ||
667 | int int_status, count, ret; | ||
668 | u8 conv, tmp; | ||
669 | u32 tx_data, data1_reg_val; | ||
670 | u32 buf_val, flg_val; | ||
671 | struct davinci_spi_platform_data *pdata; | ||
672 | |||
673 | davinci_spi = spi_master_get_devdata(spi->master); | ||
674 | pdata = davinci_spi->pdata; | ||
675 | |||
676 | davinci_spi->tx = t->tx_buf; | ||
677 | davinci_spi->rx = t->rx_buf; | ||
678 | |||
679 | /* convert len to words based on bits_per_word */ | ||
680 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | ||
681 | davinci_spi->count = t->len / conv; | ||
682 | |||
683 | INIT_COMPLETION(davinci_spi->done); | ||
684 | |||
685 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
686 | if (ret) | ||
687 | return ret; | ||
688 | |||
689 | /* Enable SPI */ | ||
690 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
691 | |||
692 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
693 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
694 | davinci_spi->base + SPIDELAY); | ||
695 | |||
696 | count = davinci_spi->count; | ||
697 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
698 | tmp = ~(0x1 << spi->chip_select); | ||
699 | |||
700 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
701 | |||
702 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
703 | |||
704 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
705 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
706 | cpu_relax(); | ||
707 | |||
708 | /* Determine the command to execute READ or WRITE */ | ||
709 | if (t->tx_buf) { | ||
710 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | ||
711 | |||
712 | while (1) { | ||
713 | tx_data = davinci_spi->get_tx(davinci_spi); | ||
714 | |||
715 | data1_reg_val &= ~(0xFFFF); | ||
716 | data1_reg_val |= (0xFFFF & tx_data); | ||
717 | |||
718 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
719 | if ((buf_val & SPIBUF_TXFULL_MASK) == 0) { | ||
720 | iowrite32(data1_reg_val, | ||
721 | davinci_spi->base + SPIDAT1); | ||
722 | |||
723 | count--; | ||
724 | } | ||
725 | while (ioread32(davinci_spi->base + SPIBUF) | ||
726 | & SPIBUF_RXEMPTY_MASK) | ||
727 | cpu_relax(); | ||
728 | |||
729 | /* getting the returned byte */ | ||
730 | if (t->rx_buf) { | ||
731 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
732 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
733 | } | ||
734 | if (count <= 0) | ||
735 | break; | ||
736 | } | ||
737 | } else { | ||
738 | if (pdata->poll_mode) { | ||
739 | while (1) { | ||
740 | /* keeps the serial clock going */ | ||
741 | if ((ioread32(davinci_spi->base + SPIBUF) | ||
742 | & SPIBUF_TXFULL_MASK) == 0) | ||
743 | iowrite32(data1_reg_val, | ||
744 | davinci_spi->base + SPIDAT1); | ||
745 | |||
746 | while (ioread32(davinci_spi->base + SPIBUF) & | ||
747 | SPIBUF_RXEMPTY_MASK) | ||
748 | cpu_relax(); | ||
749 | |||
750 | flg_val = ioread32(davinci_spi->base + SPIFLG); | ||
751 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
752 | |||
753 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
754 | |||
755 | count--; | ||
756 | if (count <= 0) | ||
757 | break; | ||
758 | } | ||
759 | } else { /* Receive in Interrupt mode */ | ||
760 | int i; | ||
761 | |||
762 | for (i = 0; i < davinci_spi->count; i++) { | ||
763 | set_io_bits(davinci_spi->base + SPIINT, | ||
764 | SPIINT_BITERR_INTR | ||
765 | | SPIINT_OVRRUN_INTR | ||
766 | | SPIINT_RX_INTR); | ||
767 | |||
768 | iowrite32(data1_reg_val, | ||
769 | davinci_spi->base + SPIDAT1); | ||
770 | |||
771 | while (ioread32(davinci_spi->base + SPIINT) & | ||
772 | SPIINT_RX_INTR) | ||
773 | cpu_relax(); | ||
774 | } | ||
775 | iowrite32((data1_reg_val & 0x0ffcffff), | ||
776 | davinci_spi->base + SPIDAT1); | ||
777 | } | ||
778 | } | ||
779 | |||
780 | /* | ||
781 | * Check for bit error, desync error, parity error, timeout error and | ||
782 | * receive overflow errors | ||
783 | */ | ||
784 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
785 | |||
786 | ret = davinci_spi_check_error(davinci_spi, int_status); | ||
787 | if (ret != 0) | ||
788 | return ret; | ||
789 | |||
790 | /* SPI Framework maintains the count only in bytes so convert back */ | ||
791 | davinci_spi->count *= conv; | ||
792 | |||
793 | return t->len; | ||
794 | } | ||
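The bufs routines here are the controller's low-level per-transfer handlers; client drivers never call them directly but go through the usual synchronous SPI message API, roughly as below (the command byte and lengths are arbitrary, nothing in this sketch is specific to the DaVinci controller):

#include <linux/spi/spi.h>

static int example_read_id(struct spi_device *spi)
{
        u8 cmd = 0x9f;                  /* arbitrary example command byte */
        u8 id[3];                       /* filled in by the rx transfer */
        struct spi_transfer xfers[2] = {
                { .tx_buf = &cmd, .len = 1, },
                { .rx_buf = id, .len = sizeof(id), },
        };
        struct spi_message msg;

        spi_message_init(&msg);
        spi_message_add_tail(&xfers[0], &msg);
        spi_message_add_tail(&xfers[1], &msg);

        return spi_sync(spi, &msg);     /* blocks until the transfer completes */
}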
795 | |||
796 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | ||
797 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | ||
798 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | ||
799 | |||
800 | static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) | ||
801 | { | ||
802 | struct davinci_spi *davinci_spi; | ||
803 | int int_status = 0; | ||
804 | int count, temp_count; | ||
805 | u8 conv = 1; | ||
806 | u8 tmp; | ||
807 | u32 data1_reg_val; | ||
808 | struct davinci_spi_dma *davinci_spi_dma; | ||
809 | int word_len, data_type, ret; | ||
810 | unsigned long tx_reg, rx_reg; | ||
811 | struct davinci_spi_platform_data *pdata; | ||
812 | struct device *sdev; | ||
813 | |||
814 | davinci_spi = spi_master_get_devdata(spi->master); | ||
815 | pdata = davinci_spi->pdata; | ||
816 | sdev = davinci_spi->bitbang.master->dev.parent; | ||
817 | |||
818 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
819 | |||
820 | tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; | ||
821 | rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; | ||
822 | |||
823 | davinci_spi->tx = t->tx_buf; | ||
824 | davinci_spi->rx = t->rx_buf; | ||
825 | |||
826 | /* convert len to words based on bits_per_word */ | ||
827 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | ||
828 | davinci_spi->count = t->len / conv; | ||
829 | |||
830 | INIT_COMPLETION(davinci_spi->done); | ||
831 | |||
832 | init_completion(&davinci_spi_dma->dma_rx_completion); | ||
833 | init_completion(&davinci_spi_dma->dma_tx_completion); | ||
834 | |||
835 | word_len = conv * 8; | ||
836 | |||
837 | if (word_len <= 8) | ||
838 | data_type = DAVINCI_DMA_DATA_TYPE_S8; | ||
839 | else if (word_len <= 16) | ||
840 | data_type = DAVINCI_DMA_DATA_TYPE_S16; | ||
841 | else if (word_len <= 32) | ||
842 | data_type = DAVINCI_DMA_DATA_TYPE_S32; | ||
843 | else | ||
844 | return -EINVAL; | ||
845 | |||
846 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
847 | if (ret) | ||
848 | return ret; | ||
849 | |||
850 | /* Program the chip-select delay values if required */ | ||
851 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
852 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
853 | davinci_spi->base + SPIDELAY); | ||
854 | |||
855 | count = davinci_spi->count; /* the number of elements */ | ||
856 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
857 | |||
858 | /* CS default = 0xFF */ | ||
859 | tmp = ~(0x1 << spi->chip_select); | ||
860 | |||
861 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
862 | |||
863 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
864 | |||
865 | /* disable all interrupts for dma transfers */ | ||
866 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | ||
867 | /* Disable SPI to write configuration bits in SPIDAT */ | ||
868 | clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
869 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
870 | /* Enable SPI */ | ||
871 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
872 | |||
873 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
874 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
875 | cpu_relax(); | ||
876 | |||
877 | |||
878 | if (t->tx_buf) { | ||
879 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, | ||
880 | DMA_TO_DEVICE); | ||
881 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
882 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | ||
883 | " TX buffer\n", count); | ||
884 | return -ENOMEM; | ||
885 | } | ||
886 | temp_count = count; | ||
887 | } else { | ||
888 | /* We need TX clocking for RX transaction */ | ||
889 | t->tx_dma = dma_map_single(&spi->dev, | ||
890 | (void *)davinci_spi->tmp_buf, count + 1, | ||
891 | DMA_TO_DEVICE); | ||
892 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
893 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | ||
894 | " TX tmp buffer\n", count); | ||
895 | return -ENOMEM; | ||
896 | } | ||
897 | temp_count = count + 1; | ||
898 | } | ||
899 | |||
900 | edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, | ||
901 | data_type, temp_count, 1, 0, ASYNC); | ||
902 | edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); | ||
903 | edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); | ||
904 | edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); | ||
905 | edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); | ||
906 | |||
907 | if (t->rx_buf) { | ||
908 | /* initiate transaction */ | ||
909 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
910 | |||
911 | t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, | ||
912 | DMA_FROM_DEVICE); | ||
913 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | ||
914 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | ||
915 | count); | ||
916 | if (t->tx_buf != NULL) | ||
917 | dma_unmap_single(NULL, t->tx_dma, | ||
918 | count, DMA_TO_DEVICE); | ||
919 | return -ENOMEM; | ||
920 | } | ||
921 | edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, | ||
922 | data_type, count, 1, 0, ASYNC); | ||
923 | edma_set_src(davinci_spi_dma->dma_rx_channel, | ||
924 | rx_reg, INCR, W8BIT); | ||
925 | edma_set_dest(davinci_spi_dma->dma_rx_channel, | ||
926 | t->rx_dma, INCR, W8BIT); | ||
927 | edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); | ||
928 | edma_set_dest_index(davinci_spi_dma->dma_rx_channel, | ||
929 | data_type, 0); | ||
930 | } | ||
931 | |||
932 | if ((t->tx_buf) || (t->rx_buf)) | ||
933 | edma_start(davinci_spi_dma->dma_tx_channel); | ||
934 | |||
935 | if (t->rx_buf) | ||
936 | edma_start(davinci_spi_dma->dma_rx_channel); | ||
937 | |||
938 | if ((t->rx_buf) || (t->tx_buf)) | ||
939 | davinci_spi_set_dma_req(spi, 1); | ||
940 | |||
941 | if (t->tx_buf) | ||
942 | wait_for_completion_interruptible( | ||
943 | &davinci_spi_dma->dma_tx_completion); | ||
944 | |||
945 | if (t->rx_buf) | ||
946 | wait_for_completion_interruptible( | ||
947 | &davinci_spi_dma->dma_rx_completion); | ||
948 | |||
949 | dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); | ||
950 | |||
951 | if (t->rx_buf) | ||
952 | dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); | ||
953 | |||
954 | /* | ||
955 | * Check for bit error, desync error, parity error, timeout error and | ||
956 | * receive overflow errors | ||
957 | */ | ||
958 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
959 | |||
960 | ret = davinci_spi_check_error(davinci_spi, int_status); | ||
961 | if (ret != 0) | ||
962 | return ret; | ||
963 | |||
964 | /* SPI Framework maintains the count only in bytes so convert back */ | ||
965 | davinci_spi->count *= conv; | ||
966 | |||
967 | return t->len; | ||
968 | } | ||
969 | |||
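For reference (not from the patch), the word_len to EDMA element-size selection used above reduces to a simple threshold check; the helper below just restates it with illustrative byte sizes:

#include <stdio.h>

/* Illustrative only: mirrors the word_len -> EDMA element size selection. */
static int dma_data_type(int word_len)
{
        if (word_len <= 8)
                return 1;       /* DAVINCI_DMA_DATA_TYPE_S8  */
        if (word_len <= 16)
                return 2;       /* DAVINCI_DMA_DATA_TYPE_S16 */
        if (word_len <= 32)
                return 4;       /* DAVINCI_DMA_DATA_TYPE_S32 */
        return -1;              /* -EINVAL in the driver     */
}

int main(void)
{
        int bpw;

        for (bpw = 8; bpw <= 32; bpw += 8)
                printf("bits_per_word=%d -> element size %d bytes\n",
                       bpw, dma_data_type(bpw));
        return 0;
}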
970 | /** | ||
971 | * davinci_spi_irq - IRQ handler for DaVinci SPI | ||
972 | * @irq: IRQ number for this SPI Master | ||
973 | * @context_data: structure for SPI Master controller davinci_spi | ||
974 | */ | ||
975 | static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) | ||
976 | { | ||
977 | struct davinci_spi *davinci_spi = context_data; | ||
978 | u32 int_status, rx_data = 0; | ||
979 | irqreturn_t ret = IRQ_NONE; | ||
980 | |||
981 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
982 | |||
983 | while ((int_status & SPIFLG_RX_INTR_MASK)) { | ||
984 | if (likely(int_status & SPIFLG_RX_INTR_MASK)) { | ||
985 | ret = IRQ_HANDLED; | ||
986 | |||
987 | rx_data = ioread32(davinci_spi->base + SPIBUF); | ||
988 | davinci_spi->get_rx(rx_data, davinci_spi); | ||
989 | |||
990 | /* Disable Receive Interrupt */ | ||
991 | iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), | ||
992 | davinci_spi->base + SPIINT); | ||
993 | } else | ||
994 | (void)davinci_spi_check_error(davinci_spi, int_status); | ||
995 | |||
996 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
997 | } | ||
998 | |||
999 | return ret; | ||
1000 | } | ||
1001 | |||
1002 | /** | ||
1003 | * davinci_spi_probe - probe function for SPI Master Controller | ||
1004 | * @pdev: platform_device structure which contains platform specific data | ||
1005 | */ | ||
1006 | static int davinci_spi_probe(struct platform_device *pdev) | ||
1007 | { | ||
1008 | struct spi_master *master; | ||
1009 | struct davinci_spi *davinci_spi; | ||
1010 | struct davinci_spi_platform_data *pdata; | ||
1011 | struct resource *r, *mem; | ||
1012 | resource_size_t dma_rx_chan = SPI_NO_RESOURCE; | ||
1013 | resource_size_t dma_tx_chan = SPI_NO_RESOURCE; | ||
1014 | resource_size_t dma_eventq = SPI_NO_RESOURCE; | ||
1015 | int i = 0, ret = 0; | ||
1016 | |||
1017 | pdata = pdev->dev.platform_data; | ||
1018 | if (pdata == NULL) { | ||
1019 | ret = -ENODEV; | ||
1020 | goto err; | ||
1021 | } | ||
1022 | |||
1023 | master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi)); | ||
1024 | if (master == NULL) { | ||
1025 | ret = -ENOMEM; | ||
1026 | goto err; | ||
1027 | } | ||
1028 | |||
1029 | dev_set_drvdata(&pdev->dev, master); | ||
1030 | |||
1031 | davinci_spi = spi_master_get_devdata(master); | ||
1032 | if (davinci_spi == NULL) { | ||
1033 | ret = -ENOENT; | ||
1034 | goto free_master; | ||
1035 | } | ||
1036 | |||
1037 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1038 | if (r == NULL) { | ||
1039 | ret = -ENOENT; | ||
1040 | goto free_master; | ||
1041 | } | ||
1042 | |||
1043 | davinci_spi->pbase = r->start; | ||
1044 | davinci_spi->region_size = resource_size(r); | ||
1045 | davinci_spi->pdata = pdata; | ||
1046 | |||
1047 | mem = request_mem_region(r->start, davinci_spi->region_size, | ||
1048 | pdev->name); | ||
1049 | if (mem == NULL) { | ||
1050 | ret = -EBUSY; | ||
1051 | goto free_master; | ||
1052 | } | ||
1053 | |||
1054 | davinci_spi->base = (struct davinci_spi_reg __iomem *) | ||
1055 | ioremap(r->start, davinci_spi->region_size); | ||
1056 | if (davinci_spi->base == NULL) { | ||
1057 | ret = -ENOMEM; | ||
1058 | goto release_region; | ||
1059 | } | ||
1060 | |||
1061 | davinci_spi->irq = platform_get_irq(pdev, 0); | ||
1062 | if (davinci_spi->irq <= 0) { | ||
1063 | ret = -EINVAL; | ||
1064 | goto unmap_io; | ||
1065 | } | ||
1066 | |||
1067 | ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, | ||
1068 | dev_name(&pdev->dev), davinci_spi); | ||
1069 | if (ret) | ||
1070 | goto unmap_io; | ||
1071 | |||
1072 | /* Allocate tmp_buf for tx_buf */ | ||
1073 | davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); | ||
1074 | if (davinci_spi->tmp_buf == NULL) { | ||
1075 | ret = -ENOMEM; | ||
1076 | goto irq_free; | ||
1077 | } | ||
1078 | |||
1079 | davinci_spi->bitbang.master = spi_master_get(master); | ||
1080 | if (davinci_spi->bitbang.master == NULL) { | ||
1081 | ret = -ENODEV; | ||
1082 | goto free_tmp_buf; | ||
1083 | } | ||
1084 | |||
1085 | davinci_spi->clk = clk_get(&pdev->dev, NULL); | ||
1086 | if (IS_ERR(davinci_spi->clk)) { | ||
1087 | ret = -ENODEV; | ||
1088 | goto put_master; | ||
1089 | } | ||
1090 | clk_enable(davinci_spi->clk); | ||
1091 | |||
1092 | |||
1093 | master->bus_num = pdev->id; | ||
1094 | master->num_chipselect = pdata->num_chipselect; | ||
1095 | master->setup = davinci_spi_setup; | ||
1096 | master->cleanup = davinci_spi_cleanup; | ||
1097 | |||
1098 | davinci_spi->bitbang.chipselect = davinci_spi_chipselect; | ||
1099 | davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; | ||
1100 | |||
1101 | davinci_spi->version = pdata->version; | ||
1102 | use_dma = pdata->use_dma; | ||
1103 | |||
1104 | davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; | ||
1105 | if (davinci_spi->version == SPI_VERSION_2) | ||
1106 | davinci_spi->bitbang.flags |= SPI_READY; | ||
1107 | |||
1108 | if (use_dma) { | ||
1109 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1110 | if (r) | ||
1111 | dma_rx_chan = r->start; | ||
1112 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
1113 | if (r) | ||
1114 | dma_tx_chan = r->start; | ||
1115 | r = platform_get_resource(pdev, IORESOURCE_DMA, 2); | ||
1116 | if (r) | ||
1117 | dma_eventq = r->start; | ||
1118 | } | ||
1119 | |||
1120 | if (!use_dma || | ||
1121 | dma_rx_chan == SPI_NO_RESOURCE || | ||
1122 | dma_tx_chan == SPI_NO_RESOURCE || | ||
1123 | dma_eventq == SPI_NO_RESOURCE) { | ||
1124 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; | ||
1125 | use_dma = 0; | ||
1126 | } else { | ||
1127 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; | ||
1128 | davinci_spi->dma_channels = kzalloc(master->num_chipselect | ||
1129 | * sizeof(struct davinci_spi_dma), GFP_KERNEL); | ||
1130 | if (davinci_spi->dma_channels == NULL) { | ||
1131 | ret = -ENOMEM; | ||
1132 | goto free_clk; | ||
1133 | } | ||
1134 | |||
1135 | for (i = 0; i < master->num_chipselect; i++) { | ||
1136 | davinci_spi->dma_channels[i].dma_rx_channel = -1; | ||
1137 | davinci_spi->dma_channels[i].dma_rx_sync_dev = | ||
1138 | dma_rx_chan; | ||
1139 | davinci_spi->dma_channels[i].dma_tx_channel = -1; | ||
1140 | davinci_spi->dma_channels[i].dma_tx_sync_dev = | ||
1141 | dma_tx_chan; | ||
1142 | davinci_spi->dma_channels[i].eventq = dma_eventq; | ||
1143 | } | ||
1144 | dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" | ||
1145 | "Using RX channel = %d , TX channel = %d and " | ||
1146 | "event queue = %d", dma_rx_chan, dma_tx_chan, | ||
1147 | dma_eventq); | ||
1148 | } | ||
1149 | |||
1150 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | ||
1151 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | ||
1152 | |||
1153 | init_completion(&davinci_spi->done); | ||
1154 | |||
1155 | /* Reset In/OUT SPI module */ | ||
1156 | iowrite32(0, davinci_spi->base + SPIGCR0); | ||
1157 | udelay(100); | ||
1158 | iowrite32(1, davinci_spi->base + SPIGCR0); | ||
1159 | |||
1160 | /* Clock internal */ | ||
1161 | if (davinci_spi->pdata->clk_internal) | ||
1162 | set_io_bits(davinci_spi->base + SPIGCR1, | ||
1163 | SPIGCR1_CLKMOD_MASK); | ||
1164 | else | ||
1165 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
1166 | SPIGCR1_CLKMOD_MASK); | ||
1167 | |||
1168 | /* master mode default */ | ||
1169 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); | ||
1170 | |||
1171 | if (davinci_spi->pdata->intr_level) | ||
1172 | iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); | ||
1173 | else | ||
1174 | iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); | ||
1175 | |||
1176 | ret = spi_bitbang_start(&davinci_spi->bitbang); | ||
1177 | if (ret) | ||
1178 | goto free_clk; | ||
1179 | |||
1180 | dev_info(&pdev->dev, "Controller at 0x%p\n", davinci_spi->base); | ||
1181 | |||
1182 | if (!pdata->poll_mode) | ||
1183 | dev_info(&pdev->dev, "Operating in interrupt mode" | ||
1184 | " using IRQ %d\n", davinci_spi->irq); | ||
1185 | |||
1186 | return ret; | ||
1187 | |||
1188 | free_clk: | ||
1189 | clk_disable(davinci_spi->clk); | ||
1190 | clk_put(davinci_spi->clk); | ||
1191 | put_master: | ||
1192 | spi_master_put(master); | ||
1193 | free_tmp_buf: | ||
1194 | kfree(davinci_spi->tmp_buf); | ||
1195 | irq_free: | ||
1196 | free_irq(davinci_spi->irq, davinci_spi); | ||
1197 | unmap_io: | ||
1198 | iounmap(davinci_spi->base); | ||
1199 | release_region: | ||
1200 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | ||
1201 | free_master: | ||
1202 | kfree(master); | ||
1203 | err: | ||
1204 | return ret; | ||
1205 | } | ||
1206 | |||
1207 | /** | ||
1208 | * davinci_spi_remove - remove function for SPI Master Controller | ||
1209 | * @pdev: platform_device structure which contains platform specific data | ||
1210 | * | ||
1211 | * This function will do the reverse action of the davinci_spi_probe function. | ||
1212 | * It will free the IRQ and SPI controller's memory region. | ||
1213 | * It will also call spi_bitbang_stop to destroy the work queue which was | ||
1214 | * created by spi_bitbang_start. | ||
1215 | */ | ||
1216 | static int __exit davinci_spi_remove(struct platform_device *pdev) | ||
1217 | { | ||
1218 | struct davinci_spi *davinci_spi; | ||
1219 | struct spi_master *master; | ||
1220 | |||
1221 | master = dev_get_drvdata(&pdev->dev); | ||
1222 | davinci_spi = spi_master_get_devdata(master); | ||
1223 | |||
1224 | spi_bitbang_stop(&davinci_spi->bitbang); | ||
1225 | |||
1226 | clk_disable(davinci_spi->clk); | ||
1227 | clk_put(davinci_spi->clk); | ||
1228 | spi_master_put(master); | ||
1229 | kfree(davinci_spi->tmp_buf); | ||
1230 | free_irq(davinci_spi->irq, davinci_spi); | ||
1231 | iounmap(davinci_spi->base); | ||
1232 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | ||
1233 | |||
1234 | return 0; | ||
1235 | } | ||
1236 | |||
1237 | static struct platform_driver davinci_spi_driver = { | ||
1238 | .driver.name = "spi_davinci", | ||
1239 | .remove = __exit_p(davinci_spi_remove), | ||
1240 | }; | ||
1241 | |||
1242 | static int __init davinci_spi_init(void) | ||
1243 | { | ||
1244 | return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe); | ||
1245 | } | ||
1246 | module_init(davinci_spi_init); | ||
1247 | |||
1248 | static void __exit davinci_spi_exit(void) | ||
1249 | { | ||
1250 | platform_driver_unregister(&davinci_spi_driver); | ||
1251 | } | ||
1252 | module_exit(davinci_spi_exit); | ||
1253 | |||
1254 | MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver"); | ||
1255 | MODULE_LICENSE("GPL"); | ||
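davinci_spi_probe() above is a textbook goto-based unwind ladder: every acquired resource gets a label that releases it, and a failure jumps to the label matching how far setup got, so teardown happens in reverse acquisition order. A deliberately tiny user-space sketch of the idiom; malloc/free stand in for ioremap, request_irq, clk_get and friends, and none of these names come from the driver:

#include <stdio.h>
#include <stdlib.h>

static void *get_a(void) { return malloc(1); }
static void *get_b(void) { return malloc(1); }
static void *get_c(void) { return NULL; }       /* simulate a failure */

static int probe(void)
{
        void *a, *b, *c;
        int ret = 0;

        a = get_a();
        if (!a) {
                ret = -1;
                goto err;
        }
        b = get_b();
        if (!b) {
                ret = -2;
                goto free_a;
        }
        c = get_c();
        if (!c) {
                ret = -3;
                goto free_b;    /* unwind in reverse acquisition order */
        }
        return 0;

free_b:
        free(b);
free_a:
        free(a);
err:
        return ret;
}

int main(void)
{
        printf("probe() = %d\n", probe());
        return 0;
}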
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 31620fae77be..8ed38f1d6c18 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c | |||
@@ -152,6 +152,7 @@ static void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
152 | #else | 152 | #else |
153 | static inline int mrst_spi_debugfs_init(struct dw_spi *dws) | 153 | static inline int mrst_spi_debugfs_init(struct dw_spi *dws) |
154 | { | 154 | { |
155 | return 0; | ||
155 | } | 156 | } |
156 | 157 | ||
157 | static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | 158 | static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) |
@@ -161,14 +162,14 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
161 | 162 | ||
162 | static void wait_till_not_busy(struct dw_spi *dws) | 163 | static void wait_till_not_busy(struct dw_spi *dws) |
163 | { | 164 | { |
164 | unsigned long end = jiffies + usecs_to_jiffies(1000); | 165 | unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); |
165 | 166 | ||
166 | while (time_before(jiffies, end)) { | 167 | while (time_before(jiffies, end)) { |
167 | if (!(dw_readw(dws, sr) & SR_BUSY)) | 168 | if (!(dw_readw(dws, sr) & SR_BUSY)) |
168 | return; | 169 | return; |
169 | } | 170 | } |
170 | dev_err(&dws->master->dev, | 171 | dev_err(&dws->master->dev, |
171 | "DW SPI: Stutus keeps busy for 1000us after a read/write!\n"); | 172 | "DW SPI: Status keeps busy for 1000us after a read/write!\n"); |
172 | } | 173 | } |
173 | 174 | ||
174 | static void flush(struct dw_spi *dws) | 175 | static void flush(struct dw_spi *dws) |
@@ -358,6 +359,8 @@ static void transfer_complete(struct dw_spi *dws) | |||
358 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | 359 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) |
359 | { | 360 | { |
360 | u16 irq_status, irq_mask = 0x3f; | 361 | u16 irq_status, irq_mask = 0x3f; |
362 | u32 int_level = dws->fifo_len / 2; | ||
363 | u32 left; | ||
361 | 364 | ||
362 | irq_status = dw_readw(dws, isr) & irq_mask; | 365 | irq_status = dw_readw(dws, isr) & irq_mask; |
363 | /* Error handling */ | 366 | /* Error handling */ |
@@ -369,22 +372,23 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
369 | return IRQ_HANDLED; | 372 | return IRQ_HANDLED; |
370 | } | 373 | } |
371 | 374 | ||
372 | /* INT comes from tx */ | 375 | if (irq_status & SPI_INT_TXEI) { |
373 | if (dws->tx && (irq_status & SPI_INT_TXEI)) { | 376 | spi_mask_intr(dws, SPI_INT_TXEI); |
374 | while (dws->tx < dws->tx_end) | 377 | |
378 | left = (dws->tx_end - dws->tx) / dws->n_bytes; | ||
379 | left = (left > int_level) ? int_level : left; | ||
380 | |||
381 | while (left--) | ||
375 | dws->write(dws); | 382 | dws->write(dws); |
383 | dws->read(dws); | ||
376 | 384 | ||
377 | if (dws->tx == dws->tx_end) { | 385 | /* Re-enable the IRQ if there is still data left to tx */ |
378 | spi_mask_intr(dws, SPI_INT_TXEI); | 386 | if (dws->tx_end > dws->tx) |
387 | spi_umask_intr(dws, SPI_INT_TXEI); | ||
388 | else | ||
379 | transfer_complete(dws); | 389 | transfer_complete(dws); |
380 | } | ||
381 | } | 390 | } |
382 | 391 | ||
383 | /* INT comes from rx */ | ||
384 | if (dws->rx && (irq_status & SPI_INT_RXFI)) { | ||
385 | if (dws->read(dws)) | ||
386 | transfer_complete(dws); | ||
387 | } | ||
388 | return IRQ_HANDLED; | 392 | return IRQ_HANDLED; |
389 | } | 393 | } |
390 | 394 | ||
@@ -404,12 +408,9 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) | |||
404 | /* Must be called inside pump_transfers() */ | 408 | /* Must be called inside pump_transfers() */ |
405 | static void poll_transfer(struct dw_spi *dws) | 409 | static void poll_transfer(struct dw_spi *dws) |
406 | { | 410 | { |
407 | if (dws->tx) { | 411 | while (dws->write(dws)) |
408 | while (dws->write(dws)) | 412 | dws->read(dws); |
409 | dws->read(dws); | ||
410 | } | ||
411 | 413 | ||
412 | dws->read(dws); | ||
413 | transfer_complete(dws); | 414 | transfer_complete(dws); |
414 | } | 415 | } |
415 | 416 | ||
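The reworked TXEI handling above batches FIFO refills: each interrupt writes at most fifo_len/2 words (and never more than what is left), then re-arms TXEI only while data remains. A rough user-space model of how many interrupts a transfer takes under that scheme, with made-up numbers:

#include <stdio.h>

int main(void)
{
        int fifo_len = 40;              /* e.g. the PCI part later in this patch */
        int int_level = fifo_len / 2;
        int words_left = 100;           /* (tx_end - tx) / n_bytes */
        int irqs = 0;

        while (words_left) {
                int batch = words_left > int_level ? int_level : words_left;

                words_left -= batch;    /* the while (left--) write loop */
                irqs++;
        }
        printf("transfer drained in %d TXEI interrupts\n", irqs);
        return 0;
}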
@@ -428,6 +429,7 @@ static void pump_transfers(unsigned long data) | |||
428 | u8 bits = 0; | 429 | u8 bits = 0; |
429 | u8 imask = 0; | 430 | u8 imask = 0; |
430 | u8 cs_change = 0; | 431 | u8 cs_change = 0; |
432 | u16 txint_level = 0; | ||
431 | u16 clk_div = 0; | 433 | u16 clk_div = 0; |
432 | u32 speed = 0; | 434 | u32 speed = 0; |
433 | u32 cr0 = 0; | 435 | u32 cr0 = 0; |
@@ -438,6 +440,9 @@ static void pump_transfers(unsigned long data) | |||
438 | chip = dws->cur_chip; | 440 | chip = dws->cur_chip; |
439 | spi = message->spi; | 441 | spi = message->spi; |
440 | 442 | ||
443 | if (unlikely(!chip->clk_div)) | ||
444 | chip->clk_div = dws->max_freq / chip->speed_hz; | ||
445 | |||
441 | if (message->state == ERROR_STATE) { | 446 | if (message->state == ERROR_STATE) { |
442 | message->status = -EIO; | 447 | message->status = -EIO; |
443 | goto early_exit; | 448 | goto early_exit; |
@@ -492,7 +497,7 @@ static void pump_transfers(unsigned long data) | |||
492 | 497 | ||
493 | /* clk_div doesn't support odd number */ | 498 | /* clk_div doesn't support odd number */ |
494 | clk_div = dws->max_freq / speed; | 499 | clk_div = dws->max_freq / speed; |
495 | clk_div = (clk_div >> 1) << 1; | 500 | clk_div = (clk_div + 1) & 0xfffe; |
496 | 501 | ||
497 | chip->speed_hz = speed; | 502 | chip->speed_hz = speed; |
498 | chip->clk_div = clk_div; | 503 | chip->clk_div = clk_div; |
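The divider change above switches from rounding down to an even value to rounding up to the next even value, so the resulting SCLK can only be slower than, never faster than, the requested speed. A small stand-alone demonstration with an assumed 25 MHz reference clock:

#include <stdio.h>

int main(void)
{
        unsigned int max_freq = 25000000, speed = 5000000;
        unsigned int div = max_freq / speed;            /* 5, an odd value     */
        unsigned int old_div = (div >> 1) << 1;         /* round down to even  */
        unsigned int new_div = (div + 1) & 0xfffe;      /* round up to even    */

        printf("requested %u Hz\n", speed);
        printf("old divider %u -> %u Hz (exceeds the request)\n",
               old_div, max_freq / old_div);
        printf("new divider %u -> %u Hz (never faster than requested)\n",
               new_div, max_freq / new_div);
        return 0;
}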
@@ -532,14 +537,35 @@ static void pump_transfers(unsigned long data) | |||
532 | } | 537 | } |
533 | message->state = RUNNING_STATE; | 538 | message->state = RUNNING_STATE; |
534 | 539 | ||
540 | /* | ||
541 | * Adjust transfer mode if necessary. Requires platform dependent | ||
542 | * chipselect mechanism. | ||
543 | */ | ||
544 | if (dws->cs_control) { | ||
545 | if (dws->rx && dws->tx) | ||
546 | chip->tmode = 0x00; | ||
547 | else if (dws->rx) | ||
548 | chip->tmode = 0x02; | ||
549 | else | ||
550 | chip->tmode = 0x01; | ||
551 | |||
552 | cr0 &= ~(0x3 << SPI_MODE_OFFSET); | ||
553 | cr0 |= (chip->tmode << SPI_TMOD_OFFSET); | ||
554 | } | ||
555 | |||
535 | /* Check if current transfer is a DMA transaction */ | 556 | /* Check if current transfer is a DMA transaction */ |
536 | dws->dma_mapped = map_dma_buffers(dws); | 557 | dws->dma_mapped = map_dma_buffers(dws); |
537 | 558 | ||
559 | /* | ||
560 | * Interrupt mode | ||
561 | * we only need to set the TXEI IRQ, as TX/RX always happen synchronously | ||
562 | */ | ||
538 | if (!dws->dma_mapped && !chip->poll_mode) { | 563 | if (!dws->dma_mapped && !chip->poll_mode) { |
539 | if (dws->rx) | 564 | int templen = dws->len / dws->n_bytes; |
540 | imask |= SPI_INT_RXFI; | 565 | txint_level = dws->fifo_len / 2; |
541 | if (dws->tx) | 566 | txint_level = (templen > txint_level) ? txint_level : templen; |
542 | imask |= SPI_INT_TXEI; | 567 | |
568 | imask |= SPI_INT_TXEI; | ||
543 | dws->transfer_handler = interrupt_transfer; | 569 | dws->transfer_handler = interrupt_transfer; |
544 | } | 570 | } |
545 | 571 | ||
@@ -549,21 +575,23 @@ static void pump_transfers(unsigned long data) | |||
549 | * 2. clk_div is changed | 575 | * 2. clk_div is changed |
550 | * 3. control value changes | 576 | * 3. control value changes |
551 | */ | 577 | */ |
552 | if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div) { | 578 | if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) { |
553 | spi_enable_chip(dws, 0); | 579 | spi_enable_chip(dws, 0); |
554 | 580 | ||
555 | if (dw_readw(dws, ctrl0) != cr0) | 581 | if (dw_readw(dws, ctrl0) != cr0) |
556 | dw_writew(dws, ctrl0, cr0); | 582 | dw_writew(dws, ctrl0, cr0); |
557 | 583 | ||
584 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
585 | spi_chip_sel(dws, spi->chip_select); | ||
586 | |||
558 | /* Set the interrupt mask, for poll mode just disable all int */ | 587 | /* Set the interrupt mask, for poll mode just disable all int */
559 | spi_mask_intr(dws, 0xff); | 588 | spi_mask_intr(dws, 0xff); |
560 | if (!chip->poll_mode) | 589 | if (imask) |
561 | spi_umask_intr(dws, imask); | 590 | spi_umask_intr(dws, imask); |
591 | if (txint_level) | ||
592 | dw_writew(dws, txfltr, txint_level); | ||
562 | 593 | ||
563 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | ||
564 | spi_chip_sel(dws, spi->chip_select); | ||
565 | spi_enable_chip(dws, 1); | 594 | spi_enable_chip(dws, 1); |
566 | |||
567 | if (cs_change) | 595 | if (cs_change) |
568 | dws->prev_chip = chip; | 596 | dws->prev_chip = chip; |
569 | } | 597 | } |
@@ -712,11 +740,11 @@ static int dw_spi_setup(struct spi_device *spi) | |||
712 | } | 740 | } |
713 | chip->bits_per_word = spi->bits_per_word; | 741 | chip->bits_per_word = spi->bits_per_word; |
714 | 742 | ||
743 | if (!spi->max_speed_hz) { | ||
744 | dev_err(&spi->dev, "No max speed HZ parameter\n"); | ||
745 | return -EINVAL; | ||
746 | } | ||
715 | chip->speed_hz = spi->max_speed_hz; | 747 | chip->speed_hz = spi->max_speed_hz; |
716 | if (chip->speed_hz) | ||
717 | chip->clk_div = 25000000 / chip->speed_hz; | ||
718 | else | ||
719 | chip->clk_div = 8; /* default value */ | ||
720 | 748 | ||
721 | chip->tmode = 0; /* Tx & Rx */ | 749 | chip->tmode = 0; /* Tx & Rx */ |
722 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ | 750 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ |
@@ -735,7 +763,7 @@ static void dw_spi_cleanup(struct spi_device *spi) | |||
735 | kfree(chip); | 763 | kfree(chip); |
736 | } | 764 | } |
737 | 765 | ||
738 | static int __init init_queue(struct dw_spi *dws) | 766 | static int __devinit init_queue(struct dw_spi *dws) |
739 | { | 767 | { |
740 | INIT_LIST_HEAD(&dws->queue); | 768 | INIT_LIST_HEAD(&dws->queue); |
741 | spin_lock_init(&dws->lock); | 769 | spin_lock_init(&dws->lock); |
@@ -817,6 +845,22 @@ static void spi_hw_init(struct dw_spi *dws) | |||
817 | spi_mask_intr(dws, 0xff); | 845 | spi_mask_intr(dws, 0xff); |
818 | spi_enable_chip(dws, 1); | 846 | spi_enable_chip(dws, 1); |
819 | flush(dws); | 847 | flush(dws); |
848 | |||
849 | /* | ||
850 | * Try to detect the FIFO depth if not set by the interface driver; | ||
851 | * per the HW spec the depth can range from 2 to 256 | ||
852 | */ | ||
853 | if (!dws->fifo_len) { | ||
854 | u32 fifo; | ||
855 | for (fifo = 2; fifo <= 257; fifo++) { | ||
856 | dw_writew(dws, txfltr, fifo); | ||
857 | if (fifo != dw_readw(dws, txfltr)) | ||
858 | break; | ||
859 | } | ||
860 | |||
861 | dws->fifo_len = (fifo == 257) ? 0 : fifo; | ||
862 | dw_writew(dws, txfltr, 0); | ||
863 | } | ||
820 | } | 864 | } |
821 | 865 | ||
822 | int __devinit dw_spi_add_host(struct dw_spi *dws) | 866 | int __devinit dw_spi_add_host(struct dw_spi *dws) |
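The FIFO-depth probe above relies on the threshold register ignoring values that do not fit: candidate values 2, 3, ... stick until the candidate reaches the FIFO depth, and the first value that fails to read back is taken as the depth. A user-space simulation of that logic against a fake 40-entry FIFO (the register model here is an assumption made for illustration, not a statement about the IP):

#include <stdint.h>
#include <stdio.h>

#define FIFO_DEPTH 40                   /* unknown to the "driver" below */

/* Fake TXFLTR: values >= the FIFO depth don't stick. */
static uint32_t txfltr;
static void write_txfltr(uint32_t v)    { if (v < FIFO_DEPTH) txfltr = v; }
static uint32_t read_txfltr(void)       { return txfltr; }

int main(void)
{
        uint32_t fifo, fifo_len;

        for (fifo = 2; fifo <= 257; fifo++) {
                write_txfltr(fifo);
                if (fifo != read_txfltr())
                        break;          /* first value that didn't stick */
        }
        fifo_len = (fifo == 257) ? 0 : fifo;
        write_txfltr(0);
        printf("detected fifo_len = %u\n", fifo_len);
        return 0;
}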
@@ -913,6 +957,7 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws) | |||
913 | /* Disconnect from the SPI framework */ | 957 | /* Disconnect from the SPI framework */ |
914 | spi_unregister_master(dws->master); | 958 | spi_unregister_master(dws->master); |
915 | } | 959 | } |
960 | EXPORT_SYMBOL(dw_spi_remove_host); | ||
916 | 961 | ||
917 | int dw_spi_suspend_host(struct dw_spi *dws) | 962 | int dw_spi_suspend_host(struct dw_spi *dws) |
918 | { | 963 | { |
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c new file mode 100644 index 000000000000..e35b45ac5174 --- /dev/null +++ b/drivers/spi/dw_spi_mmio.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core | ||
3 | * | ||
4 | * Copyright (c) 2010, Octasic semiconductor. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/clk.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/spi/dw_spi.h> | ||
15 | #include <linux/spi/spi.h> | ||
16 | |||
17 | #define DRIVER_NAME "dw_spi_mmio" | ||
18 | |||
19 | struct dw_spi_mmio { | ||
20 | struct dw_spi dws; | ||
21 | struct clk *clk; | ||
22 | }; | ||
23 | |||
24 | static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) | ||
25 | { | ||
26 | struct dw_spi_mmio *dwsmmio; | ||
27 | struct dw_spi *dws; | ||
28 | struct resource *mem, *ioarea; | ||
29 | int ret; | ||
30 | |||
31 | dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL); | ||
32 | if (!dwsmmio) { | ||
33 | ret = -ENOMEM; | ||
34 | goto err_end; | ||
35 | } | ||
36 | |||
37 | dws = &dwsmmio->dws; | ||
38 | |||
39 | /* Get basic io resource and map it */ | ||
40 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
41 | if (!mem) { | ||
42 | dev_err(&pdev->dev, "no mem resource?\n"); | ||
43 | ret = -EINVAL; | ||
44 | goto err_kfree; | ||
45 | } | ||
46 | |||
47 | ioarea = request_mem_region(mem->start, resource_size(mem), | ||
48 | pdev->name); | ||
49 | if (!ioarea) { | ||
50 | dev_err(&pdev->dev, "SPI region already claimed\n"); | ||
51 | ret = -EBUSY; | ||
52 | goto err_kfree; | ||
53 | } | ||
54 | |||
55 | dws->regs = ioremap_nocache(mem->start, resource_size(mem)); | ||
56 | if (!dws->regs) { | ||
57 | dev_err(&pdev->dev, "SPI region already mapped\n"); | ||
58 | ret = -ENOMEM; | ||
59 | goto err_release_reg; | ||
60 | } | ||
61 | |||
62 | dws->irq = platform_get_irq(pdev, 0); | ||
63 | if (dws->irq < 0) { | ||
64 | dev_err(&pdev->dev, "no irq resource?\n"); | ||
65 | ret = dws->irq; /* -ENXIO */ | ||
66 | goto err_unmap; | ||
67 | } | ||
68 | |||
69 | dwsmmio->clk = clk_get(&pdev->dev, NULL); | ||
70 | if (!dwsmmio->clk) { | ||
71 | ret = -ENODEV; | ||
72 | goto err_irq; | ||
73 | } | ||
74 | clk_enable(dwsmmio->clk); | ||
75 | |||
76 | dws->parent_dev = &pdev->dev; | ||
77 | dws->bus_num = 0; | ||
78 | dws->num_cs = 4; | ||
79 | dws->max_freq = clk_get_rate(dwsmmio->clk); | ||
80 | |||
81 | ret = dw_spi_add_host(dws); | ||
82 | if (ret) | ||
83 | goto err_clk; | ||
84 | |||
85 | platform_set_drvdata(pdev, dwsmmio); | ||
86 | return 0; | ||
87 | |||
88 | err_clk: | ||
89 | clk_disable(dwsmmio->clk); | ||
90 | clk_put(dwsmmio->clk); | ||
91 | dwsmmio->clk = NULL; | ||
92 | err_irq: | ||
93 | free_irq(dws->irq, dws); | ||
94 | err_unmap: | ||
95 | iounmap(dws->regs); | ||
96 | err_release_reg: | ||
97 | release_mem_region(mem->start, resource_size(mem)); | ||
98 | err_kfree: | ||
99 | kfree(dwsmmio); | ||
100 | err_end: | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | static int __devexit dw_spi_mmio_remove(struct platform_device *pdev) | ||
105 | { | ||
106 | struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); | ||
107 | struct resource *mem; | ||
108 | |||
109 | platform_set_drvdata(pdev, NULL); | ||
110 | |||
111 | clk_disable(dwsmmio->clk); | ||
112 | clk_put(dwsmmio->clk); | ||
113 | dwsmmio->clk = NULL; | ||
114 | |||
115 | free_irq(dwsmmio->dws.irq, &dwsmmio->dws); | ||
116 | dw_spi_remove_host(&dwsmmio->dws); | ||
117 | iounmap(dwsmmio->dws.regs); | ||
118 | kfree(dwsmmio); | ||
119 | |||
120 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
121 | release_mem_region(mem->start, resource_size(mem)); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static struct platform_driver dw_spi_mmio_driver = { | ||
126 | .remove = __devexit_p(dw_spi_mmio_remove), | ||
127 | .driver = { | ||
128 | .name = DRIVER_NAME, | ||
129 | .owner = THIS_MODULE, | ||
130 | }, | ||
131 | }; | ||
132 | |||
133 | static int __init dw_spi_mmio_init(void) | ||
134 | { | ||
135 | return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe); | ||
136 | } | ||
137 | module_init(dw_spi_mmio_init); | ||
138 | |||
139 | static void __exit dw_spi_mmio_exit(void) | ||
140 | { | ||
141 | platform_driver_unregister(&dw_spi_mmio_driver); | ||
142 | } | ||
143 | module_exit(dw_spi_mmio_exit); | ||
144 | |||
145 | MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>"); | ||
146 | MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core"); | ||
147 | MODULE_LICENSE("GPL v2"); | ||
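Both probe routines in this patch size their mappings with resource_size(), which treats resource ends as inclusive (end - start + 1). A trivial stand-alone restatement, with a made-up address range:

#include <stdio.h>

/* Mimics struct resource and resource_size(): end is inclusive. */
struct res { unsigned long start, end; };

static unsigned long resource_size(const struct res *r)
{
        return r->end - r->start + 1;
}

int main(void)
{
        struct res mem = { .start = 0x44e0c000, .end = 0x44e0cfff };

        printf("mapping %#lx..%#lx, %lu bytes\n",
               mem.start, mem.end, resource_size(&mem));
        return 0;
}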
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c index 34ba69161734..1f0735f9cc76 100644 --- a/drivers/spi/dw_spi_pci.c +++ b/drivers/spi/dw_spi_pci.c | |||
@@ -73,6 +73,7 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev, | |||
73 | dws->num_cs = 4; | 73 | dws->num_cs = 4; |
74 | dws->max_freq = 25000000; /* for Moorestown */ | 74 | dws->max_freq = 25000000; /* for Moorestown */
75 | dws->irq = pdev->irq; | 75 | dws->irq = pdev->irq; |
76 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | ||
76 | 77 | ||
77 | ret = dw_spi_add_host(dws); | 78 | ret = dw_spi_add_host(dws); |
78 | if (ret) | 79 | if (ret) |
@@ -98,6 +99,7 @@ static void __devexit spi_pci_remove(struct pci_dev *pdev) | |||
98 | struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); | 99 | struct dw_spi_pci *dwpci = pci_get_drvdata(pdev); |
99 | 100 | ||
100 | pci_set_drvdata(pdev, NULL); | 101 | pci_set_drvdata(pdev, NULL); |
102 | dw_spi_remove_host(&dwpci->dws); | ||
101 | iounmap(dwpci->dws.regs); | 103 | iounmap(dwpci->dws.regs); |
102 | pci_release_region(pdev, 0); | 104 | pci_release_region(pdev, 0); |
103 | kfree(dwpci); | 105 | kfree(dwpci); |
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index f50c81df336a..04747868d6c4 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -503,7 +503,7 @@ static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) | |||
503 | return mpc52xx_psc_spi_do_remove(&op->dev); | 503 | return mpc52xx_psc_spi_do_remove(&op->dev); |
504 | } | 504 | } |
505 | 505 | ||
506 | static struct of_device_id mpc52xx_psc_spi_of_match[] = { | 506 | static const struct of_device_id mpc52xx_psc_spi_of_match[] = { |
507 | { .compatible = "fsl,mpc5200-psc-spi", }, | 507 | { .compatible = "fsl,mpc5200-psc-spi", }, |
508 | { .compatible = "mpc5200-psc-spi", }, /* old */ | 508 | { .compatible = "mpc5200-psc-spi", }, /* old */ |
509 | {} | 509 | {} |
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index 45bfe6458173..6eab46537a0a 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c | |||
@@ -550,7 +550,7 @@ static int __devexit mpc52xx_spi_remove(struct of_device *op) | |||
550 | return 0; | 550 | return 0; |
551 | } | 551 | } |
552 | 552 | ||
553 | static struct of_device_id mpc52xx_spi_match[] __devinitdata = { | 553 | static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { |
554 | { .compatible = "fsl,mpc5200-spi", }, | 554 | { .compatible = "fsl,mpc5200-spi", }, |
555 | {} | 555 | {} |
556 | }; | 556 | }; |
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 1893f1e96dc4..0ddbbe45e834 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c | |||
@@ -469,7 +469,7 @@ static int spi_imx_setup(struct spi_device *spi) | |||
469 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); | 469 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); |
470 | int gpio = spi_imx->chipselect[spi->chip_select]; | 470 | int gpio = spi_imx->chipselect[spi->chip_select]; |
471 | 471 | ||
472 | pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, | 472 | dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__, |
473 | spi->mode, spi->bits_per_word, spi->max_speed_hz); | 473 | spi->mode, spi->bits_per_word, spi->max_speed_hz); |
474 | 474 | ||
475 | if (gpio >= 0) | 475 | if (gpio >= 0) |
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index 1fb2a6ea328c..4f0cc9d457e0 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c | |||
@@ -365,7 +365,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
365 | 365 | ||
366 | if ((mpc8xxx_spi->spibrg / hz) > 64) { | 366 | if ((mpc8xxx_spi->spibrg / hz) > 64) { |
367 | cs->hw_mode |= SPMODE_DIV16; | 367 | cs->hw_mode |= SPMODE_DIV16; |
368 | pm = mpc8xxx_spi->spibrg / (hz * 64); | 368 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; |
369 | 369 | ||
370 | WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " | 370 | WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " |
371 | "Will use %d Hz instead.\n", dev_name(&spi->dev), | 371 | "Will use %d Hz instead.\n", dev_name(&spi->dev), |
@@ -373,7 +373,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
373 | if (pm > 16) | 373 | if (pm > 16) |
374 | pm = 16; | 374 | pm = 16; |
375 | } else | 375 | } else |
376 | pm = mpc8xxx_spi->spibrg / (hz * 4); | 376 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; |
377 | if (pm) | 377 | if (pm) |
378 | pm--; | 378 | pm--; |
379 | 379 | ||
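The prescaler change above replaces a floor division with a ceiling division, so the chosen divisor can only make the bus slower than requested, never faster. Ignoring the later pm-- adjustment, the rounding difference looks like this (the bus clock and requested rate below are made up):

#include <stdio.h>

/* ceil(a / b) for positive integers, as used for the SPI prescaler. */
static unsigned int div_ceil(unsigned int a, unsigned int b)
{
        return (a - 1) / b + 1;
}

int main(void)
{
        unsigned int spibrg = 132000000;        /* assumed bus clock     */
        unsigned int hz = 10000000;             /* requested SPI clock   */
        unsigned int old_pm = spibrg / (hz * 4);
        unsigned int new_pm = div_ceil(spibrg, hz * 4);

        printf("floor pm=%u -> %u Hz (faster than requested)\n",
               old_pm, spibrg / (old_pm * 4));
        printf("ceil  pm=%u -> %u Hz (<= requested)\n",
               new_pm, spibrg / (new_pm * 4));
        return 0;
}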
@@ -1328,7 +1328,7 @@ static struct of_platform_driver of_mpc8xxx_spi_driver = { | |||
1328 | static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) | 1328 | static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) |
1329 | { | 1329 | { |
1330 | struct resource *mem; | 1330 | struct resource *mem; |
1331 | unsigned int irq; | 1331 | int irq; |
1332 | struct spi_master *master; | 1332 | struct spi_master *master; |
1333 | 1333 | ||
1334 | if (!pdev->dev.platform_data) | 1334 | if (!pdev->dev.platform_data) |
@@ -1339,7 +1339,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) | |||
1339 | return -EINVAL; | 1339 | return -EINVAL; |
1340 | 1340 | ||
1341 | irq = platform_get_irq(pdev, 0); | 1341 | irq = platform_get_irq(pdev, 0); |
1342 | if (!irq) | 1342 | if (irq <= 0) |
1343 | return -EINVAL; | 1343 | return -EINVAL; |
1344 | 1344 | ||
1345 | master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); | 1345 | master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); |
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index 140a18d6cf3e..6d8d4026a07a 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c | |||
@@ -578,7 +578,7 @@ static int __exit spi_ppc4xx_of_remove(struct of_device *op) | |||
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static struct of_device_id spi_ppc4xx_of_match[] = { | 581 | static const struct of_device_id spi_ppc4xx_of_match[] = { |
582 | { .compatible = "ibm,ppc4xx-spi", }, | 582 | { .compatible = "ibm,ppc4xx-spi", }, |
583 | {}, | 583 | {}, |
584 | }; | 584 | }; |
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index 88a456dba967..97365815a729 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
29 | 29 | ||
30 | #include <mach/dma.h> | 30 | #include <mach/dma.h> |
31 | #include <plat/spi.h> | 31 | #include <plat/s3c64xx-spi.h> |
32 | 32 | ||
33 | /* Registers and bit-fields */ | 33 | /* Registers and bit-fields */ |
34 | 34 | ||
@@ -137,6 +137,7 @@ | |||
137 | /** | 137 | /** |
138 | * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. | 138 | * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. |
139 | * @clk: Pointer to the spi clock. | 139 | * @clk: Pointer to the spi clock. |
140 | * @src_clk: Pointer to the clock used to generate SPI signals. | ||
140 | * @master: Pointer to the SPI Protocol master. | 141 | * @master: Pointer to the SPI Protocol master. |
141 | * @workqueue: Work queue for the SPI xfer requests. | 142 | * @workqueue: Work queue for the SPI xfer requests. |
142 | * @cntrlr_info: Platform specific data for the controller this driver manages. | 143 | * @cntrlr_info: Platform specific data for the controller this driver manages. |
@@ -157,10 +158,11 @@ | |||
157 | struct s3c64xx_spi_driver_data { | 158 | struct s3c64xx_spi_driver_data { |
158 | void __iomem *regs; | 159 | void __iomem *regs; |
159 | struct clk *clk; | 160 | struct clk *clk; |
161 | struct clk *src_clk; | ||
160 | struct platform_device *pdev; | 162 | struct platform_device *pdev; |
161 | struct spi_master *master; | 163 | struct spi_master *master; |
162 | struct workqueue_struct *workqueue; | 164 | struct workqueue_struct *workqueue; |
163 | struct s3c64xx_spi_cntrlr_info *cntrlr_info; | 165 | struct s3c64xx_spi_info *cntrlr_info; |
164 | struct spi_device *tgl_spi; | 166 | struct spi_device *tgl_spi; |
165 | struct work_struct work; | 167 | struct work_struct work; |
166 | struct list_head queue; | 168 | struct list_head queue; |
@@ -180,7 +182,7 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = { | |||
180 | 182 | ||
181 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) | 183 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) |
182 | { | 184 | { |
183 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 185 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
184 | void __iomem *regs = sdd->regs; | 186 | void __iomem *regs = sdd->regs; |
185 | unsigned long loops; | 187 | unsigned long loops; |
186 | u32 val; | 188 | u32 val; |
@@ -225,7 +227,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, | |||
225 | struct spi_device *spi, | 227 | struct spi_device *spi, |
226 | struct spi_transfer *xfer, int dma_mode) | 228 | struct spi_transfer *xfer, int dma_mode) |
227 | { | 229 | { |
228 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 230 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
229 | void __iomem *regs = sdd->regs; | 231 | void __iomem *regs = sdd->regs; |
230 | u32 modecfg, chcfg; | 232 | u32 modecfg, chcfg; |
231 | 233 | ||
@@ -298,19 +300,20 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd, | |||
298 | if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ | 300 | if (sdd->tgl_spi != spi) { /* if last mssg on diff device */ |
299 | /* Deselect the last toggled device */ | 301 | /* Deselect the last toggled device */ |
300 | cs = sdd->tgl_spi->controller_data; | 302 | cs = sdd->tgl_spi->controller_data; |
301 | cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); | 303 | cs->set_level(cs->line, |
304 | spi->mode & SPI_CS_HIGH ? 0 : 1); | ||
302 | } | 305 | } |
303 | sdd->tgl_spi = NULL; | 306 | sdd->tgl_spi = NULL; |
304 | } | 307 | } |
305 | 308 | ||
306 | cs = spi->controller_data; | 309 | cs = spi->controller_data; |
307 | cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0); | 310 | cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0); |
308 | } | 311 | } |
309 | 312 | ||
310 | static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, | 313 | static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, |
311 | struct spi_transfer *xfer, int dma_mode) | 314 | struct spi_transfer *xfer, int dma_mode) |
312 | { | 315 | { |
313 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 316 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
314 | void __iomem *regs = sdd->regs; | 317 | void __iomem *regs = sdd->regs; |
315 | unsigned long val; | 318 | unsigned long val; |
316 | int ms; | 319 | int ms; |
@@ -384,12 +387,11 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, | |||
384 | if (sdd->tgl_spi == spi) | 387 | if (sdd->tgl_spi == spi) |
385 | sdd->tgl_spi = NULL; | 388 | sdd->tgl_spi = NULL; |
386 | 389 | ||
387 | cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1); | 390 | cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1); |
388 | } | 391 | } |
389 | 392 | ||
390 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | 393 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) |
391 | { | 394 | { |
392 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
393 | void __iomem *regs = sdd->regs; | 395 | void __iomem *regs = sdd->regs; |
394 | u32 val; | 396 | u32 val; |
395 | 397 | ||
@@ -435,7 +437,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | |||
435 | /* Configure Clock */ | 437 | /* Configure Clock */ |
436 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | 438 | val = readl(regs + S3C64XX_SPI_CLK_CFG); |
437 | val &= ~S3C64XX_SPI_PSR_MASK; | 439 | val &= ~S3C64XX_SPI_PSR_MASK; |
438 | val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1) | 440 | val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) |
439 | & S3C64XX_SPI_PSR_MASK); | 441 | & S3C64XX_SPI_PSR_MASK); |
440 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | 442 | writel(val, regs + S3C64XX_SPI_CLK_CFG); |
441 | 443 | ||
@@ -558,7 +560,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, | |||
558 | static void handle_msg(struct s3c64xx_spi_driver_data *sdd, | 560 | static void handle_msg(struct s3c64xx_spi_driver_data *sdd, |
559 | struct spi_message *msg) | 561 | struct spi_message *msg) |
560 | { | 562 | { |
561 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 563 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
562 | struct spi_device *spi = msg->spi; | 564 | struct spi_device *spi = msg->spi; |
563 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; | 565 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; |
564 | struct spi_transfer *xfer; | 566 | struct spi_transfer *xfer; |
@@ -632,8 +634,8 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, | |||
632 | S3C64XX_SPI_DEACT(sdd); | 634 | S3C64XX_SPI_DEACT(sdd); |
633 | 635 | ||
634 | if (status) { | 636 | if (status) { |
635 | dev_err(&spi->dev, "I/O Error: \ | 637 | dev_err(&spi->dev, "I/O Error: " |
636 | rx-%d tx-%d res:rx-%c tx-%c len-%d\n", | 638 | "rx-%d tx-%d res:rx-%c tx-%c len-%d\n", |
637 | xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, | 639 | xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0, |
638 | (sdd->state & RXBUSY) ? 'f' : 'p', | 640 | (sdd->state & RXBUSY) ? 'f' : 'p', |
639 | (sdd->state & TXBUSY) ? 'f' : 'p', | 641 | (sdd->state & TXBUSY) ? 'f' : 'p', |
@@ -786,7 +788,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
786 | { | 788 | { |
787 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; | 789 | struct s3c64xx_spi_csinfo *cs = spi->controller_data; |
788 | struct s3c64xx_spi_driver_data *sdd; | 790 | struct s3c64xx_spi_driver_data *sdd; |
789 | struct s3c64xx_spi_cntrlr_info *sci; | 791 | struct s3c64xx_spi_info *sci; |
790 | struct spi_message *msg; | 792 | struct spi_message *msg; |
791 | u32 psr, speed; | 793 | u32 psr, speed; |
792 | unsigned long flags; | 794 | unsigned long flags; |
@@ -831,17 +833,17 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
831 | } | 833 | } |
832 | 834 | ||
833 | /* Check if we can provide the requested rate */ | 835 | /* Check if we can provide the requested rate */ |
834 | speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */ | 836 | speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ |
835 | 837 | ||
836 | if (spi->max_speed_hz > speed) | 838 | if (spi->max_speed_hz > speed) |
837 | spi->max_speed_hz = speed; | 839 | spi->max_speed_hz = speed; |
838 | 840 | ||
839 | psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1; | 841 | psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; |
840 | psr &= S3C64XX_SPI_PSR_MASK; | 842 | psr &= S3C64XX_SPI_PSR_MASK; |
841 | if (psr == S3C64XX_SPI_PSR_MASK) | 843 | if (psr == S3C64XX_SPI_PSR_MASK) |
842 | psr--; | 844 | psr--; |
843 | 845 | ||
844 | speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); | 846 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); |
845 | if (spi->max_speed_hz < speed) { | 847 | if (spi->max_speed_hz < speed) { |
846 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { | 848 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { |
847 | psr++; | 849 | psr++; |
@@ -851,7 +853,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
851 | } | 853 | } |
852 | } | 854 | } |
853 | 855 | ||
854 | speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1); | 856 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); |
855 | if (spi->max_speed_hz >= speed) | 857 | if (spi->max_speed_hz >= speed) |
856 | spi->max_speed_hz = speed; | 858 | spi->max_speed_hz = speed; |
857 | else | 859 | else |
@@ -867,7 +869,7 @@ setup_exit: | |||
867 | 869 | ||
868 | static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | 870 | static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) |
869 | { | 871 | { |
870 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 872 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
871 | void __iomem *regs = sdd->regs; | 873 | void __iomem *regs = sdd->regs; |
872 | unsigned int val; | 874 | unsigned int val; |
873 | 875 | ||
@@ -902,7 +904,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
902 | { | 904 | { |
903 | struct resource *mem_res, *dmatx_res, *dmarx_res; | 905 | struct resource *mem_res, *dmatx_res, *dmarx_res; |
904 | struct s3c64xx_spi_driver_data *sdd; | 906 | struct s3c64xx_spi_driver_data *sdd; |
905 | struct s3c64xx_spi_cntrlr_info *sci; | 907 | struct s3c64xx_spi_info *sci; |
906 | struct spi_master *master; | 908 | struct spi_master *master; |
907 | int ret; | 909 | int ret; |
908 | 910 | ||
@@ -1000,18 +1002,15 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1000 | goto err4; | 1002 | goto err4; |
1001 | } | 1003 | } |
1002 | 1004 | ||
1003 | if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK) | 1005 | sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name); |
1004 | sci->src_clk = sdd->clk; | 1006 | if (IS_ERR(sdd->src_clk)) { |
1005 | else | ||
1006 | sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name); | ||
1007 | if (IS_ERR(sci->src_clk)) { | ||
1008 | dev_err(&pdev->dev, | 1007 | dev_err(&pdev->dev, |
1009 | "Unable to acquire clock '%s'\n", sci->src_clk_name); | 1008 | "Unable to acquire clock '%s'\n", sci->src_clk_name); |
1010 | ret = PTR_ERR(sci->src_clk); | 1009 | ret = PTR_ERR(sdd->src_clk); |
1011 | goto err5; | 1010 | goto err5; |
1012 | } | 1011 | } |
1013 | 1012 | ||
1014 | if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) { | 1013 | if (clk_enable(sdd->src_clk)) { |
1015 | dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", | 1014 | dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", |
1016 | sci->src_clk_name); | 1015 | sci->src_clk_name); |
1017 | ret = -EBUSY; | 1016 | ret = -EBUSY; |
@@ -1040,11 +1039,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1040 | goto err8; | 1039 | goto err8; |
1041 | } | 1040 | } |
1042 | 1041 | ||
1043 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d \ | 1042 | dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d " |
1044 | with %d Slaves attached\n", | 1043 | "with %d Slaves attached\n", |
1045 | pdev->id, master->num_chipselect); | 1044 | pdev->id, master->num_chipselect); |
1046 | dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\ | 1045 | dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", |
1047 | \tDMA=[Rx-%d, Tx-%d]\n", | ||
1048 | mem_res->end, mem_res->start, | 1046 | mem_res->end, mem_res->start, |
1049 | sdd->rx_dmach, sdd->tx_dmach); | 1047 | sdd->rx_dmach, sdd->tx_dmach); |
1050 | 1048 | ||
@@ -1053,11 +1051,9 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev) | |||
1053 | err8: | 1051 | err8: |
1054 | destroy_workqueue(sdd->workqueue); | 1052 | destroy_workqueue(sdd->workqueue); |
1055 | err7: | 1053 | err7: |
1056 | if (sci->src_clk != sdd->clk) | 1054 | clk_disable(sdd->src_clk); |
1057 | clk_disable(sci->src_clk); | ||
1058 | err6: | 1055 | err6: |
1059 | if (sci->src_clk != sdd->clk) | 1056 | clk_put(sdd->src_clk); |
1060 | clk_put(sci->src_clk); | ||
1061 | err5: | 1057 | err5: |
1062 | clk_disable(sdd->clk); | 1058 | clk_disable(sdd->clk); |
1063 | err4: | 1059 | err4: |
@@ -1078,7 +1074,6 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1078 | { | 1074 | { |
1079 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1075 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1080 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1076 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1081 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
1082 | struct resource *mem_res; | 1077 | struct resource *mem_res; |
1083 | unsigned long flags; | 1078 | unsigned long flags; |
1084 | 1079 | ||
@@ -1093,11 +1088,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1093 | 1088 | ||
1094 | destroy_workqueue(sdd->workqueue); | 1089 | destroy_workqueue(sdd->workqueue); |
1095 | 1090 | ||
1096 | if (sci->src_clk != sdd->clk) | 1091 | clk_disable(sdd->src_clk); |
1097 | clk_disable(sci->src_clk); | 1092 | clk_put(sdd->src_clk); |
1098 | |||
1099 | if (sci->src_clk != sdd->clk) | ||
1100 | clk_put(sci->src_clk); | ||
1101 | 1093 | ||
1102 | clk_disable(sdd->clk); | 1094 | clk_disable(sdd->clk); |
1103 | clk_put(sdd->clk); | 1095 | clk_put(sdd->clk); |
@@ -1105,7 +1097,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1105 | iounmap((void *) sdd->regs); | 1097 | iounmap((void *) sdd->regs); |
1106 | 1098 | ||
1107 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1099 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1108 | release_mem_region(mem_res->start, resource_size(mem_res)); | 1100 | if (mem_res != NULL) |
1101 | release_mem_region(mem_res->start, resource_size(mem_res)); | ||
1109 | 1102 | ||
1110 | platform_set_drvdata(pdev, NULL); | 1103 | platform_set_drvdata(pdev, NULL); |
1111 | spi_master_put(master); | 1104 | spi_master_put(master); |
@@ -1118,8 +1111,6 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | |||
1118 | { | 1111 | { |
1119 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1112 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1120 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1113 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1121 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | ||
1122 | struct s3c64xx_spi_csinfo *cs; | ||
1123 | unsigned long flags; | 1114 | unsigned long flags; |
1124 | 1115 | ||
1125 | spin_lock_irqsave(&sdd->lock, flags); | 1116 | spin_lock_irqsave(&sdd->lock, flags); |
@@ -1130,9 +1121,7 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | |||
1130 | msleep(10); | 1121 | msleep(10); |
1131 | 1122 | ||
1132 | /* Disable the clock */ | 1123 | /* Disable the clock */ |
1133 | if (sci->src_clk != sdd->clk) | 1124 | clk_disable(sdd->src_clk); |
1134 | clk_disable(sci->src_clk); | ||
1135 | |||
1136 | clk_disable(sdd->clk); | 1125 | clk_disable(sdd->clk); |
1137 | 1126 | ||
1138 | sdd->cur_speed = 0; /* Output Clock is stopped */ | 1127 | sdd->cur_speed = 0; /* Output Clock is stopped */ |
@@ -1144,15 +1133,13 @@ static int s3c64xx_spi_resume(struct platform_device *pdev) | |||
1144 | { | 1133 | { |
1145 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); | 1134 | struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); |
1146 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1135 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1147 | struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info; | 1136 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
1148 | unsigned long flags; | 1137 | unsigned long flags; |
1149 | 1138 | ||
1150 | sci->cfg_gpio(pdev); | 1139 | sci->cfg_gpio(pdev); |
1151 | 1140 | ||
1152 | /* Enable the clock */ | 1141 | /* Enable the clock */ |
1153 | if (sci->src_clk != sdd->clk) | 1142 | clk_enable(sdd->src_clk); |
1154 | clk_enable(sci->src_clk); | ||
1155 | |||
1156 | clk_enable(sdd->clk); | 1143 | clk_enable(sdd->clk); |
1157 | 1144 | ||
1158 | s3c64xx_spi_hwinit(sdd, pdev->id); | 1145 | s3c64xx_spi_hwinit(sdd, pdev->id); |
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index 30973ec16a93..d93b66743ba7 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c | |||
@@ -20,12 +20,12 @@ | |||
20 | #include <linux/bitmap.h> | 20 | #include <linux/bitmap.h> |
21 | #include <linux/clk.h> | 21 | #include <linux/clk.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/err.h> | ||
23 | 24 | ||
24 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
25 | #include <linux/spi/spi_bitbang.h> | 26 | #include <linux/spi/spi_bitbang.h> |
26 | #include <linux/spi/sh_msiof.h> | 27 | #include <linux/spi/sh_msiof.h> |
27 | 28 | ||
28 | #include <asm/spi.h> | ||
29 | #include <asm/unaligned.h> | 29 | #include <asm/unaligned.h> |
30 | 30 | ||
31 | struct sh_msiof_spi_priv { | 31 | struct sh_msiof_spi_priv { |
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c index 2552bb364005..fadff76eb7e0 100644 --- a/drivers/spi/spi_stmp.c +++ b/drivers/spi/spi_stmp.c | |||
@@ -76,7 +76,7 @@ struct stmp_spi { | |||
76 | break; \ | 76 | break; \ |
77 | } \ | 77 | } \ |
78 | cpu_relax(); \ | 78 | cpu_relax(); \ |
79 | } while (time_before(end_jiffies, jiffies)); \ | 79 | } while (time_before(jiffies, end_jiffies)); \ |
80 | succeeded; \ | 80 | succeeded; \ |
81 | }) | 81 | }) |
82 | 82 | ||
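The one-character fix above matters because time_before(a, b) is true only while a is still before b; with the arguments swapped, the busy-wait gave up immediately instead of polling until the deadline. A small user-space sketch of the wrap-safe comparison and the two argument orders (time_before here is a simplified reimplementation, not the kernel macro):

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Wrap-safe "a is before b", same idea as the kernel's time_before(). */
static int time_before(jiffies_t a, jiffies_t b)
{
        return (long)(a - b) < 0;
}

int main(void)
{
        jiffies_t jiffies = 1000, end_jiffies = jiffies + 5;
        int polls = 0;

        /* Correct order: keep polling while the deadline is still ahead. */
        while (time_before(jiffies, end_jiffies)) {
                polls++;
                jiffies++;              /* pretend time passes */
        }
        printf("polled %d times before timing out\n", polls);

        /* Swapped arguments (the old bug) never enter the loop at all. */
        printf("time_before(end, now) at start = %d\n",
               time_before(end_jiffies, 1000));
        return 0;
}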
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 9f386379c169..1b47363cb73f 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c | |||
@@ -93,6 +93,26 @@ struct xilinx_spi { | |||
93 | void (*rx_fn) (struct xilinx_spi *); | 93 | void (*rx_fn) (struct xilinx_spi *); |
94 | }; | 94 | }; |
95 | 95 | ||
96 | static void xspi_write32(u32 val, void __iomem *addr) | ||
97 | { | ||
98 | iowrite32(val, addr); | ||
99 | } | ||
100 | |||
101 | static unsigned int xspi_read32(void __iomem *addr) | ||
102 | { | ||
103 | return ioread32(addr); | ||
104 | } | ||
105 | |||
106 | static void xspi_write32_be(u32 val, void __iomem *addr) | ||
107 | { | ||
108 | iowrite32be(val, addr); | ||
109 | } | ||
110 | |||
111 | static unsigned int xspi_read32_be(void __iomem *addr) | ||
112 | { | ||
113 | return ioread32be(addr); | ||
114 | } | ||
115 | |||
96 | static void xspi_tx8(struct xilinx_spi *xspi) | 116 | static void xspi_tx8(struct xilinx_spi *xspi) |
97 | { | 117 | { |
98 | xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); | 118 | xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET); |
@@ -374,11 +394,11 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | |||
374 | xspi->mem = *mem; | 394 | xspi->mem = *mem; |
375 | xspi->irq = irq; | 395 | xspi->irq = irq; |
376 | if (pdata->little_endian) { | 396 | if (pdata->little_endian) { |
377 | xspi->read_fn = ioread32; | 397 | xspi->read_fn = xspi_read32; |
378 | xspi->write_fn = iowrite32; | 398 | xspi->write_fn = xspi_write32; |
379 | } else { | 399 | } else { |
380 | xspi->read_fn = ioread32be; | 400 | xspi->read_fn = xspi_read32_be; |
381 | xspi->write_fn = iowrite32be; | 401 | xspi->write_fn = xspi_write32_be; |
382 | } | 402 | } |
383 | xspi->bits_per_word = pdata->bits_per_word; | 403 | xspi->bits_per_word = pdata->bits_per_word; |
384 | if (xspi->bits_per_word == 8) { | 404 | if (xspi->bits_per_word == 8) { |
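The xspi_read32()/xspi_write32() wrappers above give the driver a single, consistent function-pointer type regardless of whether ioread32()/iowrite32() are plain functions or macros on a given architecture, and let it pick the little- or big-endian flavour once at init time. A user-space sketch of the same select-an-accessor-by-endianness pattern; the helpers below are illustrative, not the kernel accessors:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for ioread32/ioread32be: byte-order-aware 32-bit loads. */
static uint32_t read32_le(const void *p)
{
        const uint8_t *b = p;
        return b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
}

static uint32_t read32_be(const void *p)
{
        const uint8_t *b = p;
        return (uint32_t)b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
}

int main(void)
{
        uint32_t (*read_fn)(const void *);
        uint8_t reg[4] = { 0x12, 0x34, 0x56, 0x78 };    /* fake register  */
        int little_endian = 0;                          /* "platform data" */

        read_fn = little_endian ? read32_le : read32_be;
        printf("value = 0x%08x\n", read_fn(reg));       /* 0x12345678 (BE) */
        return 0;
}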
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c index 71dc3adc0495..ed34a8d419c7 100644 --- a/drivers/spi/xilinx_spi_of.c +++ b/drivers/spi/xilinx_spi_of.c | |||
@@ -99,7 +99,7 @@ static int __exit xilinx_spi_of_remove(struct of_device *op) | |||
99 | return xilinx_spi_remove(op); | 99 | return xilinx_spi_remove(op); |
100 | } | 100 | } |
101 | 101 | ||
102 | static struct of_device_id xilinx_spi_of_match[] = { | 102 | static const struct of_device_id xilinx_spi_of_match[] = { |
103 | { .compatible = "xlnx,xps-spi-2.00.a", }, | 103 | { .compatible = "xlnx,xps-spi-2.00.a", }, |
104 | { .compatible = "xlnx,xps-spi-2.00.b", }, | 104 | { .compatible = "xlnx,xps-spi-2.00.b", }, |
105 | {} | 105 | {} |