author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/spi
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8
parent     6a00f206debf8a5c8899055726ad127dbeeed098
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'drivers/spi')
47 files changed, 9855 insertions, 2883 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 91c2f4f3af10..de35c3ad8a69 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -53,6 +53,20 @@ if SPI_MASTER | |||
53 | 53 | ||
54 | comment "SPI Master Controller Drivers" | 54 | comment "SPI Master Controller Drivers" |
55 | 55 | ||
56 | config SPI_ALTERA | ||
57 | tristate "Altera SPI Controller" | ||
58 | select SPI_BITBANG | ||
59 | help | ||
60 | This is the driver for the Altera SPI Controller. | ||
61 | |||
62 | config SPI_ATH79 | ||
63 | tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" | ||
64 | depends on ATH79 && GENERIC_GPIO | ||
65 | select SPI_BITBANG | ||
66 | help | ||
67 | This enables support for the SPI controller present on the | ||
68 | Atheros AR71XX/AR724X/AR913X SoCs. | ||
69 | |||
56 | config SPI_ATMEL | 70 | config SPI_ATMEL |
57 | tristate "Atmel SPI Controller" | 71 | tristate "Atmel SPI Controller" |
58 | depends on (ARCH_AT91 || AVR32) | 72 | depends on (ARCH_AT91 || AVR32) |
@@ -66,6 +80,15 @@ config SPI_BFIN | |||
66 | help | 80 | help |
67 | This is the SPI controller master driver for Blackfin 5xx processor. | 81 | This is the SPI controller master driver for Blackfin 5xx processor. |
68 | 82 | ||
83 | config SPI_BFIN_SPORT | ||
84 | tristate "SPI bus via Blackfin SPORT" | ||
85 | depends on BLACKFIN | ||
86 | help | ||
87 | Enable support for a SPI bus via the Blackfin SPORT peripheral. | ||
88 | |||
89 | This driver can also be built as a module. If so, the module | ||
90 | will be called spi_bfin_sport. | ||
91 | |||
69 | config SPI_AU1550 | 92 | config SPI_AU1550 |
70 | tristate "Au1550/Au12x0 SPI Controller" | 93 | tristate "Au1550/Au12x0 SPI Controller" |
71 | depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL | 94 | depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL |
@@ -111,11 +134,14 @@ config SPI_COLDFIRE_QSPI | |||
111 | will be called coldfire_qspi. | 134 | will be called coldfire_qspi. |
112 | 135 | ||
113 | config SPI_DAVINCI | 136 | config SPI_DAVINCI |
114 | tristate "SPI controller driver for DaVinci/DA8xx SoC's" | 137 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" |
115 | depends on SPI_MASTER && ARCH_DAVINCI | 138 | depends on SPI_MASTER && ARCH_DAVINCI |
116 | select SPI_BITBANG | 139 | select SPI_BITBANG |
117 | help | 140 | help |
118 | SPI master controller for DaVinci and DA8xx SPI modules. | 141 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. |
142 | |||
143 | This driver can also be built as a module. The module will be called | ||
144 | davinci_spi. | ||
119 | 145 | ||
120 | config SPI_EP93XX | 146 | config SPI_EP93XX |
121 | tristate "Cirrus Logic EP93xx SPI controller" | 147 | tristate "Cirrus Logic EP93xx SPI controller" |
@@ -143,10 +169,26 @@ config SPI_GPIO | |||
143 | GPIO operations, you should be able to leverage that for better | 169 | GPIO operations, you should be able to leverage that for better |
144 | speed with a custom version of this driver; see the source code. | 170 | speed with a custom version of this driver; see the source code. |
145 | 171 | ||
172 | config SPI_IMX_VER_IMX1 | ||
173 | def_bool y if SOC_IMX1 | ||
174 | |||
175 | config SPI_IMX_VER_0_0 | ||
176 | def_bool y if SOC_IMX21 || SOC_IMX27 | ||
177 | |||
178 | config SPI_IMX_VER_0_4 | ||
179 | def_bool y if SOC_IMX31 | ||
180 | |||
181 | config SPI_IMX_VER_0_7 | ||
182 | def_bool y if ARCH_MX25 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53 | ||
183 | |||
184 | config SPI_IMX_VER_2_3 | ||
185 | def_bool y if SOC_IMX51 || SOC_IMX53 | ||
186 | |||
146 | config SPI_IMX | 187 | config SPI_IMX |
147 | tristate "Freescale i.MX SPI controllers" | 188 | tristate "Freescale i.MX SPI controllers" |
148 | depends on ARCH_MXC | 189 | depends on ARCH_MXC |
149 | select SPI_BITBANG | 190 | select SPI_BITBANG |
191 | default m if IMX_HAVE_PLATFORM_SPI_IMX | ||
150 | help | 192 | help |
151 | This enables using the Freescale i.MX SPI controllers in master | 193 | This enables using the Freescale i.MX SPI controllers in master |
152 | mode. | 194 | mode. |
@@ -182,12 +224,34 @@ config SPI_MPC512x_PSC | |||
182 | This enables using the Freescale MPC5121 Programmable Serial | 224 | This enables using the Freescale MPC5121 Programmable Serial |
183 | Controller in SPI master mode. | 225 | Controller in SPI master mode. |
184 | 226 | ||
185 | config SPI_MPC8xxx | 227 | config SPI_FSL_LIB |
186 | tristate "Freescale MPC8xxx SPI controller" | 228 | tristate |
187 | depends on FSL_SOC | 229 | depends on FSL_SOC |
230 | |||
231 | config SPI_FSL_SPI | ||
232 | tristate "Freescale SPI controller" | ||
233 | depends on FSL_SOC | ||
234 | select SPI_FSL_LIB | ||
188 | help | 235 | help |
189 | This enables using the Freescale MPC8xxx SPI controllers in master | 236 | This enables using the Freescale SPI controllers in master mode. |
190 | mode. | 237 | MPC83xx platform uses the controller in cpu mode or CPM/QE mode. |
238 | MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. | ||
239 | |||
240 | config SPI_FSL_ESPI | ||
241 | tristate "Freescale eSPI controller" | ||
242 | depends on FSL_SOC | ||
243 | select SPI_FSL_LIB | ||
244 | help | ||
245 | This enables using the Freescale eSPI controllers in master mode. | ||
246 | From MPC8536, 85xx platform uses the controller, and all P10xx, | ||
247 | P20xx, P30xx,P40xx, P50xx uses this controller. | ||
248 | |||
249 | config SPI_OC_TINY | ||
250 | tristate "OpenCores tiny SPI" | ||
251 | depends on GENERIC_GPIO | ||
252 | select SPI_BITBANG | ||
253 | help | ||
254 | This is the driver for OpenCores tiny SPI master controller. | ||
191 | 255 | ||
192 | config SPI_OMAP_UWIRE | 256 | config SPI_OMAP_UWIRE |
193 | tristate "OMAP1 MicroWire" | 257 | tristate "OMAP1 MicroWire" |
@@ -216,8 +280,8 @@ config SPI_ORION | |||
216 | This enables using the SPI master controller on the Orion chips. | 280 | This enables using the SPI master controller on the Orion chips. |
217 | 281 | ||
218 | config SPI_PL022 | 282 | config SPI_PL022 |
219 | tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)" | 283 | tristate "ARM AMBA PL022 SSP controller" |
220 | depends on ARM_AMBA && EXPERIMENTAL | 284 | depends on ARM_AMBA |
221 | default y if MACH_U300 | 285 | default y if MACH_U300 |
222 | default y if ARCH_REALVIEW | 286 | default y if ARCH_REALVIEW |
223 | default y if INTEGRATOR_IMPD1 | 287 | default y if INTEGRATOR_IMPD1 |
@@ -236,12 +300,15 @@ config SPI_PPC4xx | |||
236 | 300 | ||
237 | config SPI_PXA2XX | 301 | config SPI_PXA2XX |
238 | tristate "PXA2xx SSP SPI master" | 302 | tristate "PXA2xx SSP SPI master" |
239 | depends on ARCH_PXA && EXPERIMENTAL | 303 | depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL |
240 | select PXA_SSP | 304 | select PXA_SSP if ARCH_PXA |
241 | help | 305 | help |
242 | This enables using a PXA2xx SSP port as a SPI master controller. | 306 | This enables using a PXA2xx or Sodaville SSP port as a SPI master |
243 | The driver can be configured to use any SSP port and additional | 307 | controller. The driver can be configured to use any SSP port and |
244 | documentation can be found a Documentation/spi/pxa2xx. | 308 | additional documentation can be found a Documentation/spi/pxa2xx. |
309 | |||
310 | config SPI_PXA2XX_PCI | ||
311 | def_bool SPI_PXA2XX && X86_32 && PCI | ||
245 | 312 | ||
246 | config SPI_S3C24XX | 313 | config SPI_S3C24XX |
247 | tristate "Samsung S3C24XX series SPI" | 314 | tristate "Samsung S3C24XX series SPI" |
@@ -273,8 +340,8 @@ config SPI_S3C24XX_GPIO | |||
273 | 340 | ||
274 | config SPI_S3C64XX | 341 | config SPI_S3C64XX |
275 | tristate "Samsung S3C64XX series type SPI" | 342 | tristate "Samsung S3C64XX series type SPI" |
276 | depends on ARCH_S3C64XX && EXPERIMENTAL | 343 | depends on (ARCH_S3C64XX || ARCH_S5P64X0) |
277 | select S3C64XX_DMA | 344 | select S3C64XX_DMA if ARCH_S3C64XX |
278 | help | 345 | help |
279 | SPI driver for Samsung S3C64XX and newer SoCs. | 346 | SPI driver for Samsung S3C64XX and newer SoCs. |
280 | 347 | ||
@@ -285,6 +352,12 @@ config SPI_SH_MSIOF | |||
285 | help | 352 | help |
286 | SPI driver for SuperH MSIOF blocks. | 353 | SPI driver for SuperH MSIOF blocks. |
287 | 354 | ||
355 | config SPI_SH | ||
356 | tristate "SuperH SPI controller" | ||
357 | depends on SUPERH | ||
358 | help | ||
359 | SPI driver for SuperH SPI blocks. | ||
360 | |||
288 | config SPI_SH_SCI | 361 | config SPI_SH_SCI |
289 | tristate "SuperH SCI SPI controller" | 362 | tristate "SuperH SCI SPI controller" |
290 | depends on SUPERH | 363 | depends on SUPERH |
@@ -298,6 +371,30 @@ config SPI_STMP3XXX | |||
298 | help | 371 | help |
299 | SPI driver for Freescale STMP37xx/378x SoC SSP interface | 372 | SPI driver for Freescale STMP37xx/378x SoC SSP interface |
300 | 373 | ||
374 | config SPI_TEGRA | ||
375 | tristate "Nvidia Tegra SPI controller" | ||
376 | depends on ARCH_TEGRA | ||
377 | select TEGRA_SYSTEM_DMA | ||
378 | help | ||
379 | SPI driver for NVidia Tegra SoCs | ||
380 | |||
381 | config SPI_TI_SSP | ||
382 | tristate "TI Sequencer Serial Port - SPI Support" | ||
383 | depends on MFD_TI_SSP | ||
384 | help | ||
385 | This selects an SPI master implementation using a TI sequencer | ||
386 | serial port. | ||
387 | |||
388 | To compile this driver as a module, choose M here: the | ||
389 | module will be called ti-ssp-spi. | ||
390 | |||
391 | config SPI_TOPCLIFF_PCH | ||
392 | tristate "Topcliff PCH SPI Controller" | ||
393 | depends on PCI | ||
394 | help | ||
395 | SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus | ||
396 | used in some x86 embedded processors. | ||
397 | |||
301 | config SPI_TXX9 | 398 | config SPI_TXX9 |
302 | tristate "Toshiba TXx9 SPI controller" | 399 | tristate "Toshiba TXx9 SPI controller" |
303 | depends on GENERIC_GPIO && CPU_TX49XX | 400 | depends on GENERIC_GPIO && CPU_TX49XX |
@@ -308,7 +405,6 @@ config SPI_XILINX | |||
308 | tristate "Xilinx SPI controller common module" | 405 | tristate "Xilinx SPI controller common module" |
309 | depends on HAS_IOMEM && EXPERIMENTAL | 406 | depends on HAS_IOMEM && EXPERIMENTAL |
310 | select SPI_BITBANG | 407 | select SPI_BITBANG |
311 | select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE) | ||
312 | help | 408 | help |
313 | This exposes the SPI controller IP from the Xilinx EDK. | 409 | This exposes the SPI controller IP from the Xilinx EDK. |
314 | 410 | ||
@@ -317,19 +413,6 @@ config SPI_XILINX | |||
317 | 413 | ||
318 | Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)" | 414 | Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)" |
319 | 415 | ||
320 | config SPI_XILINX_OF | ||
321 | tristate "Xilinx SPI controller OF device" | ||
322 | depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE) | ||
323 | help | ||
324 | This is the OF driver for the SPI controller IP from the Xilinx EDK. | ||
325 | |||
326 | config SPI_XILINX_PLTFM | ||
327 | tristate "Xilinx SPI controller platform device" | ||
328 | depends on SPI_XILINX | ||
329 | help | ||
330 | This is the platform driver for the SPI controller IP | ||
331 | from the Xilinx EDK. | ||
332 | |||
333 | config SPI_NUC900 | 416 | config SPI_NUC900 |
334 | tristate "Nuvoton NUC900 series SPI" | 417 | tristate "Nuvoton NUC900 series SPI" |
335 | depends on ARCH_W90X900 && EXPERIMENTAL | 418 | depends on ARCH_W90X900 && EXPERIMENTAL |
@@ -351,6 +434,10 @@ config SPI_DW_PCI | |||
351 | tristate "PCI interface driver for DW SPI core" | 434 | tristate "PCI interface driver for DW SPI core" |
352 | depends on SPI_DESIGNWARE && PCI | 435 | depends on SPI_DESIGNWARE && PCI |
353 | 436 | ||
437 | config SPI_DW_MID_DMA | ||
438 | bool "DMA support for DW SPI controller on Intel Moorestown platform" | ||
439 | depends on SPI_DW_PCI && INTEL_MID_DMAC | ||
440 | |||
354 | config SPI_DW_MMIO | 441 | config SPI_DW_MMIO |
355 | tristate "Memory-mapped io interface driver for DW SPI core" | 442 | tristate "Memory-mapped io interface driver for DW SPI core" |
356 | depends on SPI_DESIGNWARE && HAVE_CLK | 443 | depends on SPI_DESIGNWARE && HAVE_CLK |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index e9cbd18217a0..0f8c69b6b19e 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -2,30 +2,34 @@ | |||
2 | # Makefile for kernel SPI drivers. | 2 | # Makefile for kernel SPI drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | ifeq ($(CONFIG_SPI_DEBUG),y) | 5 | ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG |
6 | EXTRA_CFLAGS += -DDEBUG | ||
7 | endif | ||
8 | 6 | ||
9 | # small core, mostly translating board-specific | 7 | # small core, mostly translating board-specific |
10 | # config declarations into driver model code | 8 | # config declarations into driver model code |
11 | obj-$(CONFIG_SPI_MASTER) += spi.o | 9 | obj-$(CONFIG_SPI_MASTER) += spi.o |
12 | 10 | ||
13 | # SPI master controller drivers (bus) | 11 | # SPI master controller drivers (bus) |
12 | obj-$(CONFIG_SPI_ALTERA) += spi_altera.o | ||
14 | obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o | 13 | obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o |
14 | obj-$(CONFIG_SPI_ATH79) += ath79_spi.o | ||
15 | obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o | 15 | obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o |
16 | obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o | ||
16 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o | 17 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o |
17 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o | 18 | obj-$(CONFIG_SPI_AU1550) += au1550_spi.o |
18 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o | 19 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o |
19 | obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o | 20 | obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o |
20 | obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o | 21 | obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o |
21 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o | 22 | obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o |
22 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o | 23 | obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o |
24 | dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o | ||
23 | obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o | 25 | obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o |
24 | obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o | 26 | obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o |
25 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o | 27 | obj-$(CONFIG_SPI_GPIO) += spi_gpio.o |
26 | obj-$(CONFIG_SPI_IMX) += spi_imx.o | 28 | obj-$(CONFIG_SPI_IMX) += spi_imx.o |
27 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o | 29 | obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o |
28 | obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o | 30 | obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o |
31 | obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o | ||
32 | obj-$(CONFIG_SPI_OC_TINY) += spi_oc_tiny.o | ||
29 | obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o | 33 | obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o |
30 | obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o | 34 | obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o |
31 | obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o | 35 | obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o |
@@ -34,15 +38,19 @@ obj-$(CONFIG_SPI_PL022) += amba-pl022.o | |||
34 | obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o | 38 | obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o |
35 | obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o | 39 | obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o |
36 | obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o | 40 | obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o |
37 | obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o | 41 | obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o |
42 | obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o | ||
43 | obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o | ||
38 | obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o | 44 | obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o |
39 | obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o | 45 | obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o |
40 | obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o | 46 | obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o |
41 | obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o | 47 | obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o |
48 | obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o | ||
49 | obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o | ||
50 | obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o | ||
42 | obj-$(CONFIG_SPI_TXX9) += spi_txx9.o | 51 | obj-$(CONFIG_SPI_TXX9) += spi_txx9.o |
43 | obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o | 52 | obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o |
44 | obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o | 53 | obj-$(CONFIG_SPI_SH) += spi_sh.o |
45 | obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o | ||
46 | obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o | 54 | obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o |
47 | obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o | 55 | obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o |
48 | obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o | 56 | obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o |
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 4c37c4e28647..d18ce9e946d8 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -24,12 +24,6 @@ | |||
24 | * GNU General Public License for more details. | 24 | * GNU General Public License for more details. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | /* | ||
28 | * TODO: | ||
29 | * - add timeout on polled transfers | ||
30 | * - add generic DMA framework support | ||
31 | */ | ||
32 | |||
33 | #include <linux/init.h> | 27 | #include <linux/init.h> |
34 | #include <linux/module.h> | 28 | #include <linux/module.h> |
35 | #include <linux/device.h> | 29 | #include <linux/device.h> |
@@ -45,6 +39,9 @@ | |||
45 | #include <linux/amba/pl022.h> | 39 | #include <linux/amba/pl022.h> |
46 | #include <linux/io.h> | 40 | #include <linux/io.h> |
47 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
42 | #include <linux/dmaengine.h> | ||
43 | #include <linux/dma-mapping.h> | ||
44 | #include <linux/scatterlist.h> | ||
48 | 45 | ||
49 | /* | 46 | /* |
50 | * This macro is used to define some register default values. | 47 | * This macro is used to define some register default values. |
@@ -251,11 +248,6 @@ | |||
251 | #define STATE_ERROR ((void *) -1) | 248 | #define STATE_ERROR ((void *) -1) |
252 | 249 | ||
253 | /* | 250 | /* |
254 | * Queue State | ||
255 | */ | ||
256 | #define QUEUE_RUNNING (0) | ||
257 | #define QUEUE_STOPPED (1) | ||
258 | /* | ||
259 | * SSP State - Whether Enabled or Disabled | 251 | * SSP State - Whether Enabled or Disabled |
260 | */ | 252 | */ |
261 | #define SSP_DISABLED (0) | 253 | #define SSP_DISABLED (0) |
@@ -290,6 +282,8 @@ | |||
290 | 282 | ||
291 | #define CLEAR_ALL_INTERRUPTS 0x3 | 283 | #define CLEAR_ALL_INTERRUPTS 0x3 |
292 | 284 | ||
285 | #define SPI_POLLING_TIMEOUT 1000 | ||
286 | |||
293 | 287 | ||
294 | /* | 288 | /* |
295 | * The type of reading going on on this chip | 289 | * The type of reading going on on this chip |
@@ -327,22 +321,24 @@ struct vendor_data { | |||
327 | bool unidir; | 321 | bool unidir; |
328 | bool extended_cr; | 322 | bool extended_cr; |
329 | bool pl023; | 323 | bool pl023; |
324 | bool loopback; | ||
330 | }; | 325 | }; |
331 | 326 | ||
332 | /** | 327 | /** |
333 | * struct pl022 - This is the private SSP driver data structure | 328 | * struct pl022 - This is the private SSP driver data structure |
334 | * @adev: AMBA device model hookup | 329 | * @adev: AMBA device model hookup |
335 | * @vendor: Vendor data for the IP block | 330 | * @vendor: vendor data for the IP block |
336 | * @phybase: The physical memory where the SSP device resides | 331 | * @phybase: the physical memory where the SSP device resides |
337 | * @virtbase: The virtual memory where the SSP is mapped | 332 | * @virtbase: the virtual memory where the SSP is mapped |
333 | * @clk: outgoing clock "SPICLK" for the SPI bus | ||
338 | * @master: SPI framework hookup | 334 | * @master: SPI framework hookup |
339 | * @master_info: controller-specific data from machine setup | 335 | * @master_info: controller-specific data from machine setup |
340 | * @regs: SSP controller register's virtual address | ||
341 | * @pump_messages: Work struct for scheduling work to the workqueue | ||
342 | * @lock: spinlock to syncronise access to driver data | ||
343 | * @workqueue: a workqueue on which any spi_message request is queued | 336 | * @workqueue: a workqueue on which any spi_message request is queued |
337 | * @pump_messages: work struct for scheduling work to the workqueue | ||
338 | * @queue_lock: spinlock to syncronise access to message queue | ||
339 | * @queue: message queue | ||
344 | * @busy: workqueue is busy | 340 | * @busy: workqueue is busy |
345 | * @run: workqueue is running | 341 | * @running: workqueue is running |
346 | * @pump_transfers: Tasklet used in Interrupt Transfer mode | 342 | * @pump_transfers: Tasklet used in Interrupt Transfer mode |
347 | * @cur_msg: Pointer to current spi_message being processed | 343 | * @cur_msg: Pointer to current spi_message being processed |
348 | * @cur_transfer: Pointer to current spi_transfer | 344 | * @cur_transfer: Pointer to current spi_transfer |
@@ -351,8 +347,14 @@ struct vendor_data { | |||
351 | * @tx_end: end position in TX buffer to be read | 347 | * @tx_end: end position in TX buffer to be read |
352 | * @rx: current position in RX buffer to be written | 348 | * @rx: current position in RX buffer to be written |
353 | * @rx_end: end position in RX buffer to be written | 349 | * @rx_end: end position in RX buffer to be written |
354 | * @readingtype: the type of read currently going on | 350 | * @read: the type of read currently going on |
355 | * @writingtype: the type or write currently going on | 351 | * @write: the type of write currently going on |
352 | * @exp_fifo_level: expected FIFO level | ||
353 | * @dma_rx_channel: optional channel for RX DMA | ||
354 | * @dma_tx_channel: optional channel for TX DMA | ||
355 | * @sgt_rx: scattertable for the RX transfer | ||
356 | * @sgt_tx: scattertable for the TX transfer | ||
357 | * @dummypage: a dummy page used for driving data on the bus with DMA | ||
356 | */ | 358 | */ |
357 | struct pl022 { | 359 | struct pl022 { |
358 | struct amba_device *adev; | 360 | struct amba_device *adev; |
@@ -367,8 +369,8 @@ struct pl022 { | |||
367 | struct work_struct pump_messages; | 369 | struct work_struct pump_messages; |
368 | spinlock_t queue_lock; | 370 | spinlock_t queue_lock; |
369 | struct list_head queue; | 371 | struct list_head queue; |
370 | int busy; | 372 | bool busy; |
371 | int run; | 373 | bool running; |
372 | /* Message transfer pump */ | 374 | /* Message transfer pump */ |
373 | struct tasklet_struct pump_transfers; | 375 | struct tasklet_struct pump_transfers; |
374 | struct spi_message *cur_msg; | 376 | struct spi_message *cur_msg; |
@@ -381,6 +383,14 @@ struct pl022 { | |||
381 | enum ssp_reading read; | 383 | enum ssp_reading read; |
382 | enum ssp_writing write; | 384 | enum ssp_writing write; |
383 | u32 exp_fifo_level; | 385 | u32 exp_fifo_level; |
386 | /* DMA settings */ | ||
387 | #ifdef CONFIG_DMA_ENGINE | ||
388 | struct dma_chan *dma_rx_channel; | ||
389 | struct dma_chan *dma_tx_channel; | ||
390 | struct sg_table sgt_rx; | ||
391 | struct sg_table sgt_tx; | ||
392 | char *dummypage; | ||
393 | #endif | ||
384 | }; | 394 | }; |
385 | 395 | ||
386 | /** | 396 | /** |
@@ -392,8 +402,8 @@ struct pl022 { | |||
392 | * @cpsr: Value of Clock prescale register | 402 | * @cpsr: Value of Clock prescale register |
393 | * @n_bytes: how many bytes(power of 2) reqd for a given data width of client | 403 | * @n_bytes: how many bytes(power of 2) reqd for a given data width of client |
394 | * @enable_dma: Whether to enable DMA or not | 404 | * @enable_dma: Whether to enable DMA or not |
395 | * @write: function ptr to be used to write when doing xfer for this chip | ||
396 | * @read: function ptr to be used to read when doing xfer for this chip | 405 | * @read: function ptr to be used to read when doing xfer for this chip |
406 | * @write: function ptr to be used to write when doing xfer for this chip | ||
397 | * @cs_control: chip select callback provided by chip | 407 | * @cs_control: chip select callback provided by chip |
398 | * @xfer_type: polling/interrupt/DMA | 408 | * @xfer_type: polling/interrupt/DMA |
399 | * | 409 | * |
@@ -406,7 +416,7 @@ struct chip_data { | |||
406 | u16 dmacr; | 416 | u16 dmacr; |
407 | u16 cpsr; | 417 | u16 cpsr; |
408 | u8 n_bytes; | 418 | u8 n_bytes; |
409 | u8 enable_dma:1; | 419 | bool enable_dma; |
410 | enum ssp_reading read; | 420 | enum ssp_reading read; |
411 | enum ssp_writing write; | 421 | enum ssp_writing write; |
412 | void (*cs_control) (u32 command); | 422 | void (*cs_control) (u32 command); |
@@ -503,9 +513,10 @@ static void giveback(struct pl022 *pl022) | |||
503 | msg->state = NULL; | 513 | msg->state = NULL; |
504 | if (msg->complete) | 514 | if (msg->complete) |
505 | msg->complete(msg->context); | 515 | msg->complete(msg->context); |
506 | /* This message is completed, so let's turn off the clocks! */ | 516 | /* This message is completed, so let's turn off the clocks & power */ |
507 | clk_disable(pl022->clk); | 517 | clk_disable(pl022->clk); |
508 | amba_pclk_disable(pl022->adev); | 518 | amba_pclk_disable(pl022->adev); |
519 | amba_vcore_disable(pl022->adev); | ||
509 | } | 520 | } |
510 | 521 | ||
511 | /** | 522 | /** |
@@ -647,7 +658,7 @@ static void readwriter(struct pl022 *pl022) | |||
647 | { | 658 | { |
648 | 659 | ||
649 | /* | 660 | /* |
650 | * The FIFO depth is different inbetween primecell variants. | 661 | * The FIFO depth is different between primecell variants. |
651 | * I believe filling in too much in the FIFO might cause | 662 | * I believe filling in too much in the FIFO might cause |
652 | * errons in 8bit wide transfers on ARM variants (just 8 words | 663 | * errons in 8bit wide transfers on ARM variants (just 8 words |
653 | * FIFO, means only 8x8 = 64 bits in FIFO) at least. | 664 | * FIFO, means only 8x8 = 64 bits in FIFO) at least. |
@@ -708,7 +719,7 @@ static void readwriter(struct pl022 *pl022) | |||
708 | * This inner reader takes care of things appearing in the RX | 719 | * This inner reader takes care of things appearing in the RX |
709 | * FIFO as we're transmitting. This will happen a lot since the | 720 | * FIFO as we're transmitting. This will happen a lot since the |
710 | * clock starts running when you put things into the TX FIFO, | 721 | * clock starts running when you put things into the TX FIFO, |
711 | * and then things are continously clocked into the RX FIFO. | 722 | * and then things are continuously clocked into the RX FIFO. |
712 | */ | 723 | */ |
713 | while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) | 724 | while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) |
714 | && (pl022->rx < pl022->rx_end)) { | 725 | && (pl022->rx < pl022->rx_end)) { |
@@ -763,6 +774,364 @@ static void *next_transfer(struct pl022 *pl022) | |||
763 | } | 774 | } |
764 | return STATE_DONE; | 775 | return STATE_DONE; |
765 | } | 776 | } |
777 | |||
778 | /* | ||
779 | * This DMA functionality is only compiled in if we have | ||
780 | * access to the generic DMA devices/DMA engine. | ||
781 | */ | ||
782 | #ifdef CONFIG_DMA_ENGINE | ||
783 | static void unmap_free_dma_scatter(struct pl022 *pl022) | ||
784 | { | ||
785 | /* Unmap and free the SG tables */ | ||
786 | dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl, | ||
787 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
788 | dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl, | ||
789 | pl022->sgt_rx.nents, DMA_FROM_DEVICE); | ||
790 | sg_free_table(&pl022->sgt_rx); | ||
791 | sg_free_table(&pl022->sgt_tx); | ||
792 | } | ||
793 | |||
794 | static void dma_callback(void *data) | ||
795 | { | ||
796 | struct pl022 *pl022 = data; | ||
797 | struct spi_message *msg = pl022->cur_msg; | ||
798 | |||
799 | BUG_ON(!pl022->sgt_rx.sgl); | ||
800 | |||
801 | #ifdef VERBOSE_DEBUG | ||
802 | /* | ||
803 | * Optionally dump out buffers to inspect contents, this is | ||
804 | * good if you want to convince yourself that the loopback | ||
805 | * read/write contents are the same, when adopting to a new | ||
806 | * DMA engine. | ||
807 | */ | ||
808 | { | ||
809 | struct scatterlist *sg; | ||
810 | unsigned int i; | ||
811 | |||
812 | dma_sync_sg_for_cpu(&pl022->adev->dev, | ||
813 | pl022->sgt_rx.sgl, | ||
814 | pl022->sgt_rx.nents, | ||
815 | DMA_FROM_DEVICE); | ||
816 | |||
817 | for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { | ||
818 | dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); | ||
819 | print_hex_dump(KERN_ERR, "SPI RX: ", | ||
820 | DUMP_PREFIX_OFFSET, | ||
821 | 16, | ||
822 | 1, | ||
823 | sg_virt(sg), | ||
824 | sg_dma_len(sg), | ||
825 | 1); | ||
826 | } | ||
827 | for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { | ||
828 | dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); | ||
829 | print_hex_dump(KERN_ERR, "SPI TX: ", | ||
830 | DUMP_PREFIX_OFFSET, | ||
831 | 16, | ||
832 | 1, | ||
833 | sg_virt(sg), | ||
834 | sg_dma_len(sg), | ||
835 | 1); | ||
836 | } | ||
837 | } | ||
838 | #endif | ||
839 | |||
840 | unmap_free_dma_scatter(pl022); | ||
841 | |||
842 | /* Update total bytes transferred */ | ||
843 | msg->actual_length += pl022->cur_transfer->len; | ||
844 | if (pl022->cur_transfer->cs_change) | ||
845 | pl022->cur_chip-> | ||
846 | cs_control(SSP_CHIP_DESELECT); | ||
847 | |||
848 | /* Move to next transfer */ | ||
849 | msg->state = next_transfer(pl022); | ||
850 | tasklet_schedule(&pl022->pump_transfers); | ||
851 | } | ||
852 | |||
853 | static void setup_dma_scatter(struct pl022 *pl022, | ||
854 | void *buffer, | ||
855 | unsigned int length, | ||
856 | struct sg_table *sgtab) | ||
857 | { | ||
858 | struct scatterlist *sg; | ||
859 | int bytesleft = length; | ||
860 | void *bufp = buffer; | ||
861 | int mapbytes; | ||
862 | int i; | ||
863 | |||
864 | if (buffer) { | ||
865 | for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { | ||
866 | /* | ||
867 | * If there are less bytes left than what fits | ||
868 | * in the current page (plus page alignment offset) | ||
869 | * we just feed in this, else we stuff in as much | ||
870 | * as we can. | ||
871 | */ | ||
872 | if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) | ||
873 | mapbytes = bytesleft; | ||
874 | else | ||
875 | mapbytes = PAGE_SIZE - offset_in_page(bufp); | ||
876 | sg_set_page(sg, virt_to_page(bufp), | ||
877 | mapbytes, offset_in_page(bufp)); | ||
878 | bufp += mapbytes; | ||
879 | bytesleft -= mapbytes; | ||
880 | dev_dbg(&pl022->adev->dev, | ||
881 | "set RX/TX target page @ %p, %d bytes, %d left\n", | ||
882 | bufp, mapbytes, bytesleft); | ||
883 | } | ||
884 | } else { | ||
885 | /* Map the dummy buffer on every page */ | ||
886 | for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { | ||
887 | if (bytesleft < PAGE_SIZE) | ||
888 | mapbytes = bytesleft; | ||
889 | else | ||
890 | mapbytes = PAGE_SIZE; | ||
891 | sg_set_page(sg, virt_to_page(pl022->dummypage), | ||
892 | mapbytes, 0); | ||
893 | bytesleft -= mapbytes; | ||
894 | dev_dbg(&pl022->adev->dev, | ||
895 | "set RX/TX to dummy page %d bytes, %d left\n", | ||
896 | mapbytes, bytesleft); | ||
897 | |||
898 | } | ||
899 | } | ||
900 | BUG_ON(bytesleft); | ||
901 | } | ||
902 | |||
903 | /** | ||
904 | * configure_dma - configures the channels for the next transfer | ||
905 | * @pl022: SSP driver's private data structure | ||
906 | */ | ||
907 | static int configure_dma(struct pl022 *pl022) | ||
908 | { | ||
909 | struct dma_slave_config rx_conf = { | ||
910 | .src_addr = SSP_DR(pl022->phybase), | ||
911 | .direction = DMA_FROM_DEVICE, | ||
912 | .src_maxburst = pl022->vendor->fifodepth >> 1, | ||
913 | }; | ||
914 | struct dma_slave_config tx_conf = { | ||
915 | .dst_addr = SSP_DR(pl022->phybase), | ||
916 | .direction = DMA_TO_DEVICE, | ||
917 | .dst_maxburst = pl022->vendor->fifodepth >> 1, | ||
918 | }; | ||
919 | unsigned int pages; | ||
920 | int ret; | ||
921 | int rx_sglen, tx_sglen; | ||
922 | struct dma_chan *rxchan = pl022->dma_rx_channel; | ||
923 | struct dma_chan *txchan = pl022->dma_tx_channel; | ||
924 | struct dma_async_tx_descriptor *rxdesc; | ||
925 | struct dma_async_tx_descriptor *txdesc; | ||
926 | |||
927 | /* Check that the channels are available */ | ||
928 | if (!rxchan || !txchan) | ||
929 | return -ENODEV; | ||
930 | |||
931 | switch (pl022->read) { | ||
932 | case READING_NULL: | ||
933 | /* Use the same as for writing */ | ||
934 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
935 | break; | ||
936 | case READING_U8: | ||
937 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
938 | break; | ||
939 | case READING_U16: | ||
940 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
941 | break; | ||
942 | case READING_U32: | ||
943 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
944 | break; | ||
945 | } | ||
946 | |||
947 | switch (pl022->write) { | ||
948 | case WRITING_NULL: | ||
949 | /* Use the same as for reading */ | ||
950 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
951 | break; | ||
952 | case WRITING_U8: | ||
953 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
954 | break; | ||
955 | case WRITING_U16: | ||
956 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
957 | break; | ||
958 | case WRITING_U32: | ||
959 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
960 | break; | ||
961 | } | ||
962 | |||
963 | /* SPI pecularity: we need to read and write the same width */ | ||
964 | if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | ||
965 | rx_conf.src_addr_width = tx_conf.dst_addr_width; | ||
966 | if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | ||
967 | tx_conf.dst_addr_width = rx_conf.src_addr_width; | ||
968 | BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); | ||
969 | |||
970 | dmaengine_slave_config(rxchan, &rx_conf); | ||
971 | dmaengine_slave_config(txchan, &tx_conf); | ||
972 | |||
973 | /* Create sglists for the transfers */ | ||
974 | pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; | ||
975 | dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); | ||
976 | |||
977 | ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); | ||
978 | if (ret) | ||
979 | goto err_alloc_rx_sg; | ||
980 | |||
981 | ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); | ||
982 | if (ret) | ||
983 | goto err_alloc_tx_sg; | ||
984 | |||
985 | /* Fill in the scatterlists for the RX+TX buffers */ | ||
986 | setup_dma_scatter(pl022, pl022->rx, | ||
987 | pl022->cur_transfer->len, &pl022->sgt_rx); | ||
988 | setup_dma_scatter(pl022, pl022->tx, | ||
989 | pl022->cur_transfer->len, &pl022->sgt_tx); | ||
990 | |||
991 | /* Map DMA buffers */ | ||
992 | rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, | ||
993 | pl022->sgt_rx.nents, DMA_FROM_DEVICE); | ||
994 | if (!rx_sglen) | ||
995 | goto err_rx_sgmap; | ||
996 | |||
997 | tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, | ||
998 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
999 | if (!tx_sglen) | ||
1000 | goto err_tx_sgmap; | ||
1001 | |||
1002 | /* Send both scatterlists */ | ||
1003 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | ||
1004 | pl022->sgt_rx.sgl, | ||
1005 | rx_sglen, | ||
1006 | DMA_FROM_DEVICE, | ||
1007 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1008 | if (!rxdesc) | ||
1009 | goto err_rxdesc; | ||
1010 | |||
1011 | txdesc = txchan->device->device_prep_slave_sg(txchan, | ||
1012 | pl022->sgt_tx.sgl, | ||
1013 | tx_sglen, | ||
1014 | DMA_TO_DEVICE, | ||
1015 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1016 | if (!txdesc) | ||
1017 | goto err_txdesc; | ||
1018 | |||
1019 | /* Put the callback on the RX transfer only, that should finish last */ | ||
1020 | rxdesc->callback = dma_callback; | ||
1021 | rxdesc->callback_param = pl022; | ||
1022 | |||
1023 | /* Submit and fire RX and TX with TX last so we're ready to read! */ | ||
1024 | dmaengine_submit(rxdesc); | ||
1025 | dmaengine_submit(txdesc); | ||
1026 | dma_async_issue_pending(rxchan); | ||
1027 | dma_async_issue_pending(txchan); | ||
1028 | |||
1029 | return 0; | ||
1030 | |||
1031 | err_txdesc: | ||
1032 | dmaengine_terminate_all(txchan); | ||
1033 | err_rxdesc: | ||
1034 | dmaengine_terminate_all(rxchan); | ||
1035 | dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, | ||
1036 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
1037 | err_tx_sgmap: | ||
1038 | dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, | ||
1039 | pl022->sgt_tx.nents, DMA_FROM_DEVICE); | ||
1040 | err_rx_sgmap: | ||
1041 | sg_free_table(&pl022->sgt_tx); | ||
1042 | err_alloc_tx_sg: | ||
1043 | sg_free_table(&pl022->sgt_rx); | ||
1044 | err_alloc_rx_sg: | ||
1045 | return -ENOMEM; | ||
1046 | } | ||
1047 | |||
1048 | static int __init pl022_dma_probe(struct pl022 *pl022) | ||
1049 | { | ||
1050 | dma_cap_mask_t mask; | ||
1051 | |||
1052 | /* Try to acquire a generic DMA engine slave channel */ | ||
1053 | dma_cap_zero(mask); | ||
1054 | dma_cap_set(DMA_SLAVE, mask); | ||
1055 | /* | ||
1056 | * We need both RX and TX channels to do DMA, else do none | ||
1057 | * of them. | ||
1058 | */ | ||
1059 | pl022->dma_rx_channel = dma_request_channel(mask, | ||
1060 | pl022->master_info->dma_filter, | ||
1061 | pl022->master_info->dma_rx_param); | ||
1062 | if (!pl022->dma_rx_channel) { | ||
1063 | dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); | ||
1064 | goto err_no_rxchan; | ||
1065 | } | ||
1066 | |||
1067 | pl022->dma_tx_channel = dma_request_channel(mask, | ||
1068 | pl022->master_info->dma_filter, | ||
1069 | pl022->master_info->dma_tx_param); | ||
1070 | if (!pl022->dma_tx_channel) { | ||
1071 | dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); | ||
1072 | goto err_no_txchan; | ||
1073 | } | ||
1074 | |||
1075 | pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1076 | if (!pl022->dummypage) { | ||
1077 | dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); | ||
1078 | goto err_no_dummypage; | ||
1079 | } | ||
1080 | |||
1081 | dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", | ||
1082 | dma_chan_name(pl022->dma_rx_channel), | ||
1083 | dma_chan_name(pl022->dma_tx_channel)); | ||
1084 | |||
1085 | return 0; | ||
1086 | |||
1087 | err_no_dummypage: | ||
1088 | dma_release_channel(pl022->dma_tx_channel); | ||
1089 | err_no_txchan: | ||
1090 | dma_release_channel(pl022->dma_rx_channel); | ||
1091 | pl022->dma_rx_channel = NULL; | ||
1092 | err_no_rxchan: | ||
1093 | dev_err(&pl022->adev->dev, | ||
1094 | "Failed to work in dma mode, work without dma!\n"); | ||
1095 | return -ENODEV; | ||
1096 | } | ||
1097 | |||
1098 | static void terminate_dma(struct pl022 *pl022) | ||
1099 | { | ||
1100 | struct dma_chan *rxchan = pl022->dma_rx_channel; | ||
1101 | struct dma_chan *txchan = pl022->dma_tx_channel; | ||
1102 | |||
1103 | dmaengine_terminate_all(rxchan); | ||
1104 | dmaengine_terminate_all(txchan); | ||
1105 | unmap_free_dma_scatter(pl022); | ||
1106 | } | ||
1107 | |||
1108 | static void pl022_dma_remove(struct pl022 *pl022) | ||
1109 | { | ||
1110 | if (pl022->busy) | ||
1111 | terminate_dma(pl022); | ||
1112 | if (pl022->dma_tx_channel) | ||
1113 | dma_release_channel(pl022->dma_tx_channel); | ||
1114 | if (pl022->dma_rx_channel) | ||
1115 | dma_release_channel(pl022->dma_rx_channel); | ||
1116 | kfree(pl022->dummypage); | ||
1117 | } | ||
1118 | |||
1119 | #else | ||
1120 | static inline int configure_dma(struct pl022 *pl022) | ||
1121 | { | ||
1122 | return -ENODEV; | ||
1123 | } | ||
1124 | |||
1125 | static inline int pl022_dma_probe(struct pl022 *pl022) | ||
1126 | { | ||
1127 | return 0; | ||
1128 | } | ||
1129 | |||
1130 | static inline void pl022_dma_remove(struct pl022 *pl022) | ||
1131 | { | ||
1132 | } | ||
1133 | #endif | ||
1134 | |||
766 | /** | 1135 | /** |
767 | * pl022_interrupt_handler - Interrupt handler for SSP controller | 1136 | * pl022_interrupt_handler - Interrupt handler for SSP controller |
768 | * | 1137 | * |
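The hunk above is the bulk of the new DMA support. Reduced to its essentials, it follows the generic dmaengine slave pattern: configure both channels against the SSP data register, map one scatterlist per direction, prepare both descriptors, hang the completion callback on RX (which finishes last) and issue RX before TX. The sketch below is a condensed illustration of that sequence only; ssp_start_dma() and its parameters are illustrative stand-ins, not symbols from the driver, and the per-page scatter setup and error unwinding that configure_dma() performs are omitted.

```c
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Minimal dmaengine slave sequence for a full-duplex SPI transfer (sketch). */
static int ssp_start_dma(struct dma_chan *rxchan, struct dma_chan *txchan,
			 dma_addr_t fifo, struct sg_table *sgt_rx,
			 struct sg_table *sgt_tx, dma_async_tx_callback done,
			 void *ctx)
{
	struct dma_slave_config rx_conf = {
		.src_addr = fifo,			/* SSP data register */
		.direction = DMA_FROM_DEVICE,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	};
	struct dma_slave_config tx_conf = {
		.dst_addr = fifo,
		.direction = DMA_TO_DEVICE,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
	};
	struct dma_async_tx_descriptor *rxd, *txd;
	int rx_nents, tx_nents;

	dmaengine_slave_config(rxchan, &rx_conf);
	dmaengine_slave_config(txchan, &tx_conf);

	/* Map the pre-built scatterlists for the device */
	rx_nents = dma_map_sg(rxchan->device->dev, sgt_rx->sgl,
			      sgt_rx->nents, DMA_FROM_DEVICE);
	tx_nents = dma_map_sg(txchan->device->dev, sgt_tx->sgl,
			      sgt_tx->nents, DMA_TO_DEVICE);
	if (!rx_nents || !tx_nents)
		return -ENOMEM;

	rxd = rxchan->device->device_prep_slave_sg(rxchan, sgt_rx->sgl,
			rx_nents, DMA_FROM_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	txd = txchan->device->device_prep_slave_sg(txchan, sgt_tx->sgl,
			tx_nents, DMA_TO_DEVICE,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxd || !txd)
		return -EBUSY;

	/* Completion callback on RX only: it is the side that finishes last */
	rxd->callback = done;
	rxd->callback_param = ctx;

	/* Submit and fire RX before TX so the read side is armed first */
	dmaengine_submit(rxd);
	dmaengine_submit(txd);
	dma_async_issue_pending(rxchan);
	dma_async_issue_pending(txchan);
	return 0;
}
```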
@@ -794,14 +1163,17 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
794 | if (unlikely(!irq_status)) | 1163 | if (unlikely(!irq_status)) |
795 | return IRQ_NONE; | 1164 | return IRQ_NONE; |
796 | 1165 | ||
797 | /* This handles the error code interrupts */ | 1166 | /* |
1167 | * This handles the FIFO interrupts, the timeout | ||
1168 | * interrupts are flatly ignored, they cannot be | ||
1169 | * trusted. | ||
1170 | */ | ||
798 | if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { | 1171 | if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { |
799 | /* | 1172 | /* |
800 | * Overrun interrupt - bail out since our Data has been | 1173 | * Overrun interrupt - bail out since our Data has been |
801 | * corrupted | 1174 | * corrupted |
802 | */ | 1175 | */ |
803 | dev_err(&pl022->adev->dev, | 1176 | dev_err(&pl022->adev->dev, "FIFO overrun\n"); |
804 | "FIFO overrun\n"); | ||
805 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) | 1177 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) |
806 | dev_err(&pl022->adev->dev, | 1178 | dev_err(&pl022->adev->dev, |
807 | "RXFIFO is full\n"); | 1179 | "RXFIFO is full\n"); |
@@ -851,7 +1223,7 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | |||
851 | "number of bytes on a 16bit bus?)\n", | 1223 | "number of bytes on a 16bit bus?)\n", |
852 | (u32) (pl022->rx - pl022->rx_end)); | 1224 | (u32) (pl022->rx - pl022->rx_end)); |
853 | } | 1225 | } |
854 | /* Update total bytes transfered */ | 1226 | /* Update total bytes transferred */ |
855 | msg->actual_length += pl022->cur_transfer->len; | 1227 | msg->actual_length += pl022->cur_transfer->len; |
856 | if (pl022->cur_transfer->cs_change) | 1228 | if (pl022->cur_transfer->cs_change) |
857 | pl022->cur_chip-> | 1229 | pl022->cur_chip-> |
@@ -896,8 +1268,8 @@ static int set_up_next_transfer(struct pl022 *pl022, | |||
896 | } | 1268 | } |
897 | 1269 | ||
898 | /** | 1270 | /** |
899 | * pump_transfers - Tasklet function which schedules next interrupt transfer | 1271 | * pump_transfers - Tasklet function which schedules next transfer |
900 | * when running in interrupt transfer mode. | 1272 | * when running in interrupt or DMA transfer mode. |
901 | * @data: SSP driver private data structure | 1273 | * @data: SSP driver private data structure |
902 | * | 1274 | * |
903 | */ | 1275 | */ |
@@ -954,65 +1326,23 @@ static void pump_transfers(unsigned long data) | |||
954 | } | 1326 | } |
955 | /* Flush the FIFOs and let's go! */ | 1327 | /* Flush the FIFOs and let's go! */ |
956 | flush(pl022); | 1328 | flush(pl022); |
957 | writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); | ||
958 | } | ||
959 | |||
960 | /** | ||
961 | * NOT IMPLEMENTED | ||
962 | * configure_dma - It configures the DMA pipes for DMA transfers | ||
963 | * @data: SSP driver's private data structure | ||
964 | * | ||
965 | */ | ||
966 | static int configure_dma(void *data) | ||
967 | { | ||
968 | struct pl022 *pl022 = data; | ||
969 | dev_dbg(&pl022->adev->dev, "configure DMA\n"); | ||
970 | return -ENOTSUPP; | ||
971 | } | ||
972 | |||
973 | /** | ||
974 | * do_dma_transfer - It handles transfers of the current message | ||
975 | * if it is DMA xfer. | ||
976 | * NOT FULLY IMPLEMENTED | ||
977 | * @data: SSP driver's private data structure | ||
978 | */ | ||
979 | static void do_dma_transfer(void *data) | ||
980 | { | ||
981 | struct pl022 *pl022 = data; | ||
982 | 1329 | ||
983 | if (configure_dma(data)) { | 1330 | if (pl022->cur_chip->enable_dma) { |
984 | dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n"); | 1331 | if (configure_dma(pl022)) { |
985 | goto err_config_dma; | 1332 | dev_dbg(&pl022->adev->dev, |
986 | } | 1333 | "configuration of DMA failed, fall back to interrupt mode\n"); |
987 | 1334 | goto err_config_dma; | |
988 | /* TODO: Implememt DMA setup of pipes here */ | 1335 | } |
989 | |||
990 | /* Enable target chip, set up transfer */ | ||
991 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | ||
992 | if (set_up_next_transfer(pl022, pl022->cur_transfer)) { | ||
993 | /* Error path */ | ||
994 | pl022->cur_msg->state = STATE_ERROR; | ||
995 | pl022->cur_msg->status = -EIO; | ||
996 | giveback(pl022); | ||
997 | return; | 1336 | return; |
998 | } | 1337 | } |
999 | /* Enable SSP */ | ||
1000 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), | ||
1001 | SSP_CR1(pl022->virtbase)); | ||
1002 | |||
1003 | /* TODO: Enable the DMA transfer here */ | ||
1004 | return; | ||
1005 | 1338 | ||
1006 | err_config_dma: | 1339 | err_config_dma: |
1007 | pl022->cur_msg->state = STATE_ERROR; | 1340 | writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); |
1008 | pl022->cur_msg->status = -EIO; | ||
1009 | giveback(pl022); | ||
1010 | return; | ||
1011 | } | 1341 | } |
1012 | 1342 | ||
1013 | static void do_interrupt_transfer(void *data) | 1343 | static void do_interrupt_dma_transfer(struct pl022 *pl022) |
1014 | { | 1344 | { |
1015 | struct pl022 *pl022 = data; | 1345 | u32 irqflags = ENABLE_ALL_INTERRUPTS; |
1016 | 1346 | ||
1017 | /* Enable target chip */ | 1347 | /* Enable target chip */ |
1018 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | 1348 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); |
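The hunks on either side of this point collapse the old do_interrupt_transfer()/do_dma_transfer() pair into a single do_interrupt_dma_transfer() that degrades gracefully: if DMA cannot be configured, the transfer proceeds in interrupt mode instead of failing the message. A minimal sketch of that fallback, paraphrasing the new logic rather than quoting it, and relying on the driver's own definitions (struct pl022, configure_dma(), the SSP_* register macros); the helper name is illustrative:

```c
/* Try DMA first; on failure keep the SSP interrupts on and poll the FIFOs
 * from the interrupt handler instead (sketch, not the driver's exact code). */
static void start_transfer_dma_or_irq(struct pl022 *pl022)
{
	u32 irqflags = ENABLE_ALL_INTERRUPTS;

	/* Assert chip select before starting the transfer */
	pl022->cur_chip->cs_control(SSP_CHIP_SELECT);

	/* DMA armed: completion then comes from the DMA controller's own IRQ */
	if (pl022->cur_chip->enable_dma && configure_dma(pl022) == 0)
		irqflags = DISABLE_ALL_INTERRUPTS;

	/* Enable the SSP block, then apply whichever interrupt mask applies */
	writew(readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE,
	       SSP_CR1(pl022->virtbase));
	writew(irqflags, SSP_IMSC(pl022->virtbase));
}
```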
@@ -1023,19 +1353,31 @@ static void do_interrupt_transfer(void *data) | |||
1023 | giveback(pl022); | 1353 | giveback(pl022); |
1024 | return; | 1354 | return; |
1025 | } | 1355 | } |
1356 | /* If we're using DMA, set up DMA here */ | ||
1357 | if (pl022->cur_chip->enable_dma) { | ||
1358 | /* Configure DMA transfer */ | ||
1359 | if (configure_dma(pl022)) { | ||
1360 | dev_dbg(&pl022->adev->dev, | ||
1361 | "configuration of DMA failed, fall back to interrupt mode\n"); | ||
1362 | goto err_config_dma; | ||
1363 | } | ||
1364 | /* Disable interrupts in DMA mode, IRQ from DMA controller */ | ||
1365 | irqflags = DISABLE_ALL_INTERRUPTS; | ||
1366 | } | ||
1367 | err_config_dma: | ||
1026 | /* Enable SSP, turn on interrupts */ | 1368 | /* Enable SSP, turn on interrupts */ |
1027 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), | 1369 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), |
1028 | SSP_CR1(pl022->virtbase)); | 1370 | SSP_CR1(pl022->virtbase)); |
1029 | writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); | 1371 | writew(irqflags, SSP_IMSC(pl022->virtbase)); |
1030 | } | 1372 | } |
1031 | 1373 | ||
1032 | static void do_polling_transfer(void *data) | 1374 | static void do_polling_transfer(struct pl022 *pl022) |
1033 | { | 1375 | { |
1034 | struct pl022 *pl022 = data; | ||
1035 | struct spi_message *message = NULL; | 1376 | struct spi_message *message = NULL; |
1036 | struct spi_transfer *transfer = NULL; | 1377 | struct spi_transfer *transfer = NULL; |
1037 | struct spi_transfer *previous = NULL; | 1378 | struct spi_transfer *previous = NULL; |
1038 | struct chip_data *chip; | 1379 | struct chip_data *chip; |
1380 | unsigned long time, timeout; | ||
1039 | 1381 | ||
1040 | chip = pl022->cur_chip; | 1382 | chip = pl022->cur_chip; |
1041 | message = pl022->cur_msg; | 1383 | message = pl022->cur_msg; |
@@ -1073,18 +1415,28 @@ static void do_polling_transfer(void *data) | |||
1073 | SSP_CR1(pl022->virtbase)); | 1415 | SSP_CR1(pl022->virtbase)); |
1074 | 1416 | ||
1075 | dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); | 1417 | dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); |
1076 | /* FIXME: insert a timeout so we don't hang here indefinately */ | 1418 | |
1077 | while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) | 1419 | timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); |
1420 | while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { | ||
1421 | time = jiffies; | ||
1078 | readwriter(pl022); | 1422 | readwriter(pl022); |
1423 | if (time_after(time, timeout)) { | ||
1424 | dev_warn(&pl022->adev->dev, | ||
1425 | "%s: timeout!\n", __func__); | ||
1426 | message->state = STATE_ERROR; | ||
1427 | goto out; | ||
1428 | } | ||
1429 | cpu_relax(); | ||
1430 | } | ||
1079 | 1431 | ||
1080 | /* Update total byte transfered */ | 1432 | /* Update total byte transferred */ |
1081 | message->actual_length += pl022->cur_transfer->len; | 1433 | message->actual_length += pl022->cur_transfer->len; |
1082 | if (pl022->cur_transfer->cs_change) | 1434 | if (pl022->cur_transfer->cs_change) |
1083 | pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); | 1435 | pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); |
1084 | /* Move to next transfer */ | 1436 | /* Move to next transfer */ |
1085 | message->state = next_transfer(pl022); | 1437 | message->state = next_transfer(pl022); |
1086 | } | 1438 | } |
1087 | 1439 | out: | |
1088 | /* Handle end of message */ | 1440 | /* Handle end of message */ |
1089 | if (message->state == STATE_DONE) | 1441 | if (message->state == STATE_DONE) |
1090 | message->status = 0; | 1442 | message->status = 0; |
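The polling path above replaces the old unbounded busy-wait (and its FIXME) with a jiffies-based deadline. The pattern, restated compactly and not verbatim from the driver, using the SPI_POLLING_TIMEOUT constant introduced earlier in this patch:

```c
#include <linux/jiffies.h>
#include <linux/errno.h>

/* Bounded polling loop (sketch): compute one absolute deadline and check it
 * on every pass so a stuck FIFO can no longer wedge the worker thread. */
static int poll_transfer_bounded(struct pl022 *pl022)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);

	while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
		readwriter(pl022);		/* drain/refill the FIFOs */
		if (time_after(jiffies, timeout))
			return -EAGAIN;		/* caller marks STATE_ERROR */
		cpu_relax();
	}
	return 0;
}
```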
@@ -1101,7 +1453,7 @@ static void do_polling_transfer(void *data) | |||
1101 | * | 1453 | * |
1102 | * This function checks if there is any spi message in the queue that | 1454 | * This function checks if there is any spi message in the queue that |
1103 | * needs processing and delegate control to appropriate function | 1455 | * needs processing and delegate control to appropriate function |
1104 | * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() | 1456 | * do_polling_transfer()/do_interrupt_dma_transfer() |
1105 | * based on the kind of the transfer | 1457 | * based on the kind of the transfer |
1106 | * | 1458 | * |
1107 | */ | 1459 | */ |
@@ -1113,8 +1465,8 @@ static void pump_messages(struct work_struct *work) | |||
1113 | 1465 | ||
1114 | /* Lock queue and check for queue work */ | 1466 | /* Lock queue and check for queue work */ |
1115 | spin_lock_irqsave(&pl022->queue_lock, flags); | 1467 | spin_lock_irqsave(&pl022->queue_lock, flags); |
1116 | if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) { | 1468 | if (list_empty(&pl022->queue) || !pl022->running) { |
1117 | pl022->busy = 0; | 1469 | pl022->busy = false; |
1118 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1470 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
1119 | return; | 1471 | return; |
1120 | } | 1472 | } |
@@ -1128,7 +1480,7 @@ static void pump_messages(struct work_struct *work) | |||
1128 | list_entry(pl022->queue.next, struct spi_message, queue); | 1480 | list_entry(pl022->queue.next, struct spi_message, queue); |
1129 | 1481 | ||
1130 | list_del_init(&pl022->cur_msg->queue); | 1482 | list_del_init(&pl022->cur_msg->queue); |
1131 | pl022->busy = 1; | 1483 | pl022->busy = true; |
1132 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1484 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
1133 | 1485 | ||
1134 | /* Initial message state */ | 1486 | /* Initial message state */ |
@@ -1140,9 +1492,11 @@ static void pump_messages(struct work_struct *work) | |||
1140 | /* Setup the SPI using the per chip configuration */ | 1492 | /* Setup the SPI using the per chip configuration */ |
1141 | pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); | 1493 | pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); |
1142 | /* | 1494 | /* |
1143 | * We enable the clocks here, then the clocks will be disabled when | 1495 | * We enable the core voltage and clocks here, then the clocks |
1144 | * giveback() is called in each method (poll/interrupt/DMA) | 1496 | * and core will be disabled when giveback() is called in each method |
1497 | * (poll/interrupt/DMA) | ||
1145 | */ | 1498 | */ |
1499 | amba_vcore_enable(pl022->adev); | ||
1146 | amba_pclk_enable(pl022->adev); | 1500 | amba_pclk_enable(pl022->adev); |
1147 | clk_enable(pl022->clk); | 1501 | clk_enable(pl022->clk); |
1148 | restore_state(pl022); | 1502 | restore_state(pl022); |
@@ -1150,10 +1504,8 @@ static void pump_messages(struct work_struct *work) | |||
1150 | 1504 | ||
1151 | if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) | 1505 | if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) |
1152 | do_polling_transfer(pl022); | 1506 | do_polling_transfer(pl022); |
1153 | else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER) | ||
1154 | do_interrupt_transfer(pl022); | ||
1155 | else | 1507 | else |
1156 | do_dma_transfer(pl022); | 1508 | do_interrupt_dma_transfer(pl022); |
1157 | } | 1509 | } |
1158 | 1510 | ||
1159 | 1511 | ||
@@ -1162,8 +1514,8 @@ static int __init init_queue(struct pl022 *pl022) | |||
1162 | INIT_LIST_HEAD(&pl022->queue); | 1514 | INIT_LIST_HEAD(&pl022->queue); |
1163 | spin_lock_init(&pl022->queue_lock); | 1515 | spin_lock_init(&pl022->queue_lock); |
1164 | 1516 | ||
1165 | pl022->run = QUEUE_STOPPED; | 1517 | pl022->running = false; |
1166 | pl022->busy = 0; | 1518 | pl022->busy = false; |
1167 | 1519 | ||
1168 | tasklet_init(&pl022->pump_transfers, | 1520 | tasklet_init(&pl022->pump_transfers, |
1169 | pump_transfers, (unsigned long)pl022); | 1521 | pump_transfers, (unsigned long)pl022); |
@@ -1184,12 +1536,12 @@ static int start_queue(struct pl022 *pl022) | |||
1184 | 1536 | ||
1185 | spin_lock_irqsave(&pl022->queue_lock, flags); | 1537 | spin_lock_irqsave(&pl022->queue_lock, flags); |
1186 | 1538 | ||
1187 | if (pl022->run == QUEUE_RUNNING || pl022->busy) { | 1539 | if (pl022->running || pl022->busy) { |
1188 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1540 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
1189 | return -EBUSY; | 1541 | return -EBUSY; |
1190 | } | 1542 | } |
1191 | 1543 | ||
1192 | pl022->run = QUEUE_RUNNING; | 1544 | pl022->running = true; |
1193 | pl022->cur_msg = NULL; | 1545 | pl022->cur_msg = NULL; |
1194 | pl022->cur_transfer = NULL; | 1546 | pl022->cur_transfer = NULL; |
1195 | pl022->cur_chip = NULL; | 1547 | pl022->cur_chip = NULL; |
@@ -1213,7 +1565,7 @@ static int stop_queue(struct pl022 *pl022) | |||
1213 | * A wait_queue on the pl022->busy could be used, but then the common | 1565 | * A wait_queue on the pl022->busy could be used, but then the common |
1214 | * execution path (pump_messages) would be required to call wake_up or | 1566 | * execution path (pump_messages) would be required to call wake_up or |
1215 | * friends on every SPI message. Do this instead */ | 1567 | * friends on every SPI message. Do this instead */ |
1216 | while (!list_empty(&pl022->queue) && pl022->busy && limit--) { | 1568 | while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { |
1217 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1569 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
1218 | msleep(10); | 1570 | msleep(10); |
1219 | spin_lock_irqsave(&pl022->queue_lock, flags); | 1571 | spin_lock_irqsave(&pl022->queue_lock, flags); |
@@ -1221,7 +1573,8 @@ static int stop_queue(struct pl022 *pl022) | |||
1221 | 1573 | ||
1222 | if (!list_empty(&pl022->queue) || pl022->busy) | 1574 | if (!list_empty(&pl022->queue) || pl022->busy) |
1223 | status = -EBUSY; | 1575 | status = -EBUSY; |
1224 | else pl022->run = QUEUE_STOPPED; | 1576 | else |
1577 | pl022->running = false; | ||
1225 | 1578 | ||
1226 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1579 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
1227 | 1580 | ||
@@ -1248,100 +1601,56 @@ static int destroy_queue(struct pl022 *pl022) | |||
1248 | } | 1601 | } |
1249 | 1602 | ||
1250 | static int verify_controller_parameters(struct pl022 *pl022, | 1603 | static int verify_controller_parameters(struct pl022 *pl022, |
1251 | struct pl022_config_chip *chip_info) | 1604 | struct pl022_config_chip const *chip_info) |
1252 | { | 1605 | { |
1253 | if ((chip_info->lbm != LOOPBACK_ENABLED) | ||
1254 | && (chip_info->lbm != LOOPBACK_DISABLED)) { | ||
1255 | dev_err(chip_info->dev, | ||
1256 | "loopback Mode is configured incorrectly\n"); | ||
1257 | return -EINVAL; | ||
1258 | } | ||
1259 | if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) | 1606 | if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) |
1260 | || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { | 1607 | || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { |
1261 | dev_err(chip_info->dev, | 1608 | dev_err(&pl022->adev->dev, |
1262 | "interface is configured incorrectly\n"); | 1609 | "interface is configured incorrectly\n"); |
1263 | return -EINVAL; | 1610 | return -EINVAL; |
1264 | } | 1611 | } |
1265 | if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && | 1612 | if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && |
1266 | (!pl022->vendor->unidir)) { | 1613 | (!pl022->vendor->unidir)) { |
1267 | dev_err(chip_info->dev, | 1614 | dev_err(&pl022->adev->dev, |
1268 | "unidirectional mode not supported in this " | 1615 | "unidirectional mode not supported in this " |
1269 | "hardware version\n"); | 1616 | "hardware version\n"); |
1270 | return -EINVAL; | 1617 | return -EINVAL; |
1271 | } | 1618 | } |
1272 | if ((chip_info->hierarchy != SSP_MASTER) | 1619 | if ((chip_info->hierarchy != SSP_MASTER) |
1273 | && (chip_info->hierarchy != SSP_SLAVE)) { | 1620 | && (chip_info->hierarchy != SSP_SLAVE)) { |
1274 | dev_err(chip_info->dev, | 1621 | dev_err(&pl022->adev->dev, |
1275 | "hierarchy is configured incorrectly\n"); | 1622 | "hierarchy is configured incorrectly\n"); |
1276 | return -EINVAL; | 1623 | return -EINVAL; |
1277 | } | 1624 | } |
1278 | if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN) | ||
1279 | || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) { | ||
1280 | dev_err(chip_info->dev, | ||
1281 | "cpsdvsr is configured incorrectly\n"); | ||
1282 | return -EINVAL; | ||
1283 | } | ||
1284 | if ((chip_info->endian_rx != SSP_RX_MSB) | ||
1285 | && (chip_info->endian_rx != SSP_RX_LSB)) { | ||
1286 | dev_err(chip_info->dev, | ||
1287 | "RX FIFO endianess is configured incorrectly\n"); | ||
1288 | return -EINVAL; | ||
1289 | } | ||
1290 | if ((chip_info->endian_tx != SSP_TX_MSB) | ||
1291 | && (chip_info->endian_tx != SSP_TX_LSB)) { | ||
1292 | dev_err(chip_info->dev, | ||
1293 | "TX FIFO endianess is configured incorrectly\n"); | ||
1294 | return -EINVAL; | ||
1295 | } | ||
1296 | if ((chip_info->data_size < SSP_DATA_BITS_4) | ||
1297 | || (chip_info->data_size > SSP_DATA_BITS_32)) { | ||
1298 | dev_err(chip_info->dev, | ||
1299 | "DATA Size is configured incorrectly\n"); | ||
1300 | return -EINVAL; | ||
1301 | } | ||
1302 | if ((chip_info->com_mode != INTERRUPT_TRANSFER) | 1625 | if ((chip_info->com_mode != INTERRUPT_TRANSFER) |
1303 | && (chip_info->com_mode != DMA_TRANSFER) | 1626 | && (chip_info->com_mode != DMA_TRANSFER) |
1304 | && (chip_info->com_mode != POLLING_TRANSFER)) { | 1627 | && (chip_info->com_mode != POLLING_TRANSFER)) { |
1305 | dev_err(chip_info->dev, | 1628 | dev_err(&pl022->adev->dev, |
1306 | "Communication mode is configured incorrectly\n"); | 1629 | "Communication mode is configured incorrectly\n"); |
1307 | return -EINVAL; | 1630 | return -EINVAL; |
1308 | } | 1631 | } |
1309 | if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) | 1632 | if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) |
1310 | || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { | 1633 | || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { |
1311 | dev_err(chip_info->dev, | 1634 | dev_err(&pl022->adev->dev, |
1312 | "RX FIFO Trigger Level is configured incorrectly\n"); | 1635 | "RX FIFO Trigger Level is configured incorrectly\n"); |
1313 | return -EINVAL; | 1636 | return -EINVAL; |
1314 | } | 1637 | } |
1315 | if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) | 1638 | if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) |
1316 | || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { | 1639 | || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { |
1317 | dev_err(chip_info->dev, | 1640 | dev_err(&pl022->adev->dev, |
1318 | "TX FIFO Trigger Level is configured incorrectly\n"); | 1641 | "TX FIFO Trigger Level is configured incorrectly\n"); |
1319 | return -EINVAL; | 1642 | return -EINVAL; |
1320 | } | 1643 | } |
1321 | if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { | ||
1322 | if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE) | ||
1323 | && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) { | ||
1324 | dev_err(chip_info->dev, | ||
1325 | "Clock Phase is configured incorrectly\n"); | ||
1326 | return -EINVAL; | ||
1327 | } | ||
1328 | if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW) | ||
1329 | && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) { | ||
1330 | dev_err(chip_info->dev, | ||
1331 | "Clock Polarity is configured incorrectly\n"); | ||
1332 | return -EINVAL; | ||
1333 | } | ||
1334 | } | ||
1335 | if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { | 1644 | if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { |
1336 | if ((chip_info->ctrl_len < SSP_BITS_4) | 1645 | if ((chip_info->ctrl_len < SSP_BITS_4) |
1337 | || (chip_info->ctrl_len > SSP_BITS_32)) { | 1646 | || (chip_info->ctrl_len > SSP_BITS_32)) { |
1338 | dev_err(chip_info->dev, | 1647 | dev_err(&pl022->adev->dev, |
1339 | "CTRL LEN is configured incorrectly\n"); | 1648 | "CTRL LEN is configured incorrectly\n"); |
1340 | return -EINVAL; | 1649 | return -EINVAL; |
1341 | } | 1650 | } |
1342 | if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) | 1651 | if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) |
1343 | && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { | 1652 | && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { |
1344 | dev_err(chip_info->dev, | 1653 | dev_err(&pl022->adev->dev, |
1345 | "Wait State is configured incorrectly\n"); | 1654 | "Wait State is configured incorrectly\n"); |
1346 | return -EINVAL; | 1655 | return -EINVAL; |
1347 | } | 1656 | } |
@@ -1350,24 +1659,20 @@ static int verify_controller_parameters(struct pl022 *pl022, | |||
1350 | if ((chip_info->duplex != | 1659 | if ((chip_info->duplex != |
1351 | SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) | 1660 | SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) |
1352 | && (chip_info->duplex != | 1661 | && (chip_info->duplex != |
1353 | SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) | 1662 | SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { |
1354 | dev_err(chip_info->dev, | 1663 | dev_err(&pl022->adev->dev, |
1355 | "Microwire duplex mode is configured incorrectly\n"); | 1664 | "Microwire duplex mode is configured incorrectly\n"); |
1356 | return -EINVAL; | 1665 | return -EINVAL; |
1666 | } | ||
1357 | } else { | 1667 | } else { |
1358 | if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) | 1668 | if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) |
1359 | dev_err(chip_info->dev, | 1669 | dev_err(&pl022->adev->dev, |
1360 | "Microwire half duplex mode requested," | 1670 | "Microwire half duplex mode requested," |
1361 | " but this is only available in the" | 1671 | " but this is only available in the" |
1362 | " ST version of PL022\n"); | 1672 | " ST version of PL022\n"); |
1363 | return -EINVAL; | 1673 | return -EINVAL; |
1364 | } | 1674 | } |
1365 | } | 1675 | } |
1366 | if (chip_info->cs_control == NULL) { | ||
1367 | dev_warn(chip_info->dev, | ||
1368 | "Chip Select Function is NULL for this chip\n"); | ||
1369 | chip_info->cs_control = null_cs_control; | ||
1370 | } | ||
1371 | return 0; | 1676 | return 0; |
1372 | } | 1677 | } |
1373 | 1678 | ||
@@ -1387,7 +1692,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) | |||
1387 | 1692 | ||
1388 | spin_lock_irqsave(&pl022->queue_lock, flags); | 1693 | spin_lock_irqsave(&pl022->queue_lock, flags); |
1389 | 1694 | ||
1390 | if (pl022->run == QUEUE_STOPPED) { | 1695 | if (!pl022->running) { |
1391 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1696 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
1392 | return -ESHUTDOWN; | 1697 | return -ESHUTDOWN; |
1393 | } | 1698 | } |
@@ -1396,7 +1701,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) | |||
1396 | msg->state = STATE_START; | 1701 | msg->state = STATE_START; |
1397 | 1702 | ||
1398 | list_add_tail(&msg->queue, &pl022->queue); | 1703 | list_add_tail(&msg->queue, &pl022->queue); |
1399 | if (pl022->run == QUEUE_RUNNING && !pl022->busy) | 1704 | if (pl022->running && !pl022->busy) |
1400 | queue_work(pl022->workqueue, &pl022->pump_messages); | 1705 | queue_work(pl022->workqueue, &pl022->pump_messages); |
1401 | 1706 | ||
1402 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | 1707 | spin_unlock_irqrestore(&pl022->queue_lock, flags); |
@@ -1467,22 +1772,24 @@ static int calculate_effective_freq(struct pl022 *pl022, | |||
1467 | return 0; | 1772 | return 0; |
1468 | } | 1773 | } |
1469 | 1774 | ||
1470 | /** | 1775 | |
1471 | * NOT IMPLEMENTED | 1776 | /* |
1472 | * process_dma_info - Processes the DMA info provided by client drivers | 1777 | * A piece of default chip info unless the platform |
1473 | * @chip_info: chip info provided by client device | 1778 | * supplies it. |
1474 | * @chip: Runtime state maintained by the SSP controller for each spi device | ||
1475 | * | ||
1476 | * This function processes and stores DMA config provided by client driver | ||
1477 | * into the runtime state maintained by the SSP controller driver | ||
1478 | */ | 1779 | */ |
1479 | static int process_dma_info(struct pl022_config_chip *chip_info, | 1780 | static const struct pl022_config_chip pl022_default_chip_info = { |
1480 | struct chip_data *chip) | 1781 | .com_mode = POLLING_TRANSFER, |
1481 | { | 1782 | .iface = SSP_INTERFACE_MOTOROLA_SPI, |
1482 | dev_err(chip_info->dev, | 1783 | .hierarchy = SSP_SLAVE, |
1483 | "cannot process DMA info, DMA not implemented!\n"); | 1784 | .slave_tx_disable = DO_NOT_DRIVE_TX, |
1484 | return -ENOTSUPP; | 1785 | .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, |
1485 | } | 1786 | .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, |
1787 | .ctrl_len = SSP_BITS_8, | ||
1788 | .wait_state = SSP_MWIRE_WAIT_ZERO, | ||
1789 | .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, | ||
1790 | .cs_control = null_cs_control, | ||
1791 | }; | ||
1792 | |||
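The pl022_default_chip_info added above is only a fallback; a board that wants anything other than polled transfers still hands the driver a pl022_config_chip through spi_board_info.controller_data. A minimal board-file sketch under assumed values (the GPIO number, bus number, speed and slave name are illustrative, not taken from this patch; only fields that survive this patch are used):

#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <linux/amba/pl022.h>

#define EXAMPLE_CS_GPIO	42	/* assumed board GPIO used as chip select */

static void example_cs_control(u32 control)
{
	/* hypothetical callback: active-low GPIO chip select */
	gpio_set_value(EXAMPLE_CS_GPIO, control == SSP_CHIP_SELECT ? 0 : 1);
}

static struct pl022_config_chip example_chip_info = {
	.com_mode	= INTERRUPT_TRANSFER,
	.iface		= SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy	= SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig	= SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig	= SSP_TX_1_OR_MORE_EMPTY_LOC,
	.cs_control	= example_cs_control,
};

static struct spi_board_info example_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",	/* assumed slave driver */
		.controller_data = &example_chip_info,
		.max_speed_hz	= 1000000,	/* assumed */
		.bus_num	= 0,		/* assumed */
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};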
1486 | 1793 | ||
1487 | /** | 1794 | /** |
1488 | * pl022_setup - setup function registered to SPI master framework | 1795 | * pl022_setup - setup function registered to SPI master framework |
@@ -1496,23 +1803,15 @@ static int process_dma_info(struct pl022_config_chip *chip_info, | |||
1496 | * controller hardware here, that is not done until the actual transfer | 1803 | * controller hardware here, that is not done until the actual transfer |
1497 | * commence. | 1804 | * commence. |
1498 | */ | 1805 | */ |
1499 | |||
1500 | /* FIXME: JUST GUESSING the spi->mode bits understood by this driver */ | ||
1501 | #define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | ||
1502 | | SPI_LSB_FIRST | SPI_LOOP) | ||
1503 | |||
1504 | static int pl022_setup(struct spi_device *spi) | 1806 | static int pl022_setup(struct spi_device *spi) |
1505 | { | 1807 | { |
1506 | struct pl022_config_chip *chip_info; | 1808 | struct pl022_config_chip const *chip_info; |
1507 | struct chip_data *chip; | 1809 | struct chip_data *chip; |
1810 | struct ssp_clock_params clk_freq = {0, }; | ||
1508 | int status = 0; | 1811 | int status = 0; |
1509 | struct pl022 *pl022 = spi_master_get_devdata(spi->master); | 1812 | struct pl022 *pl022 = spi_master_get_devdata(spi->master); |
1510 | 1813 | unsigned int bits = spi->bits_per_word; | |
1511 | if (spi->mode & ~MODEBITS) { | 1814 | u32 tmp; |
1512 | dev_dbg(&spi->dev, "unsupported mode bits %x\n", | ||
1513 | spi->mode & ~MODEBITS); | ||
1514 | return -EINVAL; | ||
1515 | } | ||
1516 | 1815 | ||
1517 | if (!spi->max_speed_hz) | 1816 | if (!spi->max_speed_hz) |
1518 | return -EINVAL; | 1817 | return -EINVAL; |
@@ -1535,48 +1834,13 @@ static int pl022_setup(struct spi_device *spi) | |||
1535 | chip_info = spi->controller_data; | 1834 | chip_info = spi->controller_data; |
1536 | 1835 | ||
1537 | if (chip_info == NULL) { | 1836 | if (chip_info == NULL) { |
1837 | chip_info = &pl022_default_chip_info; | ||
1538 | /* spi_board_info.controller_data not is supplied */ | 1838 | /* spi_board_info.controller_data not is supplied */ |
1539 | dev_dbg(&spi->dev, | 1839 | dev_dbg(&spi->dev, |
1540 | "using default controller_data settings\n"); | 1840 | "using default controller_data settings\n"); |
1541 | 1841 | } else | |
1542 | chip_info = | ||
1543 | kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL); | ||
1544 | |||
1545 | if (!chip_info) { | ||
1546 | dev_err(&spi->dev, | ||
1547 | "cannot allocate controller data\n"); | ||
1548 | status = -ENOMEM; | ||
1549 | goto err_first_setup; | ||
1550 | } | ||
1551 | |||
1552 | dev_dbg(&spi->dev, "allocated memory for controller data\n"); | ||
1553 | |||
1554 | /* Pointer back to the SPI device */ | ||
1555 | chip_info->dev = &spi->dev; | ||
1556 | /* | ||
1557 | * Set controller data default values: | ||
1558 | * Polling is supported by default | ||
1559 | */ | ||
1560 | chip_info->lbm = LOOPBACK_DISABLED; | ||
1561 | chip_info->com_mode = POLLING_TRANSFER; | ||
1562 | chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI; | ||
1563 | chip_info->hierarchy = SSP_SLAVE; | ||
1564 | chip_info->slave_tx_disable = DO_NOT_DRIVE_TX; | ||
1565 | chip_info->endian_tx = SSP_TX_LSB; | ||
1566 | chip_info->endian_rx = SSP_RX_LSB; | ||
1567 | chip_info->data_size = SSP_DATA_BITS_12; | ||
1568 | chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; | ||
1569 | chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; | ||
1570 | chip_info->clk_phase = SSP_CLK_SECOND_EDGE; | ||
1571 | chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; | ||
1572 | chip_info->ctrl_len = SSP_BITS_8; | ||
1573 | chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; | ||
1574 | chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX; | ||
1575 | chip_info->cs_control = null_cs_control; | ||
1576 | } else { | ||
1577 | dev_dbg(&spi->dev, | 1842 | dev_dbg(&spi->dev, |
1578 | "using user supplied controller_data settings\n"); | 1843 | "using user supplied controller_data settings\n"); |
1579 | } | ||
1580 | 1844 | ||
1581 | /* | 1845 | /* |
1582 | * We can override with custom divisors, else we use the board | 1846 | * We can override with custom divisors, else we use the board |
@@ -1586,29 +1850,49 @@ static int pl022_setup(struct spi_device *spi) | |||
1586 | && (0 == chip_info->clk_freq.scr)) { | 1850 | && (0 == chip_info->clk_freq.scr)) { |
1587 | status = calculate_effective_freq(pl022, | 1851 | status = calculate_effective_freq(pl022, |
1588 | spi->max_speed_hz, | 1852 | spi->max_speed_hz, |
1589 | &chip_info->clk_freq); | 1853 | &clk_freq); |
1590 | if (status < 0) | 1854 | if (status < 0) |
1591 | goto err_config_params; | 1855 | goto err_config_params; |
1592 | } else { | 1856 | } else { |
1593 | if ((chip_info->clk_freq.cpsdvsr % 2) != 0) | 1857 | memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); |
1594 | chip_info->clk_freq.cpsdvsr = | 1858 | if ((clk_freq.cpsdvsr % 2) != 0) |
1595 | chip_info->clk_freq.cpsdvsr - 1; | 1859 | clk_freq.cpsdvsr = |
1860 | clk_freq.cpsdvsr - 1; | ||
1861 | } | ||
1862 | if ((clk_freq.cpsdvsr < CPSDVR_MIN) | ||
1863 | || (clk_freq.cpsdvsr > CPSDVR_MAX)) { | ||
1864 | status = -EINVAL; | ||
1865 | dev_err(&spi->dev, | ||
1866 | "cpsdvsr is configured incorrectly\n"); | ||
1867 | goto err_config_params; | ||
1596 | } | 1868 | } |
1869 | |||
1870 | |||
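For reference when reading the divisor handling above: on the PL022 the effective bit rate is the SSP input clock divided by CPSDVSR times (1 + SCR), where CPSDVSR must be an even value in the CPSDVR_MIN..CPSDVR_MAX range, which is why an odd board-supplied value is rounded down before the range check. A hedged worked example (the 48 MHz input clock is an assumption, not from this patch):

	/* rate = spiclk / (cpsdvsr * (1 + scr)); cpsdvsr even, CPSDVR_MIN..CPSDVR_MAX */
	/* e.g. assumed spiclk = 48 MHz, cpsdvsr = 2, scr = 11  ->  48e6 / (2 * 12) = 2 MHz */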
1597 | status = verify_controller_parameters(pl022, chip_info); | 1871 | status = verify_controller_parameters(pl022, chip_info); |
1598 | if (status) { | 1872 | if (status) { |
1599 | dev_err(&spi->dev, "controller data is incorrect"); | 1873 | dev_err(&spi->dev, "controller data is incorrect"); |
1600 | goto err_config_params; | 1874 | goto err_config_params; |
1601 | } | 1875 | } |
1876 | |||
1602 | /* Now set controller state based on controller data */ | 1877 | /* Now set controller state based on controller data */ |
1603 | chip->xfer_type = chip_info->com_mode; | 1878 | chip->xfer_type = chip_info->com_mode; |
1604 | chip->cs_control = chip_info->cs_control; | 1879 | if (!chip_info->cs_control) { |
1605 | 1880 | chip->cs_control = null_cs_control; | |
1606 | if (chip_info->data_size <= 8) { | 1881 | dev_warn(&spi->dev, |
1607 | dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n"); | 1882 | "chip select function is NULL for this chip\n"); |
1883 | } else | ||
1884 | chip->cs_control = chip_info->cs_control; | ||
1885 | |||
1886 | if (bits <= 3) { | ||
1887 | /* PL022 doesn't support less than 4-bits */ | ||
1888 | status = -ENOTSUPP; | ||
1889 | goto err_config_params; | ||
1890 | } else if (bits <= 8) { | ||
1891 | dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); | ||
1608 | chip->n_bytes = 1; | 1892 | chip->n_bytes = 1; |
1609 | chip->read = READING_U8; | 1893 | chip->read = READING_U8; |
1610 | chip->write = WRITING_U8; | 1894 | chip->write = WRITING_U8; |
1611 | } else if (chip_info->data_size <= 16) { | 1895 | } else if (bits <= 16) { |
1612 | dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); | 1896 | dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); |
1613 | chip->n_bytes = 2; | 1897 | chip->n_bytes = 2; |
1614 | chip->read = READING_U16; | 1898 | chip->read = READING_U16; |
@@ -1625,6 +1909,7 @@ static int pl022_setup(struct spi_device *spi) | |||
1625 | dev_err(&spi->dev, | 1909 | dev_err(&spi->dev, |
1626 | "a standard pl022 can only handle " | 1910 | "a standard pl022 can only handle " |
1627 | "1 <= n <= 16 bit words\n"); | 1911 | "1 <= n <= 16 bit words\n"); |
1912 | status = -ENOTSUPP; | ||
1628 | goto err_config_params; | 1913 | goto err_config_params; |
1629 | } | 1914 | } |
1630 | } | 1915 | } |
@@ -1636,17 +1921,14 @@ static int pl022_setup(struct spi_device *spi) | |||
1636 | chip->cpsr = 0; | 1921 | chip->cpsr = 0; |
1637 | if ((chip_info->com_mode == DMA_TRANSFER) | 1922 | if ((chip_info->com_mode == DMA_TRANSFER) |
1638 | && ((pl022->master_info)->enable_dma)) { | 1923 | && ((pl022->master_info)->enable_dma)) { |
1639 | chip->enable_dma = 1; | 1924 | chip->enable_dma = true; |
1640 | dev_dbg(&spi->dev, "DMA mode set in controller state\n"); | 1925 | dev_dbg(&spi->dev, "DMA mode set in controller state\n"); |
1641 | status = process_dma_info(chip_info, chip); | ||
1642 | if (status < 0) | ||
1643 | goto err_config_params; | ||
1644 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, | 1926 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, |
1645 | SSP_DMACR_MASK_RXDMAE, 0); | 1927 | SSP_DMACR_MASK_RXDMAE, 0); |
1646 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, | 1928 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, |
1647 | SSP_DMACR_MASK_TXDMAE, 1); | 1929 | SSP_DMACR_MASK_TXDMAE, 1); |
1648 | } else { | 1930 | } else { |
1649 | chip->enable_dma = 0; | 1931 | chip->enable_dma = false; |
1650 | dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); | 1932 | dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); |
1651 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, | 1933 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, |
1652 | SSP_DMACR_MASK_RXDMAE, 0); | 1934 | SSP_DMACR_MASK_RXDMAE, 0); |
@@ -1654,10 +1936,12 @@ static int pl022_setup(struct spi_device *spi) | |||
1654 | SSP_DMACR_MASK_TXDMAE, 1); | 1936 | SSP_DMACR_MASK_TXDMAE, 1); |
1655 | } | 1937 | } |
1656 | 1938 | ||
1657 | chip->cpsr = chip_info->clk_freq.cpsdvsr; | 1939 | chip->cpsr = clk_freq.cpsdvsr; |
1658 | 1940 | ||
1659 | /* Special setup for the ST micro extended control registers */ | 1941 | /* Special setup for the ST micro extended control registers */ |
1660 | if (pl022->vendor->extended_cr) { | 1942 | if (pl022->vendor->extended_cr) { |
1943 | u32 etx; | ||
1944 | |||
1661 | if (pl022->vendor->pl023) { | 1945 | if (pl022->vendor->pl023) { |
1662 | /* These bits are only in the PL023 */ | 1946 | /* These bits are only in the PL023 */ |
1663 | SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, | 1947 | SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, |
@@ -1673,29 +1957,51 @@ static int pl022_setup(struct spi_device *spi) | |||
1673 | SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, | 1957 | SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, |
1674 | SSP_CR1_MASK_MWAIT_ST, 6); | 1958 | SSP_CR1_MASK_MWAIT_ST, 6); |
1675 | } | 1959 | } |
1676 | SSP_WRITE_BITS(chip->cr0, chip_info->data_size, | 1960 | SSP_WRITE_BITS(chip->cr0, bits - 1, |
1677 | SSP_CR0_MASK_DSS_ST, 0); | 1961 | SSP_CR0_MASK_DSS_ST, 0); |
1678 | SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, | 1962 | |
1679 | SSP_CR1_MASK_RENDN_ST, 4); | 1963 | if (spi->mode & SPI_LSB_FIRST) { |
1680 | SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, | 1964 | tmp = SSP_RX_LSB; |
1681 | SSP_CR1_MASK_TENDN_ST, 5); | 1965 | etx = SSP_TX_LSB; |
1966 | } else { | ||
1967 | tmp = SSP_RX_MSB; | ||
1968 | etx = SSP_TX_MSB; | ||
1969 | } | ||
1970 | SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); | ||
1971 | SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); | ||
1682 | SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, | 1972 | SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, |
1683 | SSP_CR1_MASK_RXIFLSEL_ST, 7); | 1973 | SSP_CR1_MASK_RXIFLSEL_ST, 7); |
1684 | SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, | 1974 | SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, |
1685 | SSP_CR1_MASK_TXIFLSEL_ST, 10); | 1975 | SSP_CR1_MASK_TXIFLSEL_ST, 10); |
1686 | } else { | 1976 | } else { |
1687 | SSP_WRITE_BITS(chip->cr0, chip_info->data_size, | 1977 | SSP_WRITE_BITS(chip->cr0, bits - 1, |
1688 | SSP_CR0_MASK_DSS, 0); | 1978 | SSP_CR0_MASK_DSS, 0); |
1689 | SSP_WRITE_BITS(chip->cr0, chip_info->iface, | 1979 | SSP_WRITE_BITS(chip->cr0, chip_info->iface, |
1690 | SSP_CR0_MASK_FRF, 4); | 1980 | SSP_CR0_MASK_FRF, 4); |
1691 | } | 1981 | } |
1982 | |||
1692 | /* Stuff that is common for all versions */ | 1983 | /* Stuff that is common for all versions */ |
1693 | SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); | 1984 | if (spi->mode & SPI_CPOL) |
1694 | SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); | 1985 | tmp = SSP_CLK_POL_IDLE_HIGH; |
1695 | SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); | 1986 | else |
1987 | tmp = SSP_CLK_POL_IDLE_LOW; | ||
1988 | SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); | ||
1989 | |||
1990 | if (spi->mode & SPI_CPHA) | ||
1991 | tmp = SSP_CLK_SECOND_EDGE; | ||
1992 | else | ||
1993 | tmp = SSP_CLK_FIRST_EDGE; | ||
1994 | SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); | ||
1995 | |||
1996 | SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); | ||
1696 | /* Loopback is available on all versions except PL023 */ | 1997 | /* Loopback is available on all versions except PL023 */ |
1697 | if (!pl022->vendor->pl023) | 1998 | if (pl022->vendor->loopback) { |
1698 | SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); | 1999 | if (spi->mode & SPI_LOOP) |
2000 | tmp = LOOPBACK_ENABLED; | ||
2001 | else | ||
2002 | tmp = LOOPBACK_DISABLED; | ||
2003 | SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); | ||
2004 | } | ||
1699 | SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); | 2005 | SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); |
1700 | SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); | 2006 | SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); |
1701 | SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); | 2007 | SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); |
@@ -1704,7 +2010,7 @@ static int pl022_setup(struct spi_device *spi) | |||
1704 | spi_set_ctldata(spi, chip); | 2010 | spi_set_ctldata(spi, chip); |
1705 | return status; | 2011 | return status; |
1706 | err_config_params: | 2012 | err_config_params: |
1707 | err_first_setup: | 2013 | spi_set_ctldata(spi, NULL); |
1708 | kfree(chip); | 2014 | kfree(chip); |
1709 | return status; | 2015 | return status; |
1710 | } | 2016 | } |
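With the setup path rewritten above, clock polarity/phase, loopback, bit order and word size are no longer carried in pl022_config_chip but come from the generic spi_device fields, so a protocol driver requests them through the normal SPI core call. A minimal client-side sketch (the 16-bit, mode-3 values are just an example):

	spi->mode = SPI_MODE_3;		/* CPOL = 1, CPHA = 1 */
	spi->bits_per_word = 16;
	status = spi_setup(spi);	/* re-runs pl022_setup() with the new settings */
	if (status)
		return status;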
@@ -1726,7 +2032,7 @@ static void pl022_cleanup(struct spi_device *spi) | |||
1726 | 2032 | ||
1727 | 2033 | ||
1728 | static int __devinit | 2034 | static int __devinit |
1729 | pl022_probe(struct amba_device *adev, struct amba_id *id) | 2035 | pl022_probe(struct amba_device *adev, const struct amba_id *id) |
1730 | { | 2036 | { |
1731 | struct device *dev = &adev->dev; | 2037 | struct device *dev = &adev->dev; |
1732 | struct pl022_ssp_controller *platform_info = adev->dev.platform_data; | 2038 | struct pl022_ssp_controller *platform_info = adev->dev.platform_data; |
@@ -1766,12 +2072,21 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
1766 | master->setup = pl022_setup; | 2072 | master->setup = pl022_setup; |
1767 | master->transfer = pl022_transfer; | 2073 | master->transfer = pl022_transfer; |
1768 | 2074 | ||
2075 | /* | ||
2076 | * Supports mode 0-3, loopback, and active low CS. Transfers are | ||
2077 | * always MS bit first on the original pl022. | ||
2078 | */ | ||
2079 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | ||
2080 | if (pl022->vendor->extended_cr) | ||
2081 | master->mode_bits |= SPI_LSB_FIRST; | ||
2082 | |||
1769 | dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); | 2083 | dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); |
1770 | 2084 | ||
1771 | status = amba_request_regions(adev, NULL); | 2085 | status = amba_request_regions(adev, NULL); |
1772 | if (status) | 2086 | if (status) |
1773 | goto err_no_ioregion; | 2087 | goto err_no_ioregion; |
1774 | 2088 | ||
2089 | pl022->phybase = adev->res.start; | ||
1775 | pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); | 2090 | pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); |
1776 | if (pl022->virtbase == NULL) { | 2091 | if (pl022->virtbase == NULL) { |
1777 | status = -ENOMEM; | 2092 | status = -ENOMEM; |
@@ -1798,6 +2113,14 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
1798 | dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); | 2113 | dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); |
1799 | goto err_no_irq; | 2114 | goto err_no_irq; |
1800 | } | 2115 | } |
2116 | |||
2117 | /* Get DMA channels */ | ||
2118 | if (platform_info->enable_dma) { | ||
2119 | status = pl022_dma_probe(pl022); | ||
2120 | if (status != 0) | ||
2121 | platform_info->enable_dma = 0; | ||
2122 | } | ||
2123 | |||
1801 | /* Initialize and start queue */ | 2124 | /* Initialize and start queue */ |
1802 | status = init_queue(pl022); | 2125 | status = init_queue(pl022); |
1803 | if (status != 0) { | 2126 | if (status != 0) { |
@@ -1817,15 +2140,20 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) | |||
1817 | "probe - problem registering spi master\n"); | 2140 | "probe - problem registering spi master\n"); |
1818 | goto err_spi_register; | 2141 | goto err_spi_register; |
1819 | } | 2142 | } |
1820 | dev_dbg(dev, "probe succeded\n"); | 2143 | dev_dbg(dev, "probe succeeded\n"); |
1821 | /* Disable the silicon block pclk and clock it when needed */ | 2144 | /* |
2145 | * Disable the silicon block pclk and any voltage domain and just | ||
2146 | * power it up and clock it when it's needed | ||
2147 | */ | ||
1822 | amba_pclk_disable(adev); | 2148 | amba_pclk_disable(adev); |
2149 | amba_vcore_disable(adev); | ||
1823 | return 0; | 2150 | return 0; |
1824 | 2151 | ||
1825 | err_spi_register: | 2152 | err_spi_register: |
1826 | err_start_queue: | 2153 | err_start_queue: |
1827 | err_init_queue: | 2154 | err_init_queue: |
1828 | destroy_queue(pl022); | 2155 | destroy_queue(pl022); |
2156 | pl022_dma_remove(pl022); | ||
1829 | free_irq(adev->irq[0], pl022); | 2157 | free_irq(adev->irq[0], pl022); |
1830 | err_no_irq: | 2158 | err_no_irq: |
1831 | clk_put(pl022->clk); | 2159 | clk_put(pl022->clk); |
@@ -1856,6 +2184,7 @@ pl022_remove(struct amba_device *adev) | |||
1856 | return status; | 2184 | return status; |
1857 | } | 2185 | } |
1858 | load_ssp_default_config(pl022); | 2186 | load_ssp_default_config(pl022); |
2187 | pl022_dma_remove(pl022); | ||
1859 | free_irq(adev->irq[0], pl022); | 2188 | free_irq(adev->irq[0], pl022); |
1860 | clk_disable(pl022->clk); | 2189 | clk_disable(pl022->clk); |
1861 | clk_put(pl022->clk); | 2190 | clk_put(pl022->clk); |
@@ -1865,7 +2194,7 @@ pl022_remove(struct amba_device *adev) | |||
1865 | spi_unregister_master(pl022->master); | 2194 | spi_unregister_master(pl022->master); |
1866 | spi_master_put(pl022->master); | 2195 | spi_master_put(pl022->master); |
1867 | amba_set_drvdata(adev, NULL); | 2196 | amba_set_drvdata(adev, NULL); |
1868 | dev_dbg(&adev->dev, "remove succeded\n"); | 2197 | dev_dbg(&adev->dev, "remove succeeded\n"); |
1869 | return 0; | 2198 | return 0; |
1870 | } | 2199 | } |
1871 | 2200 | ||
@@ -1881,9 +2210,11 @@ static int pl022_suspend(struct amba_device *adev, pm_message_t state) | |||
1881 | return status; | 2210 | return status; |
1882 | } | 2211 | } |
1883 | 2212 | ||
2213 | amba_vcore_enable(adev); | ||
1884 | amba_pclk_enable(adev); | 2214 | amba_pclk_enable(adev); |
1885 | load_ssp_default_config(pl022); | 2215 | load_ssp_default_config(pl022); |
1886 | amba_pclk_disable(adev); | 2216 | amba_pclk_disable(adev); |
2217 | amba_vcore_disable(adev); | ||
1887 | dev_dbg(&adev->dev, "suspended\n"); | 2218 | dev_dbg(&adev->dev, "suspended\n"); |
1888 | return 0; | 2219 | return 0; |
1889 | } | 2220 | } |
@@ -1913,6 +2244,7 @@ static struct vendor_data vendor_arm = { | |||
1913 | .unidir = false, | 2244 | .unidir = false, |
1914 | .extended_cr = false, | 2245 | .extended_cr = false, |
1915 | .pl023 = false, | 2246 | .pl023 = false, |
2247 | .loopback = true, | ||
1916 | }; | 2248 | }; |
1917 | 2249 | ||
1918 | 2250 | ||
@@ -1922,6 +2254,7 @@ static struct vendor_data vendor_st = { | |||
1922 | .unidir = false, | 2254 | .unidir = false, |
1923 | .extended_cr = true, | 2255 | .extended_cr = true, |
1924 | .pl023 = false, | 2256 | .pl023 = false, |
2257 | .loopback = true, | ||
1925 | }; | 2258 | }; |
1926 | 2259 | ||
1927 | static struct vendor_data vendor_st_pl023 = { | 2260 | static struct vendor_data vendor_st_pl023 = { |
@@ -1930,6 +2263,16 @@ static struct vendor_data vendor_st_pl023 = { | |||
1930 | .unidir = false, | 2263 | .unidir = false, |
1931 | .extended_cr = true, | 2264 | .extended_cr = true, |
1932 | .pl023 = true, | 2265 | .pl023 = true, |
2266 | .loopback = false, | ||
2267 | }; | ||
2268 | |||
2269 | static struct vendor_data vendor_db5500_pl023 = { | ||
2270 | .fifodepth = 32, | ||
2271 | .max_bpw = 32, | ||
2272 | .unidir = false, | ||
2273 | .extended_cr = true, | ||
2274 | .pl023 = true, | ||
2275 | .loopback = true, | ||
1933 | }; | 2276 | }; |
1934 | 2277 | ||
1935 | static struct amba_id pl022_ids[] = { | 2278 | static struct amba_id pl022_ids[] = { |
@@ -1963,6 +2306,11 @@ static struct amba_id pl022_ids[] = { | |||
1963 | .mask = 0xffffffff, | 2306 | .mask = 0xffffffff, |
1964 | .data = &vendor_st_pl023, | 2307 | .data = &vendor_st_pl023, |
1965 | }, | 2308 | }, |
2309 | { | ||
2310 | .id = 0x10080023, | ||
2311 | .mask = 0xffffffff, | ||
2312 | .data = &vendor_db5500_pl023, | ||
2313 | }, | ||
1966 | { 0, 0 }, | 2314 | { 0, 0 }, |
1967 | }; | 2315 | }; |
1968 | 2316 | ||
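The new 0x10080023 entry works like the existing ones: the AMBA primecell ID selects a vendor_data block, and probe picks it up from the matching table entry. A sketch of that lookup, written as a hypothetical helper purely for illustration (the real driver does the assignment inline in pl022_probe()):

/* sketch: resolve the per-variant data from the amba_id that matched */
static void pl022_pick_vendor(struct pl022 *pl022, const struct amba_id *id)
{
	pl022->vendor = id->data;	/* e.g. &vendor_db5500_pl023 for 0x10080023 */
}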
diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/ath79_spi.c new file mode 100644 index 000000000000..fcff810ea3b0 --- /dev/null +++ b/drivers/spi/ath79_spi.c | |||
@@ -0,0 +1,292 @@ | |||
1 | /* | ||
2 | * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs | ||
3 | * | ||
4 | * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> | ||
5 | * | ||
6 | * This driver has been based on the spi-gpio.c: | ||
7 | * Copyright (C) 2006,2008 David Brownell | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/workqueue.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/io.h> | ||
22 | #include <linux/spi/spi.h> | ||
23 | #include <linux/spi/spi_bitbang.h> | ||
24 | #include <linux/bitops.h> | ||
25 | #include <linux/gpio.h> | ||
26 | |||
27 | #include <asm/mach-ath79/ar71xx_regs.h> | ||
28 | #include <asm/mach-ath79/ath79_spi_platform.h> | ||
29 | |||
30 | #define DRV_NAME "ath79-spi" | ||
31 | |||
32 | struct ath79_spi { | ||
33 | struct spi_bitbang bitbang; | ||
34 | u32 ioc_base; | ||
35 | u32 reg_ctrl; | ||
36 | void __iomem *base; | ||
37 | }; | ||
38 | |||
39 | static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg) | ||
40 | { | ||
41 | return ioread32(sp->base + reg); | ||
42 | } | ||
43 | |||
44 | static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val) | ||
45 | { | ||
46 | iowrite32(val, sp->base + reg); | ||
47 | } | ||
48 | |||
49 | static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi) | ||
50 | { | ||
51 | return spi_master_get_devdata(spi->master); | ||
52 | } | ||
53 | |||
54 | static void ath79_spi_chipselect(struct spi_device *spi, int is_active) | ||
55 | { | ||
56 | struct ath79_spi *sp = ath79_spidev_to_sp(spi); | ||
57 | int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active; | ||
58 | |||
59 | if (is_active) { | ||
60 | /* set initial clock polarity */ | ||
61 | if (spi->mode & SPI_CPOL) | ||
62 | sp->ioc_base |= AR71XX_SPI_IOC_CLK; | ||
63 | else | ||
64 | sp->ioc_base &= ~AR71XX_SPI_IOC_CLK; | ||
65 | |||
66 | ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); | ||
67 | } | ||
68 | |||
69 | if (spi->chip_select) { | ||
70 | struct ath79_spi_controller_data *cdata = spi->controller_data; | ||
71 | |||
72 | /* SPI is normally active-low */ | ||
73 | gpio_set_value(cdata->gpio, cs_high); | ||
74 | } else { | ||
75 | if (cs_high) | ||
76 | sp->ioc_base |= AR71XX_SPI_IOC_CS0; | ||
77 | else | ||
78 | sp->ioc_base &= ~AR71XX_SPI_IOC_CS0; | ||
79 | |||
80 | ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); | ||
81 | } | ||
82 | |||
83 | } | ||
84 | |||
85 | static int ath79_spi_setup_cs(struct spi_device *spi) | ||
86 | { | ||
87 | struct ath79_spi *sp = ath79_spidev_to_sp(spi); | ||
88 | struct ath79_spi_controller_data *cdata; | ||
89 | |||
90 | cdata = spi->controller_data; | ||
91 | if (spi->chip_select && !cdata) | ||
92 | return -EINVAL; | ||
93 | |||
94 | /* enable GPIO mode */ | ||
95 | ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO); | ||
96 | |||
97 | /* save CTRL register */ | ||
98 | sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL); | ||
99 | sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC); | ||
100 | |||
101 | /* TODO: setup speed? */ | ||
102 | ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43); | ||
103 | |||
104 | if (spi->chip_select) { | ||
105 | int status = 0; | ||
106 | |||
107 | status = gpio_request(cdata->gpio, dev_name(&spi->dev)); | ||
108 | if (status) | ||
109 | return status; | ||
110 | |||
111 | status = gpio_direction_output(cdata->gpio, | ||
112 | spi->mode & SPI_CS_HIGH); | ||
113 | if (status) { | ||
114 | gpio_free(cdata->gpio); | ||
115 | return status; | ||
116 | } | ||
117 | } else { | ||
118 | if (spi->mode & SPI_CS_HIGH) | ||
119 | sp->ioc_base |= AR71XX_SPI_IOC_CS0; | ||
120 | else | ||
121 | sp->ioc_base &= ~AR71XX_SPI_IOC_CS0; | ||
122 | ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base); | ||
123 | } | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static void ath79_spi_cleanup_cs(struct spi_device *spi) | ||
129 | { | ||
130 | struct ath79_spi *sp = ath79_spidev_to_sp(spi); | ||
131 | |||
132 | if (spi->chip_select) { | ||
133 | struct ath79_spi_controller_data *cdata = spi->controller_data; | ||
134 | gpio_free(cdata->gpio); | ||
135 | } | ||
136 | |||
137 | /* restore CTRL register */ | ||
138 | ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl); | ||
139 | /* disable GPIO mode */ | ||
140 | ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0); | ||
141 | } | ||
142 | |||
143 | static int ath79_spi_setup(struct spi_device *spi) | ||
144 | { | ||
145 | int status = 0; | ||
146 | |||
147 | if (spi->bits_per_word > 32) | ||
148 | return -EINVAL; | ||
149 | |||
150 | if (!spi->controller_state) { | ||
151 | status = ath79_spi_setup_cs(spi); | ||
152 | if (status) | ||
153 | return status; | ||
154 | } | ||
155 | |||
156 | status = spi_bitbang_setup(spi); | ||
157 | if (status && !spi->controller_state) | ||
158 | ath79_spi_cleanup_cs(spi); | ||
159 | |||
160 | return status; | ||
161 | } | ||
162 | |||
163 | static void ath79_spi_cleanup(struct spi_device *spi) | ||
164 | { | ||
165 | ath79_spi_cleanup_cs(spi); | ||
166 | spi_bitbang_cleanup(spi); | ||
167 | } | ||
168 | |||
169 | static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs, | ||
170 | u32 word, u8 bits) | ||
171 | { | ||
172 | struct ath79_spi *sp = ath79_spidev_to_sp(spi); | ||
173 | u32 ioc = sp->ioc_base; | ||
174 | |||
175 | /* clock starts at inactive polarity */ | ||
176 | for (word <<= (32 - bits); likely(bits); bits--) { | ||
177 | u32 out; | ||
178 | |||
179 | if (word & (1 << 31)) | ||
180 | out = ioc | AR71XX_SPI_IOC_DO; | ||
181 | else | ||
182 | out = ioc & ~AR71XX_SPI_IOC_DO; | ||
183 | |||
184 | /* setup MSB (to slave) on trailing edge */ | ||
185 | ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out); | ||
186 | ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK); | ||
187 | |||
188 | word <<= 1; | ||
189 | } | ||
190 | |||
191 | return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS); | ||
192 | } | ||
193 | |||
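ath79_spi_txrx_mode0() above follows the usual spi_bitbang contract: it is called once per word with the data right-aligned in 'word', shifts it out MSB first while toggling the clock, and returns whatever the read-data register collected. A self-contained sketch of the same alignment and bit-walk, with the register writes replaced by accumulation into a local so the loop can be read in isolation (uses the kernel's u32/u8 types; not driver code):

static u32 shift_out_msb_first(u32 word, u8 bits)
{
	u32 out = 0;

	/* left-align so the bit to transmit is always bit 31, as the driver does */
	for (word <<= (32 - bits); bits; bits--) {
		out = (out << 1) | !!(word & (1u << 31));	/* "sample" the outgoing bit */
		word <<= 1;
	}
	return out;	/* on a looped-back bus this equals the original word */
}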
194 | static __devinit int ath79_spi_probe(struct platform_device *pdev) | ||
195 | { | ||
196 | struct spi_master *master; | ||
197 | struct ath79_spi *sp; | ||
198 | struct ath79_spi_platform_data *pdata; | ||
199 | struct resource *r; | ||
200 | int ret; | ||
201 | |||
202 | master = spi_alloc_master(&pdev->dev, sizeof(*sp)); | ||
203 | if (master == NULL) { | ||
204 | dev_err(&pdev->dev, "failed to allocate spi master\n"); | ||
205 | return -ENOMEM; | ||
206 | } | ||
207 | |||
208 | sp = spi_master_get_devdata(master); | ||
209 | platform_set_drvdata(pdev, sp); | ||
210 | |||
211 | pdata = pdev->dev.platform_data; | ||
212 | |||
213 | master->setup = ath79_spi_setup; | ||
214 | master->cleanup = ath79_spi_cleanup; | ||
215 | if (pdata) { | ||
216 | master->bus_num = pdata->bus_num; | ||
217 | master->num_chipselect = pdata->num_chipselect; | ||
218 | } else { | ||
219 | master->bus_num = -1; | ||
220 | master->num_chipselect = 1; | ||
221 | } | ||
222 | |||
223 | sp->bitbang.master = spi_master_get(master); | ||
224 | sp->bitbang.chipselect = ath79_spi_chipselect; | ||
225 | sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0; | ||
226 | sp->bitbang.setup_transfer = spi_bitbang_setup_transfer; | ||
227 | sp->bitbang.flags = SPI_CS_HIGH; | ||
228 | |||
229 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
230 | if (r == NULL) { | ||
231 | ret = -ENOENT; | ||
232 | goto err_put_master; | ||
233 | } | ||
234 | |||
235 | sp->base = ioremap(r->start, r->end - r->start + 1); | ||
236 | if (!sp->base) { | ||
237 | ret = -ENXIO; | ||
238 | goto err_put_master; | ||
239 | } | ||
240 | |||
241 | ret = spi_bitbang_start(&sp->bitbang); | ||
242 | if (ret) | ||
243 | goto err_unmap; | ||
244 | |||
245 | return 0; | ||
246 | |||
247 | err_unmap: | ||
248 | iounmap(sp->base); | ||
249 | err_put_master: | ||
250 | platform_set_drvdata(pdev, NULL); | ||
251 | spi_master_put(sp->bitbang.master); | ||
252 | |||
253 | return ret; | ||
254 | } | ||
255 | |||
256 | static __devexit int ath79_spi_remove(struct platform_device *pdev) | ||
257 | { | ||
258 | struct ath79_spi *sp = platform_get_drvdata(pdev); | ||
259 | |||
260 | spi_bitbang_stop(&sp->bitbang); | ||
261 | iounmap(sp->base); | ||
262 | platform_set_drvdata(pdev, NULL); | ||
263 | spi_master_put(sp->bitbang.master); | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static struct platform_driver ath79_spi_driver = { | ||
269 | .probe = ath79_spi_probe, | ||
270 | .remove = __devexit_p(ath79_spi_remove), | ||
271 | .driver = { | ||
272 | .name = DRV_NAME, | ||
273 | .owner = THIS_MODULE, | ||
274 | }, | ||
275 | }; | ||
276 | |||
277 | static __init int ath79_spi_init(void) | ||
278 | { | ||
279 | return platform_driver_register(&ath79_spi_driver); | ||
280 | } | ||
281 | module_init(ath79_spi_init); | ||
282 | |||
283 | static __exit void ath79_spi_exit(void) | ||
284 | { | ||
285 | platform_driver_unregister(&ath79_spi_driver); | ||
286 | } | ||
287 | module_exit(ath79_spi_exit); | ||
288 | |||
289 | MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X"); | ||
290 | MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); | ||
291 | MODULE_LICENSE("GPL v2"); | ||
292 | MODULE_ALIAS("platform:" DRV_NAME); | ||
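The driver binds against a platform device named "ath79-spi" and takes its bus number and chip-select count from ath79_spi_platform_data; chip selects other than 0 are driven through the GPIO named in ath79_spi_controller_data. A hedged board-support sketch (the GPIO number, clock rate and attached flash device are assumptions for illustration):

static struct ath79_spi_platform_data ath79_spi_data = {
	.bus_num	= 0,
	.num_chipselect	= 2,
};

static struct ath79_spi_controller_data ath79_flash_cdata = {
	.gpio	= 5,			/* assumed GPIO used as CS1 */
};

static struct spi_board_info ath79_spi_info[] = {
	{
		.modalias	= "m25p80",	/* assumed SPI flash slave */
		.bus_num	= 0,
		.chip_select	= 1,
		.max_speed_hz	= 25000000,	/* assumed */
		.controller_data = &ath79_flash_cdata,
	},
};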
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index c4e04428992d..08711e9202ab 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c | |||
@@ -341,9 +341,9 @@ static void atmel_spi_next_message(struct spi_master *master) | |||
341 | /* | 341 | /* |
342 | * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma: | 342 | * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma: |
343 | * - The buffer is either valid for CPU access, else NULL | 343 | * - The buffer is either valid for CPU access, else NULL |
344 | * - If the buffer is valid, so is its DMA addresss | 344 | * - If the buffer is valid, so is its DMA address |
345 | * | 345 | * |
346 | * This driver manages the dma addresss unless message->is_dma_mapped. | 346 | * This driver manages the dma address unless message->is_dma_mapped. |
347 | */ | 347 | */ |
348 | static int | 348 | static int |
349 | atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) | 349 | atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) |
@@ -352,8 +352,12 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) | |||
352 | 352 | ||
353 | xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; | 353 | xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; |
354 | if (xfer->tx_buf) { | 354 | if (xfer->tx_buf) { |
355 | /* tx_buf is a const void* where we need a void * for the dma | ||
356 | * mapping */ | ||
357 | void *nonconst_tx = (void *)xfer->tx_buf; | ||
358 | |||
355 | xfer->tx_dma = dma_map_single(dev, | 359 | xfer->tx_dma = dma_map_single(dev, |
356 | (void *) xfer->tx_buf, xfer->len, | 360 | nonconst_tx, xfer->len, |
357 | DMA_TO_DEVICE); | 361 | DMA_TO_DEVICE); |
358 | if (dma_mapping_error(dev, xfer->tx_dma)) | 362 | if (dma_mapping_error(dev, xfer->tx_dma)) |
359 | return -ENOMEM; | 363 | return -ENOMEM; |
@@ -654,6 +658,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
654 | struct spi_transfer *xfer; | 658 | struct spi_transfer *xfer; |
655 | unsigned long flags; | 659 | unsigned long flags; |
656 | struct device *controller = spi->master->dev.parent; | 660 | struct device *controller = spi->master->dev.parent; |
661 | u8 bits; | ||
662 | struct atmel_spi_device *asd; | ||
657 | 663 | ||
658 | as = spi_master_get_devdata(spi->master); | 664 | as = spi_master_get_devdata(spi->master); |
659 | 665 | ||
@@ -672,8 +678,18 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
672 | return -EINVAL; | 678 | return -EINVAL; |
673 | } | 679 | } |
674 | 680 | ||
681 | if (xfer->bits_per_word) { | ||
682 | asd = spi->controller_state; | ||
683 | bits = (asd->csr >> 4) & 0xf; | ||
684 | if (bits != xfer->bits_per_word - 8) { | ||
685 | dev_dbg(&spi->dev, "you can't yet change " | ||
686 | "bits_per_word in transfers\n"); | ||
687 | return -ENOPROTOOPT; | ||
688 | } | ||
689 | } | ||
690 | |||
675 | /* FIXME implement these protocol options!! */ | 691 | /* FIXME implement these protocol options!! */ |
676 | if (xfer->bits_per_word || xfer->speed_hz) { | 692 | if (xfer->speed_hz) { |
677 | dev_dbg(&spi->dev, "no protocol options yet\n"); | 693 | dev_dbg(&spi->dev, "no protocol options yet\n"); |
678 | return -ENOPROTOOPT; | 694 | return -ENOPROTOOPT; |
679 | } | 695 | } |
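The check added above relies on the chip-select register keeping the word size in its BITS field (bits 7:4), encoded as bits_per_word minus 8, so a transfer may only ask for the word size the device was set up with. A one-line sketch of that decode, assuming asd->csr is the cached CSR value as in the driver:

	u8 setup_bits = ((asd->csr >> 4) & 0xf) + 8;	/* e.g. field value 8 -> 16-bit words */

so a device configured for 16-bit words accepts xfer->bits_per_word == 16 and rejects anything else.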
@@ -919,6 +935,6 @@ static void __exit atmel_spi_exit(void) | |||
919 | module_exit(atmel_spi_exit); | 935 | module_exit(atmel_spi_exit); |
920 | 936 | ||
921 | MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver"); | 937 | MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver"); |
922 | MODULE_AUTHOR("Haavard Skinnemoen <hskinnemoen@atmel.com>"); | 938 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
923 | MODULE_LICENSE("GPL"); | 939 | MODULE_LICENSE("GPL"); |
924 | MODULE_ALIAS("platform:atmel_spi"); | 940 | MODULE_ALIAS("platform:atmel_spi"); |
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c index 3c9ade69643f..b50563d320e1 100644 --- a/drivers/spi/au1550_spi.c +++ b/drivers/spi/au1550_spi.c | |||
@@ -480,7 +480,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) | |||
480 | au1xxx_dbdma_stop(hw->dma_rx_ch); | 480 | au1xxx_dbdma_stop(hw->dma_rx_ch); |
481 | au1xxx_dbdma_stop(hw->dma_tx_ch); | 481 | au1xxx_dbdma_stop(hw->dma_tx_ch); |
482 | 482 | ||
483 | /* get number of transfered bytes */ | 483 | /* get number of transferred bytes */ |
484 | hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch); | 484 | hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch); |
485 | hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch); | 485 | hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch); |
486 | 486 | ||
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c index 052b3c7fa6a0..ae2cd1c1fda8 100644 --- a/drivers/spi/coldfire_qspi.c +++ b/drivers/spi/coldfire_qspi.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/spi/spi.h> | 33 | #include <linux/spi/spi.h> |
34 | 34 | ||
35 | #include <asm/coldfire.h> | 35 | #include <asm/coldfire.h> |
36 | #include <asm/mcfsim.h> | ||
36 | #include <asm/mcfqspi.h> | 37 | #include <asm/mcfqspi.h> |
37 | 38 | ||
38 | #define DRIVER_NAME "mcfqspi" | 39 | #define DRIVER_NAME "mcfqspi" |
@@ -317,7 +318,7 @@ static void mcfqspi_work(struct work_struct *work) | |||
317 | msg = container_of(mcfqspi->msgq.next, struct spi_message, | 318 | msg = container_of(mcfqspi->msgq.next, struct spi_message, |
318 | queue); | 319 | queue); |
319 | 320 | ||
320 | list_del_init(&mcfqspi->msgq); | 321 | list_del_init(&msg->queue); |
321 | spin_unlock_irqrestore(&mcfqspi->lock, flags); | 322 | spin_unlock_irqrestore(&mcfqspi->lock, flags); |
322 | 323 | ||
323 | spi = msg->spi; | 324 | spi = msg->spi; |
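The one-line fix above matters because list_del_init() must be applied to the element being removed, not to the list head: deleting &mcfqspi->msgq reinitialises the queue head, making the queue look empty while the message stays linked. A minimal sketch of the intended dequeue step using the same kernel list helpers (equivalent to the container_of() form the driver uses):

	/* take the first queued message off the controller's queue */
	msg = list_first_entry(&mcfqspi->msgq, struct spi_message, queue);
	list_del_init(&msg->queue);	/* unlink this message only */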
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c index b85090caf7cf..1f0ed8005c91 100644 --- a/drivers/spi/davinci_spi.c +++ b/drivers/spi/davinci_spi.c | |||
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009 Texas Instruments. | 2 | * Copyright (C) 2009 Texas Instruments. |
3 | * Copyright (C) 2010 EF Johnson Technologies | ||
3 | * | 4 | * |
4 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -38,11 +39,6 @@ | |||
38 | 39 | ||
39 | #define CS_DEFAULT 0xFF | 40 | #define CS_DEFAULT 0xFF |
40 | 41 | ||
41 | #define SPI_BUFSIZ (SMP_CACHE_BYTES + 1) | ||
42 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | ||
43 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | ||
44 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | ||
45 | |||
46 | #define SPIFMT_PHASE_MASK BIT(16) | 42 | #define SPIFMT_PHASE_MASK BIT(16) |
47 | #define SPIFMT_POLARITY_MASK BIT(17) | 43 | #define SPIFMT_POLARITY_MASK BIT(17) |
48 | #define SPIFMT_DISTIMER_MASK BIT(18) | 44 | #define SPIFMT_DISTIMER_MASK BIT(18) |
@@ -52,34 +48,43 @@ | |||
52 | #define SPIFMT_ODD_PARITY_MASK BIT(23) | 48 | #define SPIFMT_ODD_PARITY_MASK BIT(23) |
53 | #define SPIFMT_WDELAY_MASK 0x3f000000u | 49 | #define SPIFMT_WDELAY_MASK 0x3f000000u |
54 | #define SPIFMT_WDELAY_SHIFT 24 | 50 | #define SPIFMT_WDELAY_SHIFT 24 |
55 | #define SPIFMT_CHARLEN_MASK 0x0000001Fu | 51 | #define SPIFMT_PRESCALE_SHIFT 8 |
56 | |||
57 | /* SPIGCR1 */ | ||
58 | #define SPIGCR1_SPIENA_MASK 0x01000000u | ||
59 | 52 | ||
60 | /* SPIPC0 */ | 53 | /* SPIPC0 */ |
61 | #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ | 54 | #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ |
62 | #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ | 55 | #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ |
63 | #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ | 56 | #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ |
64 | #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ | 57 | #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ |
65 | #define SPIPC0_EN1FUN_MASK BIT(1) | ||
66 | #define SPIPC0_EN0FUN_MASK BIT(0) | ||
67 | 58 | ||
68 | #define SPIINT_MASKALL 0x0101035F | 59 | #define SPIINT_MASKALL 0x0101035F |
69 | #define SPI_INTLVL_1 0x000001FFu | 60 | #define SPIINT_MASKINT 0x0000015F |
70 | #define SPI_INTLVL_0 0x00000000u | 61 | #define SPI_INTLVL_1 0x000001FF |
62 | #define SPI_INTLVL_0 0x00000000 | ||
71 | 63 | ||
72 | /* SPIDAT1 */ | 64 | /* SPIDAT1 (upper 16 bit defines) */ |
73 | #define SPIDAT1_CSHOLD_SHIFT 28 | 65 | #define SPIDAT1_CSHOLD_MASK BIT(12) |
74 | #define SPIDAT1_CSNR_SHIFT 16 | 66 | |
67 | /* SPIGCR1 */ | ||
75 | #define SPIGCR1_CLKMOD_MASK BIT(1) | 68 | #define SPIGCR1_CLKMOD_MASK BIT(1) |
76 | #define SPIGCR1_MASTER_MASK BIT(0) | 69 | #define SPIGCR1_MASTER_MASK BIT(0) |
70 | #define SPIGCR1_POWERDOWN_MASK BIT(8) | ||
77 | #define SPIGCR1_LOOPBACK_MASK BIT(16) | 71 | #define SPIGCR1_LOOPBACK_MASK BIT(16) |
72 | #define SPIGCR1_SPIENA_MASK BIT(24) | ||
78 | 73 | ||
79 | /* SPIBUF */ | 74 | /* SPIBUF */ |
80 | #define SPIBUF_TXFULL_MASK BIT(29) | 75 | #define SPIBUF_TXFULL_MASK BIT(29) |
81 | #define SPIBUF_RXEMPTY_MASK BIT(31) | 76 | #define SPIBUF_RXEMPTY_MASK BIT(31) |
82 | 77 | ||
78 | /* SPIDELAY */ | ||
79 | #define SPIDELAY_C2TDELAY_SHIFT 24 | ||
80 | #define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT) | ||
81 | #define SPIDELAY_T2CDELAY_SHIFT 16 | ||
82 | #define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT) | ||
83 | #define SPIDELAY_T2EDELAY_SHIFT 8 | ||
84 | #define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT) | ||
85 | #define SPIDELAY_C2EDELAY_SHIFT 0 | ||
86 | #define SPIDELAY_C2EDELAY_MASK 0xFF | ||
87 | |||
83 | /* Error Masks */ | 88 | /* Error Masks */ |
84 | #define SPIFLG_DLEN_ERR_MASK BIT(0) | 89 | #define SPIFLG_DLEN_ERR_MASK BIT(0) |
85 | #define SPIFLG_TIMEOUT_MASK BIT(1) | 90 | #define SPIFLG_TIMEOUT_MASK BIT(1) |
@@ -87,29 +92,13 @@ | |||
87 | #define SPIFLG_DESYNC_MASK BIT(3) | 92 | #define SPIFLG_DESYNC_MASK BIT(3) |
88 | #define SPIFLG_BITERR_MASK BIT(4) | 93 | #define SPIFLG_BITERR_MASK BIT(4) |
89 | #define SPIFLG_OVRRUN_MASK BIT(6) | 94 | #define SPIFLG_OVRRUN_MASK BIT(6) |
90 | #define SPIFLG_RX_INTR_MASK BIT(8) | ||
91 | #define SPIFLG_TX_INTR_MASK BIT(9) | ||
92 | #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) | 95 | #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) |
93 | #define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ | 96 | #define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \ |
94 | | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ | 97 | | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ |
95 | | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ | 98 | | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ |
96 | | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \ | 99 | | SPIFLG_OVRRUN_MASK) |
97 | | SPIFLG_TX_INTR_MASK \ | ||
98 | | SPIFLG_BUF_INIT_ACTIVE_MASK) | ||
99 | |||
100 | #define SPIINT_DLEN_ERR_INTR BIT(0) | ||
101 | #define SPIINT_TIMEOUT_INTR BIT(1) | ||
102 | #define SPIINT_PARERR_INTR BIT(2) | ||
103 | #define SPIINT_DESYNC_INTR BIT(3) | ||
104 | #define SPIINT_BITERR_INTR BIT(4) | ||
105 | #define SPIINT_OVRRUN_INTR BIT(6) | ||
106 | #define SPIINT_RX_INTR BIT(8) | ||
107 | #define SPIINT_TX_INTR BIT(9) | ||
108 | #define SPIINT_DMA_REQ_EN BIT(16) | ||
109 | #define SPIINT_ENABLE_HIGHZ BIT(24) | ||
110 | 100 | ||
111 | #define SPI_T2CDELAY_SHIFT 16 | 101 | #define SPIINT_DMA_REQ_EN BIT(16) |
112 | #define SPI_C2TDELAY_SHIFT 24 | ||
113 | 102 | ||
114 | /* SPI Controller registers */ | 103 | /* SPI Controller registers */ |
115 | #define SPIGCR0 0x00 | 104 | #define SPIGCR0 0x00 |
@@ -118,44 +107,18 @@ | |||
118 | #define SPILVL 0x0c | 107 | #define SPILVL 0x0c |
119 | #define SPIFLG 0x10 | 108 | #define SPIFLG 0x10 |
120 | #define SPIPC0 0x14 | 109 | #define SPIPC0 0x14 |
121 | #define SPIPC1 0x18 | ||
122 | #define SPIPC2 0x1c | ||
123 | #define SPIPC3 0x20 | ||
124 | #define SPIPC4 0x24 | ||
125 | #define SPIPC5 0x28 | ||
126 | #define SPIPC6 0x2c | ||
127 | #define SPIPC7 0x30 | ||
128 | #define SPIPC8 0x34 | ||
129 | #define SPIDAT0 0x38 | ||
130 | #define SPIDAT1 0x3c | 110 | #define SPIDAT1 0x3c |
131 | #define SPIBUF 0x40 | 111 | #define SPIBUF 0x40 |
132 | #define SPIEMU 0x44 | ||
133 | #define SPIDELAY 0x48 | 112 | #define SPIDELAY 0x48 |
134 | #define SPIDEF 0x4c | 113 | #define SPIDEF 0x4c |
135 | #define SPIFMT0 0x50 | 114 | #define SPIFMT0 0x50 |
136 | #define SPIFMT1 0x54 | ||
137 | #define SPIFMT2 0x58 | ||
138 | #define SPIFMT3 0x5c | ||
139 | #define TGINTVEC0 0x60 | ||
140 | #define TGINTVEC1 0x64 | ||
141 | |||
142 | struct davinci_spi_slave { | ||
143 | u32 cmd_to_write; | ||
144 | u32 clk_ctrl_to_write; | ||
145 | u32 bytes_per_word; | ||
146 | u8 active_cs; | ||
147 | }; | ||
148 | 115 | ||
149 | /* We have 2 DMA channels per CS, one for RX and one for TX */ | 116 | /* We have 2 DMA channels per CS, one for RX and one for TX */ |
150 | struct davinci_spi_dma { | 117 | struct davinci_spi_dma { |
151 | int dma_tx_channel; | 118 | int tx_channel; |
152 | int dma_rx_channel; | 119 | int rx_channel; |
153 | int dma_tx_sync_dev; | 120 | int dummy_param_slot; |
154 | int dma_rx_sync_dev; | ||
155 | enum dma_event_q eventq; | 121 | enum dma_event_q eventq; |
156 | |||
157 | struct completion dma_tx_completion; | ||
158 | struct completion dma_rx_completion; | ||
159 | }; | 122 | }; |
160 | 123 | ||
161 | /* SPI Controller driver's private data. */ | 124 | /* SPI Controller driver's private data. */ |
@@ -166,58 +129,63 @@ struct davinci_spi { | |||
166 | u8 version; | 129 | u8 version; |
167 | resource_size_t pbase; | 130 | resource_size_t pbase; |
168 | void __iomem *base; | 131 | void __iomem *base; |
169 | size_t region_size; | ||
170 | u32 irq; | 132 | u32 irq; |
171 | struct completion done; | 133 | struct completion done; |
172 | 134 | ||
173 | const void *tx; | 135 | const void *tx; |
174 | void *rx; | 136 | void *rx; |
175 | u8 *tmp_buf; | 137 | #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) |
176 | int count; | 138 | u8 rx_tmp_buf[SPI_TMP_BUFSZ]; |
177 | struct davinci_spi_dma *dma_channels; | 139 | int rcount; |
178 | struct davinci_spi_platform_data *pdata; | 140 | int wcount; |
141 | struct davinci_spi_dma dma; | ||
142 | struct davinci_spi_platform_data *pdata; | ||
179 | 143 | ||
180 | void (*get_rx)(u32 rx_data, struct davinci_spi *); | 144 | void (*get_rx)(u32 rx_data, struct davinci_spi *); |
181 | u32 (*get_tx)(struct davinci_spi *); | 145 | u32 (*get_tx)(struct davinci_spi *); |
182 | 146 | ||
183 | struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; | 147 | u8 bytes_per_word[SPI_MAX_CHIPSELECT]; |
184 | }; | 148 | }; |
185 | 149 | ||
186 | static unsigned use_dma; | 150 | static struct davinci_spi_config davinci_spi_default_cfg; |
187 | 151 | ||
188 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) | 152 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi) |
189 | { | 153 | { |
190 | u8 *rx = davinci_spi->rx; | 154 | if (dspi->rx) { |
191 | 155 | u8 *rx = dspi->rx; | |
192 | *rx++ = (u8)data; | 156 | *rx++ = (u8)data; |
193 | davinci_spi->rx = rx; | 157 | dspi->rx = rx; |
158 | } | ||
194 | } | 159 | } |
195 | 160 | ||
196 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) | 161 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) |
197 | { | 162 | { |
198 | u16 *rx = davinci_spi->rx; | 163 | if (dspi->rx) { |
199 | 164 | u16 *rx = dspi->rx; | |
200 | *rx++ = (u16)data; | 165 | *rx++ = (u16)data; |
201 | davinci_spi->rx = rx; | 166 | dspi->rx = rx; |
167 | } | ||
202 | } | 168 | } |
203 | 169 | ||
204 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) | 170 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) |
205 | { | 171 | { |
206 | u32 data; | 172 | u32 data = 0; |
207 | const u8 *tx = davinci_spi->tx; | 173 | if (dspi->tx) { |
208 | 174 | const u8 *tx = dspi->tx; | |
209 | data = *tx++; | 175 | data = *tx++; |
210 | davinci_spi->tx = tx; | 176 | dspi->tx = tx; |
177 | } | ||
211 | return data; | 178 | return data; |
212 | } | 179 | } |
213 | 180 | ||
214 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) | 181 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) |
215 | { | 182 | { |
216 | u32 data; | 183 | u32 data = 0; |
217 | const u16 *tx = davinci_spi->tx; | 184 | if (dspi->tx) { |
218 | 185 | const u16 *tx = dspi->tx; | |
219 | data = *tx++; | 186 | data = *tx++; |
220 | davinci_spi->tx = tx; | 187 | dspi->tx = tx; |
188 | } | ||
221 | return data; | 189 | return data; |
222 | } | 190 | } |
223 | 191 | ||
@@ -237,55 +205,67 @@ static inline void clear_io_bits(void __iomem *addr, u32 bits) | |||
237 | iowrite32(v, addr); | 205 | iowrite32(v, addr); |
238 | } | 206 | } |
239 | 207 | ||
240 | static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
241 | { | ||
242 | set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
243 | } | ||
244 | |||
245 | static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
246 | { | ||
247 | clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
248 | } | ||
249 | |||
250 | static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) | ||
251 | { | ||
252 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
253 | |||
254 | if (enable) | ||
255 | set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
256 | else | ||
257 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
258 | } | ||
259 | |||
260 | /* | 208 | /* |
261 | * Interface to control the chip select signal | 209 | * Interface to control the chip select signal |
262 | */ | 210 | */ |
263 | static void davinci_spi_chipselect(struct spi_device *spi, int value) | 211 | static void davinci_spi_chipselect(struct spi_device *spi, int value) |
264 | { | 212 | { |
265 | struct davinci_spi *davinci_spi; | 213 | struct davinci_spi *dspi; |
266 | struct davinci_spi_platform_data *pdata; | 214 | struct davinci_spi_platform_data *pdata; |
267 | u32 data1_reg_val = 0; | 215 | u8 chip_sel = spi->chip_select; |
216 | u16 spidat1 = CS_DEFAULT; | ||
217 | bool gpio_chipsel = false; | ||
268 | 218 | ||
269 | davinci_spi = spi_master_get_devdata(spi->master); | 219 | dspi = spi_master_get_devdata(spi->master); |
270 | pdata = davinci_spi->pdata; | 220 | pdata = dspi->pdata; |
221 | |||
222 | if (pdata->chip_sel && chip_sel < pdata->num_chipselect && | ||
223 | pdata->chip_sel[chip_sel] != SPI_INTERN_CS) | ||
224 | gpio_chipsel = true; | ||
271 | 225 | ||
272 | /* | 226 | /* |
273 | * Board specific chip select logic decides the polarity and cs | 227 | * Board specific chip select logic decides the polarity and cs |
274 | * line for the controller | 228 | * line for the controller |
275 | */ | 229 | */ |
276 | if (value == BITBANG_CS_INACTIVE) { | 230 | if (gpio_chipsel) { |
277 | set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); | 231 | if (value == BITBANG_CS_ACTIVE) |
278 | 232 | gpio_set_value(pdata->chip_sel[chip_sel], 0); | |
279 | data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; | 233 | else |
280 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | 234 | gpio_set_value(pdata->chip_sel[chip_sel], 1); |
235 | } else { | ||
236 | if (value == BITBANG_CS_ACTIVE) { | ||
237 | spidat1 |= SPIDAT1_CSHOLD_MASK; | ||
238 | spidat1 &= ~(0x1 << chip_sel); | ||
239 | } | ||
281 | 240 | ||
282 | while ((ioread32(davinci_spi->base + SPIBUF) | 241 | iowrite16(spidat1, dspi->base + SPIDAT1 + 2); |
283 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
284 | cpu_relax(); | ||
285 | } | 242 | } |
286 | } | 243 | } |
287 | 244 | ||
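The chipselect path above now distinguishes GPIO-driven and internal chip selects: when the board's platform data maps a chip select to a GPIO number (anything other than SPI_INTERN_CS) the line is toggled directly, otherwise only the upper half of SPIDAT1 (the CSHOLD and CSNR bits) is programmed with a 16-bit write so the data half stays untouched. A hedged board-side sketch follows; the field names come from this patch, while the GPIO number and chip-select count are invented for illustration.

/* Illustrative board-file sketch (assumed values): CS0 stays on the
 * controller's internal chip select, CS1 is routed to GPIO 38. */
static u8 example_board_chip_sel[] = {
        SPI_INTERN_CS,  /* CS0: internal, driven through SPIDAT1 */
        38,             /* CS1: GPIO toggled by davinci_spi_chipselect() */
};

static struct davinci_spi_platform_data example_board_spi_pdata = {
        .version        = SPI_VERSION_1,
        .num_chipselect = ARRAY_SIZE(example_board_chip_sel),
        .chip_sel       = example_board_chip_sel,
};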
288 | /** | 245 | /** |
246 | * davinci_spi_get_prescale - Calculates the correct prescale value | ||
247 | * @maxspeed_hz: the maximum rate the SPI clock can run at | ||
248 | * | ||
249 | * This function calculates the prescale value that generates a clock rate | ||
250 | * less than or equal to the specified maximum. | ||
251 | * | ||
252 | * Returns: calculated prescale - 1 for easy programming into SPI registers | ||
253 | * or a negative error number if a valid prescaler cannot be obtained. | ||
254 | */ | ||
255 | static inline int davinci_spi_get_prescale(struct davinci_spi *dspi, | ||
256 | u32 max_speed_hz) | ||
257 | { | ||
258 | int ret; | ||
259 | |||
260 | ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz); | ||
261 | |||
262 | if (ret < 3 || ret > 256) | ||
263 | return -EINVAL; | ||
264 | |||
265 | return ret - 1; | ||
266 | } | ||
267 | |||
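To make the prescale arithmetic above concrete, here is a standalone sketch of the same calculation; the 150 MHz module clock and 10 MHz request are assumed figures, not values from this patch.

/* Standalone illustration of the prescale calculation; build with a normal
 * C compiler, e.g. cc -o prescale prescale.c */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static int get_prescale(unsigned long clk_rate, unsigned long max_speed_hz)
{
        int ret = DIV_ROUND_UP(clk_rate, max_speed_hz);

        if (ret < 3 || ret > 256)       /* divider range supported by the IP */
                return -1;              /* the driver returns -EINVAL here */
        return ret - 1;                 /* value programmed into SPIFMTn */
}

int main(void)
{
        /* 150 MHz module clock, 10 MHz requested: divider 15, field value 14,
         * actual bus clock 150 MHz / 15 = 10 MHz (never above the request). */
        printf("prescale field = %d\n", get_prescale(150000000UL, 10000000UL));
        return 0;
}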
268 | /** | ||
289 | * davinci_spi_setup_transfer - This function determines the transfer method | 269 | * davinci_spi_setup_transfer - This function determines the transfer method |
290 | * @spi: spi device on which the data transfer is to be done | 270 | * @spi: spi device on which the data transfer is to be done |
291 | * @t: spi transfer in which transfer info is filled | 271 | * @t: spi transfer in which transfer info is filled |
@@ -298,13 +278,15 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, | |||
298 | struct spi_transfer *t) | 278 | struct spi_transfer *t) |
299 | { | 279 | { |
300 | 280 | ||
301 | struct davinci_spi *davinci_spi; | 281 | struct davinci_spi *dspi; |
302 | struct davinci_spi_platform_data *pdata; | 282 | struct davinci_spi_config *spicfg; |
303 | u8 bits_per_word = 0; | 283 | u8 bits_per_word = 0; |
304 | u32 hz = 0, prescale = 0, clkspeed; | 284 | u32 hz = 0, spifmt = 0, prescale = 0; |
305 | 285 | ||
306 | davinci_spi = spi_master_get_devdata(spi->master); | 286 | dspi = spi_master_get_devdata(spi->master); |
307 | pdata = davinci_spi->pdata; | 287 | spicfg = (struct davinci_spi_config *)spi->controller_data; |
288 | if (!spicfg) | ||
289 | spicfg = &davinci_spi_default_cfg; | ||
308 | 290 | ||
309 | if (t) { | 291 | if (t) { |
310 | bits_per_word = t->bits_per_word; | 292 | bits_per_word = t->bits_per_word; |
@@ -320,111 +302,83 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, | |||
320 | * 8bit, 16bit or 32bit transfer | 302 | * 8bit, 16bit or 32bit transfer |
321 | */ | 303 | */ |
322 | if (bits_per_word <= 8 && bits_per_word >= 2) { | 304 | if (bits_per_word <= 8 && bits_per_word >= 2) { |
323 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | 305 | dspi->get_rx = davinci_spi_rx_buf_u8; |
324 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | 306 | dspi->get_tx = davinci_spi_tx_buf_u8; |
325 | davinci_spi->slave[spi->chip_select].bytes_per_word = 1; | 307 | dspi->bytes_per_word[spi->chip_select] = 1; |
326 | } else if (bits_per_word <= 16 && bits_per_word >= 2) { | 308 | } else if (bits_per_word <= 16 && bits_per_word >= 2) { |
327 | davinci_spi->get_rx = davinci_spi_rx_buf_u16; | 309 | dspi->get_rx = davinci_spi_rx_buf_u16; |
328 | davinci_spi->get_tx = davinci_spi_tx_buf_u16; | 310 | dspi->get_tx = davinci_spi_tx_buf_u16; |
329 | davinci_spi->slave[spi->chip_select].bytes_per_word = 2; | 311 | dspi->bytes_per_word[spi->chip_select] = 2; |
330 | } else | 312 | } else |
331 | return -EINVAL; | 313 | return -EINVAL; |
332 | 314 | ||
333 | if (!hz) | 315 | if (!hz) |
334 | hz = spi->max_speed_hz; | 316 | hz = spi->max_speed_hz; |
335 | 317 | ||
336 | clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, | 318 | /* Set up SPIFMTn register, unique to this chipselect. */ |
337 | spi->chip_select); | ||
338 | set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, | ||
339 | spi->chip_select); | ||
340 | 319 | ||
341 | clkspeed = clk_get_rate(davinci_spi->clk); | 320 | prescale = davinci_spi_get_prescale(dspi, hz); |
342 | if (hz > clkspeed / 2) | 321 | if (prescale < 0) |
343 | prescale = 1 << 8; | 322 | return prescale; |
344 | if (hz < clkspeed / 256) | ||
345 | prescale = 255 << 8; | ||
346 | if (!prescale) | ||
347 | prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00; | ||
348 | 323 | ||
349 | clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); | 324 | spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f); |
350 | set_fmt_bits(davinci_spi->base, prescale, spi->chip_select); | ||
351 | 325 | ||
352 | return 0; | 326 | if (spi->mode & SPI_LSB_FIRST) |
353 | } | 327 | spifmt |= SPIFMT_SHIFTDIR_MASK; |
354 | 328 | ||
355 | static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) | 329 | if (spi->mode & SPI_CPOL) |
356 | { | 330 | spifmt |= SPIFMT_POLARITY_MASK; |
357 | struct spi_device *spi = (struct spi_device *)data; | ||
358 | struct davinci_spi *davinci_spi; | ||
359 | struct davinci_spi_dma *davinci_spi_dma; | ||
360 | struct davinci_spi_platform_data *pdata; | ||
361 | 331 | ||
362 | davinci_spi = spi_master_get_devdata(spi->master); | 332 | if (!(spi->mode & SPI_CPHA)) |
363 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | 333 | spifmt |= SPIFMT_PHASE_MASK; |
364 | pdata = davinci_spi->pdata; | ||
365 | 334 | ||
366 | if (ch_status == DMA_COMPLETE) | 335 | /* |
367 | edma_stop(davinci_spi_dma->dma_rx_channel); | 336 | * Version 1 hardware supports two basic SPI modes: |
368 | else | 337 | * - Standard SPI mode uses 4 pins, with chipselect |
369 | edma_clean_channel(davinci_spi_dma->dma_rx_channel); | 338 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) |
339 | * (distinct from SPI_3WIRE, with just one data wire; | ||
340 | * or similar variants without MOSI or without MISO) | ||
341 | * | ||
342 | * Version 2 hardware supports an optional handshaking signal, | ||
343 | * so it can support two more modes: | ||
344 | * - 5 pin SPI variant is standard SPI plus SPI_READY | ||
345 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) | ||
346 | */ | ||
370 | 347 | ||
371 | complete(&davinci_spi_dma->dma_rx_completion); | 348 | if (dspi->version == SPI_VERSION_2) { |
372 | /* We must disable the DMA RX request */ | ||
373 | davinci_spi_set_dma_req(spi, 0); | ||
374 | } | ||
375 | 349 | ||
376 | static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) | 350 | u32 delay = 0; |
377 | { | ||
378 | struct spi_device *spi = (struct spi_device *)data; | ||
379 | struct davinci_spi *davinci_spi; | ||
380 | struct davinci_spi_dma *davinci_spi_dma; | ||
381 | struct davinci_spi_platform_data *pdata; | ||
382 | 351 | ||
383 | davinci_spi = spi_master_get_devdata(spi->master); | 352 | spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) |
384 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | 353 | & SPIFMT_WDELAY_MASK); |
385 | pdata = davinci_spi->pdata; | ||
386 | 354 | ||
387 | if (ch_status == DMA_COMPLETE) | 355 | if (spicfg->odd_parity) |
388 | edma_stop(davinci_spi_dma->dma_tx_channel); | 356 | spifmt |= SPIFMT_ODD_PARITY_MASK; |
389 | else | ||
390 | edma_clean_channel(davinci_spi_dma->dma_tx_channel); | ||
391 | 357 | ||
392 | complete(&davinci_spi_dma->dma_tx_completion); | 358 | if (spicfg->parity_enable) |
393 | /* We must disable the DMA TX request */ | 359 | spifmt |= SPIFMT_PARITYENA_MASK; |
394 | davinci_spi_set_dma_req(spi, 0); | ||
395 | } | ||
396 | 360 | ||
397 | static int davinci_spi_request_dma(struct spi_device *spi) | 361 | if (spicfg->timer_disable) { |
398 | { | 362 | spifmt |= SPIFMT_DISTIMER_MASK; |
399 | struct davinci_spi *davinci_spi; | 363 | } else { |
400 | struct davinci_spi_dma *davinci_spi_dma; | 364 | delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT) |
401 | struct davinci_spi_platform_data *pdata; | 365 | & SPIDELAY_C2TDELAY_MASK; |
402 | struct device *sdev; | 366 | delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT) |
403 | int r; | 367 | & SPIDELAY_T2CDELAY_MASK; |
368 | } | ||
404 | 369 | ||
405 | davinci_spi = spi_master_get_devdata(spi->master); | 370 | if (spi->mode & SPI_READY) { |
406 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | 371 | spifmt |= SPIFMT_WAITENA_MASK; |
407 | pdata = davinci_spi->pdata; | 372 | delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT) |
408 | sdev = davinci_spi->bitbang.master->dev.parent; | 373 | & SPIDELAY_T2EDELAY_MASK; |
374 | delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT) | ||
375 | & SPIDELAY_C2EDELAY_MASK; | ||
376 | } | ||
409 | 377 | ||
410 | r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, | 378 | iowrite32(delay, dspi->base + SPIDELAY); |
411 | davinci_spi_dma_rx_callback, spi, | ||
412 | davinci_spi_dma->eventq); | ||
413 | if (r < 0) { | ||
414 | dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); | ||
415 | return -EAGAIN; | ||
416 | } | ||
417 | davinci_spi_dma->dma_rx_channel = r; | ||
418 | r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, | ||
419 | davinci_spi_dma_tx_callback, spi, | ||
420 | davinci_spi_dma->eventq); | ||
421 | if (r < 0) { | ||
422 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
423 | davinci_spi_dma->dma_rx_channel = -1; | ||
424 | dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); | ||
425 | return -EAGAIN; | ||
426 | } | 379 | } |
427 | davinci_spi_dma->dma_tx_channel = r; | 380 | |
381 | iowrite32(spifmt, dspi->base + SPIFMT0); | ||
428 | 382 | ||
429 | return 0; | 383 | return 0; |
430 | } | 384 | } |
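With this rework, the per-device timing and parity options consumed above come from a struct davinci_spi_config hung off spi->controller_data, with davinci_spi_default_cfg used when a board supplies none. A hedged sketch of how a board might provide one through its spi_board_info follows; the field names mirror the ones read in this patch, while the modalias, bus number, chip select and delay values are invented for illustration.

/* Illustrative board-file sketch: per-device configuration picked up by
 * davinci_spi_setup_transfer() and davinci_spi_bufs() above. */
static struct davinci_spi_config example_flash_cfg = {
        .io_type        = SPI_IO_TYPE_DMA,      /* or SPI_IO_TYPE_INTR / SPI_IO_TYPE_POLL */
        .wdelay         = 0,
        .parity_enable  = false,
        .odd_parity     = false,
        .timer_disable  = false,
        .c2tdelay       = 8,                    /* assumed chip-select-to-transmit delay */
        .t2cdelay       = 8,                    /* assumed transmit-to-chip-select delay */
};

static struct spi_board_info example_spi_devices[] __initdata = {
        {
                .modalias        = "m25p80",    /* any SPI slave driver works here */
                .controller_data = &example_flash_cfg,
                .bus_num         = 0,
                .chip_select     = 0,
                .max_speed_hz    = 30000000,
                .mode            = SPI_MODE_0,
        },
};

Board init code would then hand example_spi_devices to spi_register_board_info() as usual.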
@@ -435,190 +389,40 @@ static int davinci_spi_request_dma(struct spi_device *spi) | |||
435 | * | 389 | * |
436 | * This function sets the default transfer method. | 390 | * This function sets the default transfer method. |
437 | */ | 391 | */ |
438 | |||
439 | static int davinci_spi_setup(struct spi_device *spi) | 392 | static int davinci_spi_setup(struct spi_device *spi) |
440 | { | 393 | { |
441 | int retval; | 394 | int retval = 0; |
442 | struct davinci_spi *davinci_spi; | 395 | struct davinci_spi *dspi; |
443 | struct davinci_spi_dma *davinci_spi_dma; | 396 | struct davinci_spi_platform_data *pdata; |
444 | struct device *sdev; | ||
445 | 397 | ||
446 | davinci_spi = spi_master_get_devdata(spi->master); | 398 | dspi = spi_master_get_devdata(spi->master); |
447 | sdev = davinci_spi->bitbang.master->dev.parent; | 399 | pdata = dspi->pdata; |
448 | 400 | ||
449 | /* if bits per word is zero then set it to the default of 8 */ | 401 | /* if bits per word is zero then set it to the default of 8 */ |
450 | if (!spi->bits_per_word) | 402 | if (!spi->bits_per_word) |
451 | spi->bits_per_word = 8; | 403 | spi->bits_per_word = 8; |
452 | 404 | ||
453 | davinci_spi->slave[spi->chip_select].cmd_to_write = 0; | 405 | if (!(spi->mode & SPI_NO_CS)) { |
454 | 406 | if ((pdata->chip_sel == NULL) || | |
455 | if (use_dma && davinci_spi->dma_channels) { | 407 | (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)) |
456 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | 408 | set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); |
457 | |||
458 | if ((davinci_spi_dma->dma_rx_channel == -1) | ||
459 | || (davinci_spi_dma->dma_tx_channel == -1)) { | ||
460 | retval = davinci_spi_request_dma(spi); | ||
461 | if (retval < 0) | ||
462 | return retval; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * SPI in DaVinci and DA8xx operate between | ||
468 | * 600 KHz and 50 MHz | ||
469 | */ | ||
470 | if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { | ||
471 | dev_dbg(sdev, "Operating frequency is not in acceptable " | ||
472 | "range\n"); | ||
473 | return -EINVAL; | ||
474 | } | ||
475 | |||
476 | /* | ||
477 | * Set up SPIFMTn register, unique to this chipselect. | ||
478 | * | ||
479 | * NOTE: we could do all of these with one write. Also, some | ||
480 | * of the "version 2" features are found in chips that don't | ||
481 | * support all of them... | ||
482 | */ | ||
483 | if (spi->mode & SPI_LSB_FIRST) | ||
484 | set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
485 | spi->chip_select); | ||
486 | else | ||
487 | clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
488 | spi->chip_select); | ||
489 | |||
490 | if (spi->mode & SPI_CPOL) | ||
491 | set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
492 | spi->chip_select); | ||
493 | else | ||
494 | clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
495 | spi->chip_select); | ||
496 | |||
497 | if (!(spi->mode & SPI_CPHA)) | ||
498 | set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
499 | spi->chip_select); | ||
500 | else | ||
501 | clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
502 | spi->chip_select); | ||
503 | |||
504 | /* | ||
505 | * Version 1 hardware supports two basic SPI modes: | ||
506 | * - Standard SPI mode uses 4 pins, with chipselect | ||
507 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) | ||
508 | * (distinct from SPI_3WIRE, with just one data wire; | ||
509 | * or similar variants without MOSI or without MISO) | ||
510 | * | ||
511 | * Version 2 hardware supports an optional handshaking signal, | ||
512 | * so it can support two more modes: | ||
513 | * - 5 pin SPI variant is standard SPI plus SPI_READY | ||
514 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) | ||
515 | */ | ||
516 | 409 | ||
517 | if (davinci_spi->version == SPI_VERSION_2) { | ||
518 | clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, | ||
519 | spi->chip_select); | ||
520 | set_fmt_bits(davinci_spi->base, | ||
521 | (davinci_spi->pdata->wdelay | ||
522 | << SPIFMT_WDELAY_SHIFT) | ||
523 | & SPIFMT_WDELAY_MASK, | ||
524 | spi->chip_select); | ||
525 | |||
526 | if (davinci_spi->pdata->odd_parity) | ||
527 | set_fmt_bits(davinci_spi->base, | ||
528 | SPIFMT_ODD_PARITY_MASK, | ||
529 | spi->chip_select); | ||
530 | else | ||
531 | clear_fmt_bits(davinci_spi->base, | ||
532 | SPIFMT_ODD_PARITY_MASK, | ||
533 | spi->chip_select); | ||
534 | |||
535 | if (davinci_spi->pdata->parity_enable) | ||
536 | set_fmt_bits(davinci_spi->base, | ||
537 | SPIFMT_PARITYENA_MASK, | ||
538 | spi->chip_select); | ||
539 | else | ||
540 | clear_fmt_bits(davinci_spi->base, | ||
541 | SPIFMT_PARITYENA_MASK, | ||
542 | spi->chip_select); | ||
543 | |||
544 | if (davinci_spi->pdata->wait_enable) | ||
545 | set_fmt_bits(davinci_spi->base, | ||
546 | SPIFMT_WAITENA_MASK, | ||
547 | spi->chip_select); | ||
548 | else | ||
549 | clear_fmt_bits(davinci_spi->base, | ||
550 | SPIFMT_WAITENA_MASK, | ||
551 | spi->chip_select); | ||
552 | |||
553 | if (davinci_spi->pdata->timer_disable) | ||
554 | set_fmt_bits(davinci_spi->base, | ||
555 | SPIFMT_DISTIMER_MASK, | ||
556 | spi->chip_select); | ||
557 | else | ||
558 | clear_fmt_bits(davinci_spi->base, | ||
559 | SPIFMT_DISTIMER_MASK, | ||
560 | spi->chip_select); | ||
561 | } | 410 | } |
562 | 411 | ||
563 | retval = davinci_spi_setup_transfer(spi, NULL); | ||
564 | |||
565 | return retval; | ||
566 | } | ||
567 | |||
568 | static void davinci_spi_cleanup(struct spi_device *spi) | ||
569 | { | ||
570 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
571 | struct davinci_spi_dma *davinci_spi_dma; | ||
572 | |||
573 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
574 | |||
575 | if (use_dma && davinci_spi->dma_channels) { | ||
576 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
577 | |||
578 | if ((davinci_spi_dma->dma_rx_channel != -1) | ||
579 | && (davinci_spi_dma->dma_tx_channel != -1)) { | ||
580 | edma_free_channel(davinci_spi_dma->dma_tx_channel); | ||
581 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
582 | } | ||
583 | } | ||
584 | } | ||
585 | |||
586 | static int davinci_spi_bufs_prep(struct spi_device *spi, | ||
587 | struct davinci_spi *davinci_spi) | ||
588 | { | ||
589 | int op_mode = 0; | ||
590 | |||
591 | /* | ||
592 | * REVISIT unless devices disagree about SPI_LOOP or | ||
593 | * SPI_READY (SPI_NO_CS only allows one device!), this | ||
594 | * should not need to be done before each message... | ||
595 | * optimize for both flags staying cleared. | ||
596 | */ | ||
597 | |||
598 | op_mode = SPIPC0_DIFUN_MASK | ||
599 | | SPIPC0_DOFUN_MASK | ||
600 | | SPIPC0_CLKFUN_MASK; | ||
601 | if (!(spi->mode & SPI_NO_CS)) | ||
602 | op_mode |= 1 << spi->chip_select; | ||
603 | if (spi->mode & SPI_READY) | 412 | if (spi->mode & SPI_READY) |
604 | op_mode |= SPIPC0_SPIENA_MASK; | 413 | set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); |
605 | |||
606 | iowrite32(op_mode, davinci_spi->base + SPIPC0); | ||
607 | 414 | ||
608 | if (spi->mode & SPI_LOOP) | 415 | if (spi->mode & SPI_LOOP) |
609 | set_io_bits(davinci_spi->base + SPIGCR1, | 416 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); |
610 | SPIGCR1_LOOPBACK_MASK); | ||
611 | else | 417 | else |
612 | clear_io_bits(davinci_spi->base + SPIGCR1, | 418 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); |
613 | SPIGCR1_LOOPBACK_MASK); | ||
614 | 419 | ||
615 | return 0; | 420 | return retval; |
616 | } | 421 | } |
617 | 422 | ||
618 | static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | 423 | static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status) |
619 | int int_status) | ||
620 | { | 424 | { |
621 | struct device *sdev = davinci_spi->bitbang.master->dev.parent; | 425 | struct device *sdev = dspi->bitbang.master->dev.parent; |
622 | 426 | ||
623 | if (int_status & SPIFLG_TIMEOUT_MASK) { | 427 | if (int_status & SPIFLG_TIMEOUT_MASK) { |
624 | dev_dbg(sdev, "SPI Time-out Error\n"); | 428 | dev_dbg(sdev, "SPI Time-out Error\n"); |
@@ -633,7 +437,7 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | |||
633 | return -EIO; | 437 | return -EIO; |
634 | } | 438 | } |
635 | 439 | ||
636 | if (davinci_spi->version == SPI_VERSION_2) { | 440 | if (dspi->version == SPI_VERSION_2) { |
637 | if (int_status & SPIFLG_DLEN_ERR_MASK) { | 441 | if (int_status & SPIFLG_DLEN_ERR_MASK) { |
638 | dev_dbg(sdev, "SPI Data Length Error\n"); | 442 | dev_dbg(sdev, "SPI Data Length Error\n"); |
639 | return -EIO; | 443 | return -EIO; |
@@ -646,10 +450,6 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | |||
646 | dev_dbg(sdev, "SPI Data Overrun error\n"); | 450 | dev_dbg(sdev, "SPI Data Overrun error\n"); |
647 | return -EIO; | 451 | return -EIO; |
648 | } | 452 | } |
649 | if (int_status & SPIFLG_TX_INTR_MASK) { | ||
650 | dev_dbg(sdev, "SPI TX intr bit set\n"); | ||
651 | return -EIO; | ||
652 | } | ||
653 | if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { | 453 | if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { |
654 | dev_dbg(sdev, "SPI Buffer Init Active\n"); | 454 | dev_dbg(sdev, "SPI Buffer Init Active\n"); |
655 | return -EBUSY; | 455 | return -EBUSY; |
@@ -660,366 +460,355 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | |||
660 | } | 460 | } |
661 | 461 | ||
662 | /** | 462 | /** |
663 | * davinci_spi_bufs - functions which will handle transfer data | 463 | * davinci_spi_process_events - check for and handle any SPI controller events |
664 | * @spi: spi device on which data transfer to be done | 464 | * @dspi: the controller data |
665 | * @t: spi transfer in which transfer info is filled | ||
666 | * | 465 | * |
667 | * This function will put data to be transferred into data register | 466 | * This function will check the SPIFLG register and handle any events that are |
668 | * of SPI controller and then wait until the completion will be marked | 467 | * detected there. |
669 | * by the IRQ Handler. | ||
670 | */ | 468 | */ |
671 | static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) | 469 | static int davinci_spi_process_events(struct davinci_spi *dspi) |
672 | { | 470 | { |
673 | struct davinci_spi *davinci_spi; | 471 | u32 buf, status, errors = 0, spidat1; |
674 | int int_status, count, ret; | ||
675 | u8 conv, tmp; | ||
676 | u32 tx_data, data1_reg_val; | ||
677 | u32 buf_val, flg_val; | ||
678 | struct davinci_spi_platform_data *pdata; | ||
679 | 472 | ||
680 | davinci_spi = spi_master_get_devdata(spi->master); | 473 | buf = ioread32(dspi->base + SPIBUF); |
681 | pdata = davinci_spi->pdata; | ||
682 | 474 | ||
683 | davinci_spi->tx = t->tx_buf; | 475 | if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) { |
684 | davinci_spi->rx = t->rx_buf; | 476 | dspi->get_rx(buf & 0xFFFF, dspi); |
685 | 477 | dspi->rcount--; | |
686 | /* convert len to words based on bits_per_word */ | 478 | } |
687 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | ||
688 | davinci_spi->count = t->len / conv; | ||
689 | |||
690 | INIT_COMPLETION(davinci_spi->done); | ||
691 | |||
692 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
693 | if (ret) | ||
694 | return ret; | ||
695 | |||
696 | /* Enable SPI */ | ||
697 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
698 | |||
699 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
700 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
701 | davinci_spi->base + SPIDELAY); | ||
702 | |||
703 | count = davinci_spi->count; | ||
704 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
705 | tmp = ~(0x1 << spi->chip_select); | ||
706 | |||
707 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
708 | |||
709 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
710 | |||
711 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
712 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
713 | cpu_relax(); | ||
714 | |||
715 | /* Determine the command to execute READ or WRITE */ | ||
716 | if (t->tx_buf) { | ||
717 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | ||
718 | 479 | ||
719 | while (1) { | 480 | status = ioread32(dspi->base + SPIFLG); |
720 | tx_data = davinci_spi->get_tx(davinci_spi); | ||
721 | 481 | ||
722 | data1_reg_val &= ~(0xFFFF); | 482 | if (unlikely(status & SPIFLG_ERROR_MASK)) { |
723 | data1_reg_val |= (0xFFFF & tx_data); | 483 | errors = status & SPIFLG_ERROR_MASK; |
484 | goto out; | ||
485 | } | ||
724 | 486 | ||
725 | buf_val = ioread32(davinci_spi->base + SPIBUF); | 487 | if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) { |
726 | if ((buf_val & SPIBUF_TXFULL_MASK) == 0) { | 488 | spidat1 = ioread32(dspi->base + SPIDAT1); |
727 | iowrite32(data1_reg_val, | 489 | dspi->wcount--; |
728 | davinci_spi->base + SPIDAT1); | 490 | spidat1 &= ~0xFFFF; |
491 | spidat1 |= 0xFFFF & dspi->get_tx(dspi); | ||
492 | iowrite32(spidat1, dspi->base + SPIDAT1); | ||
493 | } | ||
729 | 494 | ||
730 | count--; | 495 | out: |
731 | } | 496 | return errors; |
732 | while (ioread32(davinci_spi->base + SPIBUF) | 497 | } |
733 | & SPIBUF_RXEMPTY_MASK) | ||
734 | cpu_relax(); | ||
735 | |||
736 | /* getting the returned byte */ | ||
737 | if (t->rx_buf) { | ||
738 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
739 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
740 | } | ||
741 | if (count <= 0) | ||
742 | break; | ||
743 | } | ||
744 | } else { | ||
745 | if (pdata->poll_mode) { | ||
746 | while (1) { | ||
747 | /* keeps the serial clock going */ | ||
748 | if ((ioread32(davinci_spi->base + SPIBUF) | ||
749 | & SPIBUF_TXFULL_MASK) == 0) | ||
750 | iowrite32(data1_reg_val, | ||
751 | davinci_spi->base + SPIDAT1); | ||
752 | |||
753 | while (ioread32(davinci_spi->base + SPIBUF) & | ||
754 | SPIBUF_RXEMPTY_MASK) | ||
755 | cpu_relax(); | ||
756 | |||
757 | flg_val = ioread32(davinci_spi->base + SPIFLG); | ||
758 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
759 | |||
760 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
761 | |||
762 | count--; | ||
763 | if (count <= 0) | ||
764 | break; | ||
765 | } | ||
766 | } else { /* Receive in Interrupt mode */ | ||
767 | int i; | ||
768 | 498 | ||
769 | for (i = 0; i < davinci_spi->count; i++) { | 499 | static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) |
770 | set_io_bits(davinci_spi->base + SPIINT, | 500 | { |
771 | SPIINT_BITERR_INTR | 501 | struct davinci_spi *dspi = data; |
772 | | SPIINT_OVRRUN_INTR | 502 | struct davinci_spi_dma *dma = &dspi->dma; |
773 | | SPIINT_RX_INTR); | ||
774 | 503 | ||
775 | iowrite32(data1_reg_val, | 504 | edma_stop(lch); |
776 | davinci_spi->base + SPIDAT1); | ||
777 | 505 | ||
778 | while (ioread32(davinci_spi->base + SPIINT) & | 506 | if (status == DMA_COMPLETE) { |
779 | SPIINT_RX_INTR) | 507 | if (lch == dma->rx_channel) |
780 | cpu_relax(); | 508 | dspi->rcount = 0; |
781 | } | 509 | if (lch == dma->tx_channel) |
782 | iowrite32((data1_reg_val & 0x0ffcffff), | 510 | dspi->wcount = 0; |
783 | davinci_spi->base + SPIDAT1); | ||
784 | } | ||
785 | } | 511 | } |
786 | 512 | ||
787 | /* | 513 | if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) |
788 | * Check for bit error, desync error,parity error,timeout error and | 514 | complete(&dspi->done); |
789 | * receive overflow errors | ||
790 | */ | ||
791 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
792 | |||
793 | ret = davinci_spi_check_error(davinci_spi, int_status); | ||
794 | if (ret != 0) | ||
795 | return ret; | ||
796 | |||
797 | /* SPI Framework maintains the count only in bytes so convert back */ | ||
798 | davinci_spi->count *= conv; | ||
799 | |||
800 | return t->len; | ||
801 | } | 515 | } |
802 | 516 | ||
803 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | 517 | /** |
804 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | 518 | * davinci_spi_bufs - function which handles the transfer of data |
805 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | 519 | * @spi: spi device on which the data transfer is to be done |
806 | 520 | * @t: spi transfer in which transfer info is filled | |
807 | static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) | 521 | * |
522 | * This function puts the data to be transferred into the data register | ||
523 | * of the SPI controller and then waits until completion is signalled | ||
524 | * by the IRQ handler. | ||
525 | */ | ||
526 | static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | ||
808 | { | 527 | { |
809 | struct davinci_spi *davinci_spi; | 528 | struct davinci_spi *dspi; |
810 | int int_status = 0; | 529 | int data_type, ret; |
811 | int count, temp_count; | 530 | u32 tx_data, spidat1; |
812 | u8 conv = 1; | 531 | u32 errors = 0; |
813 | u8 tmp; | 532 | struct davinci_spi_config *spicfg; |
814 | u32 data1_reg_val; | ||
815 | struct davinci_spi_dma *davinci_spi_dma; | ||
816 | int word_len, data_type, ret; | ||
817 | unsigned long tx_reg, rx_reg; | ||
818 | struct davinci_spi_platform_data *pdata; | 533 | struct davinci_spi_platform_data *pdata; |
534 | unsigned uninitialized_var(rx_buf_count); | ||
819 | struct device *sdev; | 535 | struct device *sdev; |
820 | 536 | ||
821 | davinci_spi = spi_master_get_devdata(spi->master); | 537 | dspi = spi_master_get_devdata(spi->master); |
822 | pdata = davinci_spi->pdata; | 538 | pdata = dspi->pdata; |
823 | sdev = davinci_spi->bitbang.master->dev.parent; | 539 | spicfg = (struct davinci_spi_config *)spi->controller_data; |
824 | 540 | if (!spicfg) | |
825 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | 541 | spicfg = &davinci_spi_default_cfg; |
826 | 542 | sdev = dspi->bitbang.master->dev.parent; | |
827 | tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; | ||
828 | rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; | ||
829 | |||
830 | davinci_spi->tx = t->tx_buf; | ||
831 | davinci_spi->rx = t->rx_buf; | ||
832 | 543 | ||
833 | /* convert len to words based on bits_per_word */ | 544 | /* convert len to words based on bits_per_word */ |
834 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | 545 | data_type = dspi->bytes_per_word[spi->chip_select]; |
835 | davinci_spi->count = t->len / conv; | ||
836 | 546 | ||
837 | INIT_COMPLETION(davinci_spi->done); | 547 | dspi->tx = t->tx_buf; |
548 | dspi->rx = t->rx_buf; | ||
549 | dspi->wcount = t->len / data_type; | ||
550 | dspi->rcount = dspi->wcount; | ||
838 | 551 | ||
839 | init_completion(&davinci_spi_dma->dma_rx_completion); | 552 | spidat1 = ioread32(dspi->base + SPIDAT1); |
840 | init_completion(&davinci_spi_dma->dma_tx_completion); | ||
841 | 553 | ||
842 | word_len = conv * 8; | 554 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); |
555 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
843 | 556 | ||
844 | if (word_len <= 8) | 557 | INIT_COMPLETION(dspi->done); |
845 | data_type = DAVINCI_DMA_DATA_TYPE_S8; | ||
846 | else if (word_len <= 16) | ||
847 | data_type = DAVINCI_DMA_DATA_TYPE_S16; | ||
848 | else if (word_len <= 32) | ||
849 | data_type = DAVINCI_DMA_DATA_TYPE_S32; | ||
850 | else | ||
851 | return -EINVAL; | ||
852 | |||
853 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
854 | if (ret) | ||
855 | return ret; | ||
856 | |||
857 | /* Put delay val if required */ | ||
858 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
859 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
860 | davinci_spi->base + SPIDELAY); | ||
861 | |||
862 | count = davinci_spi->count; /* the number of elements */ | ||
863 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
864 | |||
865 | /* CS default = 0xFF */ | ||
866 | tmp = ~(0x1 << spi->chip_select); | ||
867 | |||
868 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
869 | |||
870 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
871 | |||
872 | /* disable all interrupts for dma transfers */ | ||
873 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | ||
874 | /* Disable SPI to write configuration bits in SPIDAT */ | ||
875 | clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
876 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
877 | /* Enable SPI */ | ||
878 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
879 | |||
880 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
881 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
882 | cpu_relax(); | ||
883 | 558 | ||
559 | if (spicfg->io_type == SPI_IO_TYPE_INTR) | ||
560 | set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); | ||
884 | 561 | ||
885 | if (t->tx_buf) { | 562 | if (spicfg->io_type != SPI_IO_TYPE_DMA) { |
886 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, | 563 | /* start the transfer */ |
887 | DMA_TO_DEVICE); | 564 | dspi->wcount--; |
888 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | 565 | tx_data = dspi->get_tx(dspi); |
889 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | 566 | spidat1 &= 0xFFFF0000; |
890 | " TX buffer\n", count); | 567 | spidat1 |= tx_data & 0xFFFF; |
891 | return -ENOMEM; | 568 | iowrite32(spidat1, dspi->base + SPIDAT1); |
892 | } | ||
893 | temp_count = count; | ||
894 | } else { | 569 | } else { |
895 | /* We need TX clocking for RX transaction */ | 570 | struct davinci_spi_dma *dma; |
896 | t->tx_dma = dma_map_single(&spi->dev, | 571 | unsigned long tx_reg, rx_reg; |
897 | (void *)davinci_spi->tmp_buf, count + 1, | 572 | struct edmacc_param param; |
898 | DMA_TO_DEVICE); | 573 | void *rx_buf; |
899 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | 574 | int b, c; |
900 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | 575 | |
901 | " TX tmp buffer\n", count); | 576 | dma = &dspi->dma; |
902 | return -ENOMEM; | 577 | |
578 | tx_reg = (unsigned long)dspi->pbase + SPIDAT1; | ||
579 | rx_reg = (unsigned long)dspi->pbase + SPIBUF; | ||
580 | |||
581 | /* | ||
582 | * Transmit DMA setup | ||
583 | * | ||
584 | * If there is transmit data, map the transmit buffer, set it | ||
585 | * as the source of data and set the source B index to data | ||
586 | * size. If there is no transmit data, set the transmit register | ||
587 | * as the source of data, and set the source B index to zero. | ||
588 | * | ||
589 | * The destination is always the transmit register itself. And | ||
590 | * the destination never increments. | ||
591 | */ | ||
592 | |||
593 | if (t->tx_buf) { | ||
594 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, | ||
595 | t->len, DMA_TO_DEVICE); | ||
596 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
597 | dev_dbg(sdev, "Unable to DMA map %d bytes" | ||
598 | "TX buffer\n", t->len); | ||
599 | return -ENOMEM; | ||
600 | } | ||
903 | } | 601 | } |
904 | temp_count = count + 1; | ||
905 | } | ||
906 | |||
907 | edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, | ||
908 | data_type, temp_count, 1, 0, ASYNC); | ||
909 | edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); | ||
910 | edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); | ||
911 | edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); | ||
912 | edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); | ||
913 | 602 | ||
914 | if (t->rx_buf) { | 603 | /* |
915 | /* initiate transaction */ | 604 | * If number of words is greater than 65535, then we need |
916 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | 605 | * to configure a 3 dimension transfer. Use the BCNTRLD |
606 | * feature to allow for transfers that aren't even multiples | ||
607 | * of 65535 (or any other possible b size) by first transferring | ||
608 | * the remainder amount then grabbing the next N blocks of | ||
609 | * 65535 words. | ||
610 | */ | ||
611 | |||
612 | c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */ | ||
613 | b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */ | ||
614 | if (b) | ||
615 | c++; | ||
616 | else | ||
617 | b = SZ_64K - 1; | ||
618 | |||
619 | param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); | ||
620 | param.src = t->tx_buf ? t->tx_dma : tx_reg; | ||
621 | param.a_b_cnt = b << 16 | data_type; | ||
622 | param.dst = tx_reg; | ||
623 | param.src_dst_bidx = t->tx_buf ? data_type : 0; | ||
624 | param.link_bcntrld = 0xffffffff; | ||
625 | param.src_dst_cidx = t->tx_buf ? data_type : 0; | ||
626 | param.ccnt = c; | ||
627 | edma_write_slot(dma->tx_channel, &param); | ||
628 | edma_link(dma->tx_channel, dma->dummy_param_slot); | ||
629 | |||
630 | /* | ||
631 | * Receive DMA setup | ||
632 | * | ||
633 | * If there is receive buffer, use it to receive data. If there | ||
634 | * is none provided, use a temporary receive buffer. Set the | ||
635 | * destination B index to 0 so effectively only one byte is used | ||
636 | * in the temporary buffer (address does not increment). | ||
637 | * | ||
638 | * The source of receive data is the receive data register. The | ||
639 | * source address never increments. | ||
640 | */ | ||
641 | |||
642 | if (t->rx_buf) { | ||
643 | rx_buf = t->rx_buf; | ||
644 | rx_buf_count = t->len; | ||
645 | } else { | ||
646 | rx_buf = dspi->rx_tmp_buf; | ||
647 | rx_buf_count = sizeof(dspi->rx_tmp_buf); | ||
648 | } | ||
917 | 649 | ||
918 | t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, | 650 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, |
919 | DMA_FROM_DEVICE); | 651 | DMA_FROM_DEVICE); |
920 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | 652 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { |
921 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | 653 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", |
922 | count); | 654 | rx_buf_count); |
923 | if (t->tx_buf != NULL) | 655 | if (t->tx_buf) |
924 | dma_unmap_single(NULL, t->tx_dma, | 656 | dma_unmap_single(NULL, t->tx_dma, t->len, |
925 | count, DMA_TO_DEVICE); | 657 | DMA_TO_DEVICE); |
926 | return -ENOMEM; | 658 | return -ENOMEM; |
927 | } | 659 | } |
928 | edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, | ||
929 | data_type, count, 1, 0, ASYNC); | ||
930 | edma_set_src(davinci_spi_dma->dma_rx_channel, | ||
931 | rx_reg, INCR, W8BIT); | ||
932 | edma_set_dest(davinci_spi_dma->dma_rx_channel, | ||
933 | t->rx_dma, INCR, W8BIT); | ||
934 | edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); | ||
935 | edma_set_dest_index(davinci_spi_dma->dma_rx_channel, | ||
936 | data_type, 0); | ||
937 | } | ||
938 | 660 | ||
939 | if ((t->tx_buf) || (t->rx_buf)) | 661 | param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); |
940 | edma_start(davinci_spi_dma->dma_tx_channel); | 662 | param.src = rx_reg; |
663 | param.a_b_cnt = b << 16 | data_type; | ||
664 | param.dst = t->rx_dma; | ||
665 | param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; | ||
666 | param.link_bcntrld = 0xffffffff; | ||
667 | param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16; | ||
668 | param.ccnt = c; | ||
669 | edma_write_slot(dma->rx_channel, &param); | ||
670 | |||
671 | if (pdata->cshold_bug) | ||
672 | iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); | ||
673 | |||
674 | edma_start(dma->rx_channel); | ||
675 | edma_start(dma->tx_channel); | ||
676 | set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
677 | } | ||
941 | 678 | ||
942 | if (t->rx_buf) | 679 | /* Wait for the transfer to complete */ |
943 | edma_start(davinci_spi_dma->dma_rx_channel); | 680 | if (spicfg->io_type != SPI_IO_TYPE_POLL) { |
681 | wait_for_completion_interruptible(&(dspi->done)); | ||
682 | } else { | ||
683 | while (dspi->rcount > 0 || dspi->wcount > 0) { | ||
684 | errors = davinci_spi_process_events(dspi); | ||
685 | if (errors) | ||
686 | break; | ||
687 | cpu_relax(); | ||
688 | } | ||
689 | } | ||
944 | 690 | ||
945 | if ((t->rx_buf) || (t->tx_buf)) | 691 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); |
946 | davinci_spi_set_dma_req(spi, 1); | 692 | if (spicfg->io_type == SPI_IO_TYPE_DMA) { |
947 | 693 | ||
948 | if (t->tx_buf) | 694 | if (t->tx_buf) |
949 | wait_for_completion_interruptible( | 695 | dma_unmap_single(NULL, t->tx_dma, t->len, |
950 | &davinci_spi_dma->dma_tx_completion); | 696 | DMA_TO_DEVICE); |
951 | 697 | ||
952 | if (t->rx_buf) | 698 | dma_unmap_single(NULL, t->rx_dma, rx_buf_count, |
953 | wait_for_completion_interruptible( | 699 | DMA_FROM_DEVICE); |
954 | &davinci_spi_dma->dma_rx_completion); | ||
955 | 700 | ||
956 | dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); | 701 | clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
702 | } | ||
957 | 703 | ||
958 | if (t->rx_buf) | 704 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); |
959 | dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); | 705 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); |
960 | 706 | ||
961 | /* | 707 | /* |
962 | * Check for bit error, desync error, parity error, timeout error and | 708 | * Check for bit error, desync error, parity error, timeout error and |
963 | * receive overflow errors | 709 | * receive overflow errors |
964 | */ | 710 | */ |
965 | int_status = ioread32(davinci_spi->base + SPIFLG); | 711 | if (errors) { |
966 | 712 | ret = davinci_spi_check_error(dspi, errors); | |
967 | ret = davinci_spi_check_error(davinci_spi, int_status); | 713 | WARN(!ret, "%s: error reported but no error found!\n", |
968 | if (ret != 0) | 714 | dev_name(&spi->dev)); |
969 | return ret; | 715 | return ret; |
716 | } | ||
970 | 717 | ||
971 | /* SPI Framework maintains the count only in bytes so convert back */ | 718 | if (dspi->rcount != 0 || dspi->wcount != 0) { |
972 | davinci_spi->count *= conv; | 719 | dev_err(sdev, "SPI data transfer error\n"); |
720 | return -EIO; | ||
721 | } | ||
973 | 722 | ||
974 | return t->len; | 723 | return t->len; |
975 | } | 724 | } |
976 | 725 | ||
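The wcount split into b and c inside davinci_spi_bufs() above exists because a single EDMA B-count is limited to 65535 words; per the patch's own comment, the remainder is transferred first and the reloaded B-count then covers the full 65535-word blocks. The arithmetic can be checked in isolation with the standalone sketch below; the 200000-word transfer length is an assumed figure.

/* Standalone illustration of the block split used in davinci_spi_bufs(). */
#include <stdio.h>

#define SZ_64K  0x10000

int main(void)
{
        unsigned int wcount = 200000;                   /* assumed length in words */
        unsigned int c = wcount / (SZ_64K - 1);         /* full 65535-word blocks */
        unsigned int b = wcount - c * (SZ_64K - 1);     /* remainder, transferred first */

        if (b)
                c++;                    /* one extra block for the remainder */
        else
                b = SZ_64K - 1;         /* exact multiple: every block is full */

        /* 200000 = 3395 + 3 * 65535, so this prints b = 3395 and c = 4. */
        printf("b = %u words in the first block, c = %u blocks\n", b, c);
        return 0;
}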
977 | /** | 726 | /** |
978 | * davinci_spi_irq - IRQ handler for DaVinci SPI | 727 | * davinci_spi_irq - Interrupt handler for SPI Master Controller |
979 | * @irq: IRQ number for this SPI Master | 728 | * @irq: IRQ number for this SPI Master |
980 | * @context_data: structure for SPI Master controller davinci_spi | 729 | * @context_data: structure for SPI Master controller davinci_spi |
730 | * | ||
731 | * The ISR determines whether the interrupt arrived for a READ or a WRITE command | ||
732 | * and acts accordingly. It checks the remaining transfer length and, if it is | ||
733 | * non-zero, dispatches the next transfer command. Once the transfer length | ||
734 | * reaches zero, it signals completion so that the | ||
735 | * davinci_spi_bufs function can proceed. | ||
981 | */ | 736 | */ |
982 | static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) | 737 | static irqreturn_t davinci_spi_irq(s32 irq, void *data) |
983 | { | 738 | { |
984 | struct davinci_spi *davinci_spi = context_data; | 739 | struct davinci_spi *dspi = data; |
985 | u32 int_status, rx_data = 0; | 740 | int status; |
986 | irqreturn_t ret = IRQ_NONE; | ||
987 | 741 | ||
988 | int_status = ioread32(davinci_spi->base + SPIFLG); | 742 | status = davinci_spi_process_events(dspi); |
743 | if (unlikely(status != 0)) | ||
744 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); | ||
989 | 745 | ||
990 | while ((int_status & SPIFLG_RX_INTR_MASK)) { | 746 | if ((!dspi->rcount && !dspi->wcount) || status) |
991 | if (likely(int_status & SPIFLG_RX_INTR_MASK)) { | 747 | complete(&dspi->done); |
992 | ret = IRQ_HANDLED; | ||
993 | 748 | ||
994 | rx_data = ioread32(davinci_spi->base + SPIBUF); | 749 | return IRQ_HANDLED; |
995 | davinci_spi->get_rx(rx_data, davinci_spi); | 750 | } |
996 | 751 | ||
997 | /* Disable Receive Interrupt */ | 752 | static int davinci_spi_request_dma(struct davinci_spi *dspi) |
998 | iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), | 753 | { |
999 | davinci_spi->base + SPIINT); | 754 | int r; |
1000 | } else | 755 | struct davinci_spi_dma *dma = &dspi->dma; |
1001 | (void)davinci_spi_check_error(davinci_spi, int_status); | ||
1002 | 756 | ||
1003 | int_status = ioread32(davinci_spi->base + SPIFLG); | 757 | r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, |
758 | dma->eventq); | ||
759 | if (r < 0) { | ||
760 | pr_err("Unable to request DMA channel for SPI RX\n"); | ||
761 | r = -EAGAIN; | ||
762 | goto rx_dma_failed; | ||
1004 | } | 763 | } |
1005 | 764 | ||
1006 | return ret; | 765 | r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, |
766 | dma->eventq); | ||
767 | if (r < 0) { | ||
768 | pr_err("Unable to request DMA channel for SPI TX\n"); | ||
769 | r = -EAGAIN; | ||
770 | goto tx_dma_failed; | ||
771 | } | ||
772 | |||
773 | r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); | ||
774 | if (r < 0) { | ||
775 | pr_err("Unable to request SPI TX DMA param slot\n"); | ||
776 | r = -EAGAIN; | ||
777 | goto param_failed; | ||
778 | } | ||
779 | dma->dummy_param_slot = r; | ||
780 | edma_link(dma->dummy_param_slot, dma->dummy_param_slot); | ||
781 | |||
782 | return 0; | ||
783 | param_failed: | ||
784 | edma_free_channel(dma->tx_channel); | ||
785 | tx_dma_failed: | ||
786 | edma_free_channel(dma->rx_channel); | ||
787 | rx_dma_failed: | ||
788 | return r; | ||
1007 | } | 789 | } |
1008 | 790 | ||
1009 | /** | 791 | /** |
1010 | * davinci_spi_probe - probe function for SPI Master Controller | 792 | * davinci_spi_probe - probe function for SPI Master Controller |
1011 | * @pdev: platform_device structure which contains platform specific data | 793 | * @pdev: platform_device structure which contains platform specific data |
794 | * | ||
795 | * According to the Linux device model, this function is invoked by Linux | ||
796 | * with a platform_device struct which contains the device specific info. | ||
797 | * This function maps the SPI controller's memory, registers the IRQ, | ||
798 | * resets the SPI controller and programs its registers to default values. | ||
799 | * It invokes spi_bitbang_start to create a work queue so that the client | ||
800 | * driver can register its transfer method with the work queue. | ||
1012 | */ | 801 | */ |
1013 | static int davinci_spi_probe(struct platform_device *pdev) | 802 | static int davinci_spi_probe(struct platform_device *pdev) |
1014 | { | 803 | { |
1015 | struct spi_master *master; | 804 | struct spi_master *master; |
1016 | struct davinci_spi *davinci_spi; | 805 | struct davinci_spi *dspi; |
1017 | struct davinci_spi_platform_data *pdata; | 806 | struct davinci_spi_platform_data *pdata; |
1018 | struct resource *r, *mem; | 807 | struct resource *r, *mem; |
1019 | resource_size_t dma_rx_chan = SPI_NO_RESOURCE; | 808 | resource_size_t dma_rx_chan = SPI_NO_RESOURCE; |
1020 | resource_size_t dma_tx_chan = SPI_NO_RESOURCE; | 809 | resource_size_t dma_tx_chan = SPI_NO_RESOURCE; |
1021 | resource_size_t dma_eventq = SPI_NO_RESOURCE; | ||
1022 | int i = 0, ret = 0; | 810 | int i = 0, ret = 0; |
811 | u32 spipc0; | ||
1023 | 812 | ||
1024 | pdata = pdev->dev.platform_data; | 813 | pdata = pdev->dev.platform_data; |
1025 | if (pdata == NULL) { | 814 | if (pdata == NULL) { |
@@ -1035,8 +824,8 @@ static int davinci_spi_probe(struct platform_device *pdev) | |||
1035 | 824 | ||
1036 | dev_set_drvdata(&pdev->dev, master); | 825 | dev_set_drvdata(&pdev->dev, master); |
1037 | 826 | ||
1038 | davinci_spi = spi_master_get_devdata(master); | 827 | dspi = spi_master_get_devdata(master); |
1039 | if (davinci_spi == NULL) { | 828 | if (dspi == NULL) { |
1040 | ret = -ENOENT; | 829 | ret = -ENOENT; |
1041 | goto free_master; | 830 | goto free_master; |
1042 | } | 831 | } |
@@ -1047,164 +836,139 @@ static int davinci_spi_probe(struct platform_device *pdev) | |||
1047 | goto free_master; | 836 | goto free_master; |
1048 | } | 837 | } |
1049 | 838 | ||
1050 | davinci_spi->pbase = r->start; | 839 | dspi->pbase = r->start; |
1051 | davinci_spi->region_size = resource_size(r); | 840 | dspi->pdata = pdata; |
1052 | davinci_spi->pdata = pdata; | ||
1053 | 841 | ||
1054 | mem = request_mem_region(r->start, davinci_spi->region_size, | 842 | mem = request_mem_region(r->start, resource_size(r), pdev->name); |
1055 | pdev->name); | ||
1056 | if (mem == NULL) { | 843 | if (mem == NULL) { |
1057 | ret = -EBUSY; | 844 | ret = -EBUSY; |
1058 | goto free_master; | 845 | goto free_master; |
1059 | } | 846 | } |
1060 | 847 | ||
1061 | davinci_spi->base = (struct davinci_spi_reg __iomem *) | 848 | dspi->base = ioremap(r->start, resource_size(r)); |
1062 | ioremap(r->start, davinci_spi->region_size); | 849 | if (dspi->base == NULL) { |
1063 | if (davinci_spi->base == NULL) { | ||
1064 | ret = -ENOMEM; | 850 | ret = -ENOMEM; |
1065 | goto release_region; | 851 | goto release_region; |
1066 | } | 852 | } |
1067 | 853 | ||
1068 | davinci_spi->irq = platform_get_irq(pdev, 0); | 854 | dspi->irq = platform_get_irq(pdev, 0); |
1069 | if (davinci_spi->irq <= 0) { | 855 | if (dspi->irq <= 0) { |
1070 | ret = -EINVAL; | 856 | ret = -EINVAL; |
1071 | goto unmap_io; | 857 | goto unmap_io; |
1072 | } | 858 | } |
1073 | 859 | ||
1074 | ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, | 860 | ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev), |
1075 | dev_name(&pdev->dev), davinci_spi); | 861 | dspi); |
1076 | if (ret) | 862 | if (ret) |
1077 | goto unmap_io; | 863 | goto unmap_io; |
1078 | 864 | ||
1079 | /* Allocate tmp_buf for tx_buf */ | 865 | dspi->bitbang.master = spi_master_get(master); |
1080 | davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); | 866 | if (dspi->bitbang.master == NULL) { |
1081 | if (davinci_spi->tmp_buf == NULL) { | ||
1082 | ret = -ENOMEM; | ||
1083 | goto irq_free; | ||
1084 | } | ||
1085 | |||
1086 | davinci_spi->bitbang.master = spi_master_get(master); | ||
1087 | if (davinci_spi->bitbang.master == NULL) { | ||
1088 | ret = -ENODEV; | 867 | ret = -ENODEV; |
1089 | goto free_tmp_buf; | 868 | goto irq_free; |
1090 | } | 869 | } |
1091 | 870 | ||
1092 | davinci_spi->clk = clk_get(&pdev->dev, NULL); | 871 | dspi->clk = clk_get(&pdev->dev, NULL); |
1093 | if (IS_ERR(davinci_spi->clk)) { | 872 | if (IS_ERR(dspi->clk)) { |
1094 | ret = -ENODEV; | 873 | ret = -ENODEV; |
1095 | goto put_master; | 874 | goto put_master; |
1096 | } | 875 | } |
1097 | clk_enable(davinci_spi->clk); | 876 | clk_enable(dspi->clk); |
1098 | |||
1099 | 877 | ||
1100 | master->bus_num = pdev->id; | 878 | master->bus_num = pdev->id; |
1101 | master->num_chipselect = pdata->num_chipselect; | 879 | master->num_chipselect = pdata->num_chipselect; |
1102 | master->setup = davinci_spi_setup; | 880 | master->setup = davinci_spi_setup; |
1103 | master->cleanup = davinci_spi_cleanup; | ||
1104 | |||
1105 | davinci_spi->bitbang.chipselect = davinci_spi_chipselect; | ||
1106 | davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; | ||
1107 | |||
1108 | davinci_spi->version = pdata->version; | ||
1109 | use_dma = pdata->use_dma; | ||
1110 | |||
1111 | davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; | ||
1112 | if (davinci_spi->version == SPI_VERSION_2) | ||
1113 | davinci_spi->bitbang.flags |= SPI_READY; | ||
1114 | |||
1115 | if (use_dma) { | ||
1116 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1117 | if (r) | ||
1118 | dma_rx_chan = r->start; | ||
1119 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
1120 | if (r) | ||
1121 | dma_tx_chan = r->start; | ||
1122 | r = platform_get_resource(pdev, IORESOURCE_DMA, 2); | ||
1123 | if (r) | ||
1124 | dma_eventq = r->start; | ||
1125 | } | ||
1126 | 881 | ||
1127 | if (!use_dma || | 882 | dspi->bitbang.chipselect = davinci_spi_chipselect; |
1128 | dma_rx_chan == SPI_NO_RESOURCE || | 883 | dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; |
1129 | dma_tx_chan == SPI_NO_RESOURCE || | 884 | |
1130 | dma_eventq == SPI_NO_RESOURCE) { | 885 | dspi->version = pdata->version; |
1131 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; | 886 | |
1132 | use_dma = 0; | 887 | dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; |
1133 | } else { | 888 | if (dspi->version == SPI_VERSION_2) |
1134 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; | 889 | dspi->bitbang.flags |= SPI_READY; |
1135 | davinci_spi->dma_channels = kzalloc(master->num_chipselect | 890 | |
1136 | * sizeof(struct davinci_spi_dma), GFP_KERNEL); | 891 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
1137 | if (davinci_spi->dma_channels == NULL) { | 892 | if (r) |
1138 | ret = -ENOMEM; | 893 | dma_rx_chan = r->start; |
894 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
895 | if (r) | ||
896 | dma_tx_chan = r->start; | ||
897 | |||
898 | dspi->bitbang.txrx_bufs = davinci_spi_bufs; | ||
899 | if (dma_rx_chan != SPI_NO_RESOURCE && | ||
900 | dma_tx_chan != SPI_NO_RESOURCE) { | ||
901 | dspi->dma.rx_channel = dma_rx_chan; | ||
902 | dspi->dma.tx_channel = dma_tx_chan; | ||
903 | dspi->dma.eventq = pdata->dma_event_q; | ||
904 | |||
905 | ret = davinci_spi_request_dma(dspi); | ||
906 | if (ret) | ||
1139 | goto free_clk; | 907 | goto free_clk; |
1140 | } | ||
1141 | 908 | ||
1142 | for (i = 0; i < master->num_chipselect; i++) { | 909 | dev_info(&pdev->dev, "DMA: supported\n"); |
1143 | davinci_spi->dma_channels[i].dma_rx_channel = -1; | 910 | dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, " |
1144 | davinci_spi->dma_channels[i].dma_rx_sync_dev = | 911 | "event queue: %d\n", dma_rx_chan, dma_tx_chan, |
1145 | dma_rx_chan; | 912 | pdata->dma_event_q); |
1146 | davinci_spi->dma_channels[i].dma_tx_channel = -1; | ||
1147 | davinci_spi->dma_channels[i].dma_tx_sync_dev = | ||
1148 | dma_tx_chan; | ||
1149 | davinci_spi->dma_channels[i].eventq = dma_eventq; | ||
1150 | } | ||
1151 | dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" | ||
1152 | "Using RX channel = %d , TX channel = %d and " | ||
1153 | "event queue = %d", dma_rx_chan, dma_tx_chan, | ||
1154 | dma_eventq); | ||
1155 | } | 913 | } |
1156 | 914 | ||
1157 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | 915 | dspi->get_rx = davinci_spi_rx_buf_u8; |
1158 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | 916 | dspi->get_tx = davinci_spi_tx_buf_u8; |
1159 | 917 | ||
1160 | init_completion(&davinci_spi->done); | 918 | init_completion(&dspi->done); |
1161 | 919 | ||
1162 | /* Reset In/OUT SPI module */ | 920 | /* Reset In/OUT SPI module */ |
1163 | iowrite32(0, davinci_spi->base + SPIGCR0); | 921 | iowrite32(0, dspi->base + SPIGCR0); |
1164 | udelay(100); | 922 | udelay(100); |
1165 | iowrite32(1, davinci_spi->base + SPIGCR0); | 923 | iowrite32(1, dspi->base + SPIGCR0); |
1166 | 924 | ||
1167 | /* Clock internal */ | 925 | /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */ |
1168 | if (davinci_spi->pdata->clk_internal) | 926 | spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; |
1169 | set_io_bits(davinci_spi->base + SPIGCR1, | 927 | iowrite32(spipc0, dspi->base + SPIPC0); |
1170 | SPIGCR1_CLKMOD_MASK); | ||
1171 | else | ||
1172 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
1173 | SPIGCR1_CLKMOD_MASK); | ||
1174 | 928 | ||
1175 | /* master mode default */ | 929 | /* initialize chip selects */ |
1176 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); | 930 | if (pdata->chip_sel) { |
931 | for (i = 0; i < pdata->num_chipselect; i++) { | ||
932 | if (pdata->chip_sel[i] != SPI_INTERN_CS) | ||
933 | gpio_direction_output(pdata->chip_sel[i], 1); | ||
934 | } | ||
935 | } | ||
1177 | 936 | ||
1178 | if (davinci_spi->pdata->intr_level) | 937 | if (pdata->intr_line) |
1179 | iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); | 938 | iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); |
1180 | else | 939 | else |
1181 | iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); | 940 | iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); |
1182 | 941 | ||
1183 | ret = spi_bitbang_start(&davinci_spi->bitbang); | 942 | iowrite32(CS_DEFAULT, dspi->base + SPIDEF); |
1184 | if (ret) | ||
1185 | goto free_clk; | ||
1186 | 943 | ||
1187 | dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base); | 944 | /* master mode default */ |
945 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); | ||
946 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); | ||
947 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); | ||
948 | |||
949 | ret = spi_bitbang_start(&dspi->bitbang); | ||
950 | if (ret) | ||
951 | goto free_dma; | ||
1188 | 952 | ||
1189 | if (!pdata->poll_mode) | 953 | dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base); |
1190 | dev_info(&pdev->dev, "Operating in interrupt mode" | ||
1191 | " using IRQ %d\n", davinci_spi->irq); | ||
1192 | 954 | ||
1193 | return ret; | 955 | return ret; |
1194 | 956 | ||
957 | free_dma: | ||
958 | edma_free_channel(dspi->dma.tx_channel); | ||
959 | edma_free_channel(dspi->dma.rx_channel); | ||
960 | edma_free_slot(dspi->dma.dummy_param_slot); | ||
1195 | free_clk: | 961 | free_clk: |
1196 | clk_disable(davinci_spi->clk); | 962 | clk_disable(dspi->clk); |
1197 | clk_put(davinci_spi->clk); | 963 | clk_put(dspi->clk); |
1198 | put_master: | 964 | put_master: |
1199 | spi_master_put(master); | 965 | spi_master_put(master); |
1200 | free_tmp_buf: | ||
1201 | kfree(davinci_spi->tmp_buf); | ||
1202 | irq_free: | 966 | irq_free: |
1203 | free_irq(davinci_spi->irq, davinci_spi); | 967 | free_irq(dspi->irq, dspi); |
1204 | unmap_io: | 968 | unmap_io: |
1205 | iounmap(davinci_spi->base); | 969 | iounmap(dspi->base); |
1206 | release_region: | 970 | release_region: |
1207 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | 971 | release_mem_region(dspi->pbase, resource_size(r)); |
1208 | free_master: | 972 | free_master: |
1209 | kfree(master); | 973 | kfree(master); |
1210 | err: | 974 | err: |
@@ -1222,27 +986,31 @@ err: | |||
1222 | */ | 986 | */ |
1223 | static int __exit davinci_spi_remove(struct platform_device *pdev) | 987 | static int __exit davinci_spi_remove(struct platform_device *pdev) |
1224 | { | 988 | { |
1225 | struct davinci_spi *davinci_spi; | 989 | struct davinci_spi *dspi; |
1226 | struct spi_master *master; | 990 | struct spi_master *master; |
991 | struct resource *r; | ||
1227 | 992 | ||
1228 | master = dev_get_drvdata(&pdev->dev); | 993 | master = dev_get_drvdata(&pdev->dev); |
1229 | davinci_spi = spi_master_get_devdata(master); | 994 | dspi = spi_master_get_devdata(master); |
1230 | 995 | ||
1231 | spi_bitbang_stop(&davinci_spi->bitbang); | 996 | spi_bitbang_stop(&dspi->bitbang); |
1232 | 997 | ||
1233 | clk_disable(davinci_spi->clk); | 998 | clk_disable(dspi->clk); |
1234 | clk_put(davinci_spi->clk); | 999 | clk_put(dspi->clk); |
1235 | spi_master_put(master); | 1000 | spi_master_put(master); |
1236 | kfree(davinci_spi->tmp_buf); | 1001 | free_irq(dspi->irq, dspi); |
1237 | free_irq(davinci_spi->irq, davinci_spi); | 1002 | iounmap(dspi->base); |
1238 | iounmap(davinci_spi->base); | 1003 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1239 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | 1004 | release_mem_region(dspi->pbase, resource_size(r)); |
1240 | 1005 | ||
1241 | return 0; | 1006 | return 0; |
1242 | } | 1007 | } |
1243 | 1008 | ||
1244 | static struct platform_driver davinci_spi_driver = { | 1009 | static struct platform_driver davinci_spi_driver = { |
1245 | .driver.name = "spi_davinci", | 1010 | .driver = { |
1011 | .name = "spi_davinci", | ||
1012 | .owner = THIS_MODULE, | ||
1013 | }, | ||
1246 | .remove = __exit_p(davinci_spi_remove), | 1014 | .remove = __exit_p(davinci_spi_remove), |
1247 | }; | 1015 | }; |
1248 | 1016 | ||
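The reworked davinci probe above keeps the kernel's usual unwinding idiom: each acquired resource gets a label in the error ladder (free_dma, free_clk, put_master, ...), and a failure jumps to the label that releases everything obtained so far, newest first. A minimal self-contained sketch of the pattern, with stub helpers standing in for the real clock/DMA/master calls:

/* stubbed-out helpers so the sketch stands alone */
static int get_clock(void)       { return 0; }
static int request_dma(void)     { return 0; }
static int register_master(void) { return -1; }	/* pretend the last step fails */
static void release_dma(void)    { }
static void put_clock(void)      { }

static int example_probe(void)
{
	int ret;

	ret = get_clock();
	if (ret)
		return ret;		/* nothing acquired yet, nothing to undo */

	ret = request_dma();
	if (ret)
		goto free_clk;

	ret = register_master();
	if (ret)
		goto free_dma;		/* undo the newest acquisition first */

	return 0;

free_dma:
	release_dma();			/* mirrors the driver's free_dma: label */
free_clk:
	put_clock();			/* mirrors free_clk: */
	return ret;
}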
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 56247853c298..919fa9d9e16b 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c | |||
@@ -22,10 +22,10 @@ | |||
22 | #include <linux/highmem.h> | 22 | #include <linux/highmem.h> |
23 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | |||
26 | #include <linux/spi/dw_spi.h> | ||
27 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
28 | 26 | ||
27 | #include "dw_spi.h" | ||
28 | |||
29 | #ifdef CONFIG_DEBUG_FS | 29 | #ifdef CONFIG_DEBUG_FS |
30 | #include <linux/debugfs.h> | 30 | #include <linux/debugfs.h> |
31 | #endif | 31 | #endif |
@@ -58,8 +58,6 @@ struct chip_data { | |||
58 | u8 bits_per_word; | 58 | u8 bits_per_word; |
59 | u16 clk_div; /* baud rate divider */ | 59 | u16 clk_div; /* baud rate divider */ |
60 | u32 speed_hz; /* baud rate */ | 60 | u32 speed_hz; /* baud rate */ |
61 | int (*write)(struct dw_spi *dws); | ||
62 | int (*read)(struct dw_spi *dws); | ||
63 | void (*cs_control)(u32 command); | 61 | void (*cs_control)(u32 command); |
64 | }; | 62 | }; |
65 | 63 | ||
@@ -131,6 +129,7 @@ static const struct file_operations mrst_spi_regs_ops = { | |||
131 | .owner = THIS_MODULE, | 129 | .owner = THIS_MODULE, |
132 | .open = spi_show_regs_open, | 130 | .open = spi_show_regs_open, |
133 | .read = spi_show_regs, | 131 | .read = spi_show_regs, |
132 | .llseek = default_llseek, | ||
134 | }; | 133 | }; |
135 | 134 | ||
136 | static int mrst_spi_debugfs_init(struct dw_spi *dws) | 135 | static int mrst_spi_debugfs_init(struct dw_spi *dws) |
@@ -161,104 +160,70 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
161 | } | 160 | } |
162 | #endif /* CONFIG_DEBUG_FS */ | 161 | #endif /* CONFIG_DEBUG_FS */ |
163 | 162 | ||
164 | static void wait_till_not_busy(struct dw_spi *dws) | 163 | /* Return the max entries we can fill into tx fifo */ |
165 | { | 164 | static inline u32 tx_max(struct dw_spi *dws) |
166 | unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); | ||
167 | |||
168 | while (time_before(jiffies, end)) { | ||
169 | if (!(dw_readw(dws, sr) & SR_BUSY)) | ||
170 | return; | ||
171 | } | ||
172 | dev_err(&dws->master->dev, | ||
173 | "DW SPI: Status keeps busy for 1000us after a read/write!\n"); | ||
174 | } | ||
175 | |||
176 | static void flush(struct dw_spi *dws) | ||
177 | { | 165 | { |
178 | while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) | 166 | u32 tx_left, tx_room, rxtx_gap; |
179 | dw_readw(dws, dr); | ||
180 | |||
181 | wait_till_not_busy(dws); | ||
182 | } | ||
183 | 167 | ||
184 | static int null_writer(struct dw_spi *dws) | 168 | tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; |
185 | { | 169 | tx_room = dws->fifo_len - dw_readw(dws, txflr); |
186 | u8 n_bytes = dws->n_bytes; | ||
187 | 170 | ||
188 | if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) | 171 | /* |
189 | || (dws->tx == dws->tx_end)) | 172 | * Another concern is about the tx/rx mismatch, we |
190 | return 0; | 173 | * thought to use (dws->fifo_len - rxflr - txflr) as |
191 | dw_writew(dws, dr, 0); | 174 | * one maximum value for tx, but it doesn't cover the |
192 | dws->tx += n_bytes; | 175 | * data which is out of tx/rx fifo and inside the |
176 | * shift registers. So a software-side cap is | ||
177 | * applied instead. | ||
178 | */ | ||
179 | rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx)) | ||
180 | / dws->n_bytes; | ||
193 | 181 | ||
194 | wait_till_not_busy(dws); | 182 | return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap)); |
195 | return 1; | ||
196 | } | 183 | } |
197 | 184 | ||
198 | static int null_reader(struct dw_spi *dws) | 185 | /* Return the max entries we should read out of rx fifo */ |
186 | static inline u32 rx_max(struct dw_spi *dws) | ||
199 | { | 187 | { |
200 | u8 n_bytes = dws->n_bytes; | 188 | u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; |
201 | 189 | ||
202 | while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) | 190 | return min(rx_left, (u32)dw_readw(dws, rxflr)); |
203 | && (dws->rx < dws->rx_end)) { | ||
204 | dw_readw(dws, dr); | ||
205 | dws->rx += n_bytes; | ||
206 | } | ||
207 | wait_till_not_busy(dws); | ||
208 | return dws->rx == dws->rx_end; | ||
209 | } | 191 | } |
210 | 192 | ||
211 | static int u8_writer(struct dw_spi *dws) | 193 | static void dw_writer(struct dw_spi *dws) |
212 | { | 194 | { |
213 | if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) | 195 | u32 max = tx_max(dws); |
214 | || (dws->tx == dws->tx_end)) | 196 | u16 txw = 0; |
215 | return 0; | ||
216 | |||
217 | dw_writew(dws, dr, *(u8 *)(dws->tx)); | ||
218 | ++dws->tx; | ||
219 | |||
220 | wait_till_not_busy(dws); | ||
221 | return 1; | ||
222 | } | ||
223 | 197 | ||
224 | static int u8_reader(struct dw_spi *dws) | 198 | while (max--) { |
225 | { | 199 | /* Set the tx word if the transfer's original "tx" is not null */ |
226 | while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) | 200 | if (dws->tx_end - dws->len) { |
227 | && (dws->rx < dws->rx_end)) { | 201 | if (dws->n_bytes == 1) |
228 | *(u8 *)(dws->rx) = dw_readw(dws, dr); | 202 | txw = *(u8 *)(dws->tx); |
229 | ++dws->rx; | 203 | else |
204 | txw = *(u16 *)(dws->tx); | ||
205 | } | ||
206 | dw_writew(dws, dr, txw); | ||
207 | dws->tx += dws->n_bytes; | ||
230 | } | 208 | } |
231 | |||
232 | wait_till_not_busy(dws); | ||
233 | return dws->rx == dws->rx_end; | ||
234 | } | ||
235 | |||
236 | static int u16_writer(struct dw_spi *dws) | ||
237 | { | ||
238 | if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) | ||
239 | || (dws->tx == dws->tx_end)) | ||
240 | return 0; | ||
241 | |||
242 | dw_writew(dws, dr, *(u16 *)(dws->tx)); | ||
243 | dws->tx += 2; | ||
244 | |||
245 | wait_till_not_busy(dws); | ||
246 | return 1; | ||
247 | } | 209 | } |
248 | 210 | ||
249 | static int u16_reader(struct dw_spi *dws) | 211 | static void dw_reader(struct dw_spi *dws) |
250 | { | 212 | { |
251 | u16 temp; | 213 | u32 max = rx_max(dws); |
214 | u16 rxw; | ||
252 | 215 | ||
253 | while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) | 216 | while (max--) { |
254 | && (dws->rx < dws->rx_end)) { | 217 | rxw = dw_readw(dws, dr); |
255 | temp = dw_readw(dws, dr); | 218 | /* Care rx only if the transfer's original "rx" is not null */ |
256 | *(u16 *)(dws->rx) = temp; | 219 | if (dws->rx_end - dws->len) { |
257 | dws->rx += 2; | 220 | if (dws->n_bytes == 1) |
221 | *(u8 *)(dws->rx) = rxw; | ||
222 | else | ||
223 | *(u16 *)(dws->rx) = rxw; | ||
224 | } | ||
225 | dws->rx += dws->n_bytes; | ||
258 | } | 226 | } |
259 | |||
260 | wait_till_not_busy(dws); | ||
261 | return dws->rx == dws->rx_end; | ||
262 | } | 227 | } |
263 | 228 | ||
264 | static void *next_transfer(struct dw_spi *dws) | 229 | static void *next_transfer(struct dw_spi *dws) |
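A quick worked example of the tx_max() cap introduced above, assuming the 40-entry FIFO that dw_spi_mid_init() reports further down: if a transfer has pushed 30 words out but only 10 have been read back, (dws->rx_end - dws->rx) - (dws->tx_end - dws->tx) corresponds to 20 outstanding words, so dws->fifo_len - rxtx_gap = 40 - 20 = 20. Even with an empty TX FIFO, at most 20 more words get queued before the reader catches up, keeping everything in flight (TX FIFO, shift register, RX FIFO) inside the 40-entry budget that min3() enforces.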
@@ -284,8 +249,10 @@ static void *next_transfer(struct dw_spi *dws) | |||
284 | */ | 249 | */ |
285 | static int map_dma_buffers(struct dw_spi *dws) | 250 | static int map_dma_buffers(struct dw_spi *dws) |
286 | { | 251 | { |
287 | if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited | 252 | if (!dws->cur_msg->is_dma_mapped |
288 | || !dws->cur_chip->enable_dma) | 253 | || !dws->dma_inited |
254 | || !dws->cur_chip->enable_dma | ||
255 | || !dws->dma_ops) | ||
289 | return 0; | 256 | return 0; |
290 | 257 | ||
291 | if (dws->cur_transfer->tx_dma) | 258 | if (dws->cur_transfer->tx_dma) |
@@ -328,8 +295,7 @@ static void giveback(struct dw_spi *dws) | |||
328 | 295 | ||
329 | static void int_error_stop(struct dw_spi *dws, const char *msg) | 296 | static void int_error_stop(struct dw_spi *dws, const char *msg) |
330 | { | 297 | { |
331 | /* Stop and reset hw */ | 298 | /* Stop the hw */ |
332 | flush(dws); | ||
333 | spi_enable_chip(dws, 0); | 299 | spi_enable_chip(dws, 0); |
334 | 300 | ||
335 | dev_err(&dws->master->dev, "%s\n", msg); | 301 | dev_err(&dws->master->dev, "%s\n", msg); |
@@ -337,9 +303,9 @@ static void int_error_stop(struct dw_spi *dws, const char *msg) | |||
337 | tasklet_schedule(&dws->pump_transfers); | 303 | tasklet_schedule(&dws->pump_transfers); |
338 | } | 304 | } |
339 | 305 | ||
340 | static void transfer_complete(struct dw_spi *dws) | 306 | void dw_spi_xfer_done(struct dw_spi *dws) |
341 | { | 307 | { |
342 | /* Update total byte transfered return count actual bytes read */ | 308 | /* Update total bytes transferred; return count reflects actual bytes read */ |
343 | dws->cur_msg->actual_length += dws->len; | 309 | dws->cur_msg->actual_length += dws->len; |
344 | 310 | ||
345 | /* Move to next transfer */ | 311 | /* Move to next transfer */ |
@@ -352,38 +318,32 @@ static void transfer_complete(struct dw_spi *dws) | |||
352 | } else | 318 | } else |
353 | tasklet_schedule(&dws->pump_transfers); | 319 | tasklet_schedule(&dws->pump_transfers); |
354 | } | 320 | } |
321 | EXPORT_SYMBOL_GPL(dw_spi_xfer_done); | ||
355 | 322 | ||
356 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | 323 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) |
357 | { | 324 | { |
358 | u16 irq_status, irq_mask = 0x3f; | 325 | u16 irq_status = dw_readw(dws, isr); |
359 | u32 int_level = dws->fifo_len / 2; | ||
360 | u32 left; | ||
361 | 326 | ||
362 | irq_status = dw_readw(dws, isr) & irq_mask; | ||
363 | /* Error handling */ | 327 | /* Error handling */ |
364 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { | 328 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { |
365 | dw_readw(dws, txoicr); | 329 | dw_readw(dws, txoicr); |
366 | dw_readw(dws, rxoicr); | 330 | dw_readw(dws, rxoicr); |
367 | dw_readw(dws, rxuicr); | 331 | dw_readw(dws, rxuicr); |
368 | int_error_stop(dws, "interrupt_transfer: fifo overrun"); | 332 | int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); |
369 | return IRQ_HANDLED; | 333 | return IRQ_HANDLED; |
370 | } | 334 | } |
371 | 335 | ||
336 | dw_reader(dws); | ||
337 | if (dws->rx_end == dws->rx) { | ||
338 | spi_mask_intr(dws, SPI_INT_TXEI); | ||
339 | dw_spi_xfer_done(dws); | ||
340 | return IRQ_HANDLED; | ||
341 | } | ||
372 | if (irq_status & SPI_INT_TXEI) { | 342 | if (irq_status & SPI_INT_TXEI) { |
373 | spi_mask_intr(dws, SPI_INT_TXEI); | 343 | spi_mask_intr(dws, SPI_INT_TXEI); |
374 | 344 | dw_writer(dws); | |
375 | left = (dws->tx_end - dws->tx) / dws->n_bytes; | 345 | /* Enable TX irq always, it will be disabled when RX finished */ |
376 | left = (left > int_level) ? int_level : left; | 346 | spi_umask_intr(dws, SPI_INT_TXEI); |
377 | |||
378 | while (left--) | ||
379 | dws->write(dws); | ||
380 | dws->read(dws); | ||
381 | |||
382 | /* Re-enable the IRQ if there is still data left to tx */ | ||
383 | if (dws->tx_end > dws->tx) | ||
384 | spi_umask_intr(dws, SPI_INT_TXEI); | ||
385 | else | ||
386 | transfer_complete(dws); | ||
387 | } | 347 | } |
388 | 348 | ||
389 | return IRQ_HANDLED; | 349 | return IRQ_HANDLED; |
@@ -392,15 +352,13 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
392 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) | 352 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) |
393 | { | 353 | { |
394 | struct dw_spi *dws = dev_id; | 354 | struct dw_spi *dws = dev_id; |
395 | u16 irq_status, irq_mask = 0x3f; | 355 | u16 irq_status = dw_readw(dws, isr) & 0x3f; |
396 | 356 | ||
397 | irq_status = dw_readw(dws, isr) & irq_mask; | ||
398 | if (!irq_status) | 357 | if (!irq_status) |
399 | return IRQ_NONE; | 358 | return IRQ_NONE; |
400 | 359 | ||
401 | if (!dws->cur_msg) { | 360 | if (!dws->cur_msg) { |
402 | spi_mask_intr(dws, SPI_INT_TXEI); | 361 | spi_mask_intr(dws, SPI_INT_TXEI); |
403 | /* Never fail */ | ||
404 | return IRQ_HANDLED; | 362 | return IRQ_HANDLED; |
405 | } | 363 | } |
406 | 364 | ||
@@ -410,14 +368,13 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) | |||
410 | /* Must be called inside pump_transfers() */ | 368 | /* Must be called inside pump_transfers() */ |
411 | static void poll_transfer(struct dw_spi *dws) | 369 | static void poll_transfer(struct dw_spi *dws) |
412 | { | 370 | { |
413 | while (dws->write(dws)) | 371 | do { |
414 | dws->read(dws); | 372 | dw_writer(dws); |
373 | dw_reader(dws); | ||
374 | cpu_relax(); | ||
375 | } while (dws->rx_end > dws->rx); | ||
415 | 376 | ||
416 | transfer_complete(dws); | 377 | dw_spi_xfer_done(dws); |
417 | } | ||
418 | |||
419 | static void dma_transfer(struct dw_spi *dws, int cs_change) | ||
420 | { | ||
421 | } | 378 | } |
422 | 379 | ||
423 | static void pump_transfers(unsigned long data) | 380 | static void pump_transfers(unsigned long data) |
@@ -475,8 +432,6 @@ static void pump_transfers(unsigned long data) | |||
475 | dws->tx_end = dws->tx + transfer->len; | 432 | dws->tx_end = dws->tx + transfer->len; |
476 | dws->rx = transfer->rx_buf; | 433 | dws->rx = transfer->rx_buf; |
477 | dws->rx_end = dws->rx + transfer->len; | 434 | dws->rx_end = dws->rx + transfer->len; |
478 | dws->write = dws->tx ? chip->write : null_writer; | ||
479 | dws->read = dws->rx ? chip->read : null_reader; | ||
480 | dws->cs_change = transfer->cs_change; | 435 | dws->cs_change = transfer->cs_change; |
481 | dws->len = dws->cur_transfer->len; | 436 | dws->len = dws->cur_transfer->len; |
482 | if (chip != dws->prev_chip) | 437 | if (chip != dws->prev_chip) |
@@ -510,20 +465,8 @@ static void pump_transfers(unsigned long data) | |||
510 | 465 | ||
511 | switch (bits) { | 466 | switch (bits) { |
512 | case 8: | 467 | case 8: |
513 | dws->n_bytes = 1; | ||
514 | dws->dma_width = 1; | ||
515 | dws->read = (dws->read != null_reader) ? | ||
516 | u8_reader : null_reader; | ||
517 | dws->write = (dws->write != null_writer) ? | ||
518 | u8_writer : null_writer; | ||
519 | break; | ||
520 | case 16: | 468 | case 16: |
521 | dws->n_bytes = 2; | 469 | dws->n_bytes = dws->dma_width = bits >> 3; |
522 | dws->dma_width = 2; | ||
523 | dws->read = (dws->read != null_reader) ? | ||
524 | u16_reader : null_reader; | ||
525 | dws->write = (dws->write != null_writer) ? | ||
526 | u16_writer : null_writer; | ||
527 | break; | 470 | break; |
528 | default: | 471 | default: |
529 | printk(KERN_ERR "MRST SPI0: unsupported bits:" | 472 | printk(KERN_ERR "MRST SPI0: unsupported bits:" |
@@ -567,7 +510,7 @@ static void pump_transfers(unsigned long data) | |||
567 | txint_level = dws->fifo_len / 2; | 510 | txint_level = dws->fifo_len / 2; |
568 | txint_level = (templen > txint_level) ? txint_level : templen; | 511 | txint_level = (templen > txint_level) ? txint_level : templen; |
569 | 512 | ||
570 | imask |= SPI_INT_TXEI; | 513 | imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI; |
571 | dws->transfer_handler = interrupt_transfer; | 514 | dws->transfer_handler = interrupt_transfer; |
572 | } | 515 | } |
573 | 516 | ||
@@ -586,7 +529,7 @@ static void pump_transfers(unsigned long data) | |||
586 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); | 529 | spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); |
587 | spi_chip_sel(dws, spi->chip_select); | 530 | spi_chip_sel(dws, spi->chip_select); |
588 | 531 | ||
589 | /* Set the interrupt mask, for poll mode just diable all int */ | 532 | /* Set the interrupt mask, for poll mode just disable all int */ |
590 | spi_mask_intr(dws, 0xff); | 533 | spi_mask_intr(dws, 0xff); |
591 | if (imask) | 534 | if (imask) |
592 | spi_umask_intr(dws, imask); | 535 | spi_umask_intr(dws, imask); |
@@ -599,7 +542,7 @@ static void pump_transfers(unsigned long data) | |||
599 | } | 542 | } |
600 | 543 | ||
601 | if (dws->dma_mapped) | 544 | if (dws->dma_mapped) |
602 | dma_transfer(dws, cs_change); | 545 | dws->dma_ops->dma_transfer(dws, cs_change); |
603 | 546 | ||
604 | if (chip->poll_mode) | 547 | if (chip->poll_mode) |
605 | poll_transfer(dws); | 548 | poll_transfer(dws); |
@@ -725,13 +668,9 @@ static int dw_spi_setup(struct spi_device *spi) | |||
725 | if (spi->bits_per_word <= 8) { | 668 | if (spi->bits_per_word <= 8) { |
726 | chip->n_bytes = 1; | 669 | chip->n_bytes = 1; |
727 | chip->dma_width = 1; | 670 | chip->dma_width = 1; |
728 | chip->read = u8_reader; | ||
729 | chip->write = u8_writer; | ||
730 | } else if (spi->bits_per_word <= 16) { | 671 | } else if (spi->bits_per_word <= 16) { |
731 | chip->n_bytes = 2; | 672 | chip->n_bytes = 2; |
732 | chip->dma_width = 2; | 673 | chip->dma_width = 2; |
733 | chip->read = u16_reader; | ||
734 | chip->write = u16_writer; | ||
735 | } else { | 674 | } else { |
736 | /* Never take >16b case for MRST SPIC */ | 675 | /* Never take >16b case for MRST SPIC */ |
737 | dev_err(&spi->dev, "invalid wordsize\n"); | 676 | dev_err(&spi->dev, "invalid wordsize\n"); |
@@ -813,7 +752,7 @@ static int stop_queue(struct dw_spi *dws) | |||
813 | 752 | ||
814 | spin_lock_irqsave(&dws->lock, flags); | 753 | spin_lock_irqsave(&dws->lock, flags); |
815 | dws->run = QUEUE_STOPPED; | 754 | dws->run = QUEUE_STOPPED; |
816 | while (!list_empty(&dws->queue) && dws->busy && limit--) { | 755 | while ((!list_empty(&dws->queue) || dws->busy) && limit--) { |
817 | spin_unlock_irqrestore(&dws->lock, flags); | 756 | spin_unlock_irqrestore(&dws->lock, flags); |
818 | msleep(10); | 757 | msleep(10); |
819 | spin_lock_irqsave(&dws->lock, flags); | 758 | spin_lock_irqsave(&dws->lock, flags); |
@@ -843,7 +782,6 @@ static void spi_hw_init(struct dw_spi *dws) | |||
843 | spi_enable_chip(dws, 0); | 782 | spi_enable_chip(dws, 0); |
844 | spi_mask_intr(dws, 0xff); | 783 | spi_mask_intr(dws, 0xff); |
845 | spi_enable_chip(dws, 1); | 784 | spi_enable_chip(dws, 1); |
846 | flush(dws); | ||
847 | 785 | ||
848 | /* | 786 | /* |
849 | * Try to detect the FIFO depth if not set by interface driver, | 787 | * Try to detect the FIFO depth if not set by interface driver, |
@@ -895,11 +833,17 @@ int __devinit dw_spi_add_host(struct dw_spi *dws) | |||
895 | master->setup = dw_spi_setup; | 833 | master->setup = dw_spi_setup; |
896 | master->transfer = dw_spi_transfer; | 834 | master->transfer = dw_spi_transfer; |
897 | 835 | ||
898 | dws->dma_inited = 0; | ||
899 | |||
900 | /* Basic HW init */ | 836 | /* Basic HW init */ |
901 | spi_hw_init(dws); | 837 | spi_hw_init(dws); |
902 | 838 | ||
839 | if (dws->dma_ops && dws->dma_ops->dma_init) { | ||
840 | ret = dws->dma_ops->dma_init(dws); | ||
841 | if (ret) { | ||
842 | dev_warn(&master->dev, "DMA init failed\n"); | ||
843 | dws->dma_inited = 0; | ||
844 | } | ||
845 | } | ||
846 | |||
903 | /* Initial and start queue */ | 847 | /* Initial and start queue */ |
904 | ret = init_queue(dws); | 848 | ret = init_queue(dws); |
905 | if (ret) { | 849 | if (ret) { |
@@ -924,6 +868,8 @@ int __devinit dw_spi_add_host(struct dw_spi *dws) | |||
924 | 868 | ||
925 | err_queue_alloc: | 869 | err_queue_alloc: |
926 | destroy_queue(dws); | 870 | destroy_queue(dws); |
871 | if (dws->dma_ops && dws->dma_ops->dma_exit) | ||
872 | dws->dma_ops->dma_exit(dws); | ||
927 | err_diable_hw: | 873 | err_diable_hw: |
928 | spi_enable_chip(dws, 0); | 874 | spi_enable_chip(dws, 0); |
929 | free_irq(dws->irq, dws); | 875 | free_irq(dws->irq, dws); |
@@ -932,7 +878,7 @@ err_free_master: | |||
932 | exit: | 878 | exit: |
933 | return ret; | 879 | return ret; |
934 | } | 880 | } |
935 | EXPORT_SYMBOL(dw_spi_add_host); | 881 | EXPORT_SYMBOL_GPL(dw_spi_add_host); |
936 | 882 | ||
937 | void __devexit dw_spi_remove_host(struct dw_spi *dws) | 883 | void __devexit dw_spi_remove_host(struct dw_spi *dws) |
938 | { | 884 | { |
@@ -948,6 +894,8 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws) | |||
948 | dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " | 894 | dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " |
949 | "complete, message memory not freed\n"); | 895 | "complete, message memory not freed\n"); |
950 | 896 | ||
897 | if (dws->dma_ops && dws->dma_ops->dma_exit) | ||
898 | dws->dma_ops->dma_exit(dws); | ||
951 | spi_enable_chip(dws, 0); | 899 | spi_enable_chip(dws, 0); |
952 | /* Disable clk */ | 900 | /* Disable clk */ |
953 | spi_set_clk(dws, 0); | 901 | spi_set_clk(dws, 0); |
@@ -956,7 +904,7 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws) | |||
956 | /* Disconnect from the SPI framework */ | 904 | /* Disconnect from the SPI framework */ |
957 | spi_unregister_master(dws->master); | 905 | spi_unregister_master(dws->master); |
958 | } | 906 | } |
959 | EXPORT_SYMBOL(dw_spi_remove_host); | 907 | EXPORT_SYMBOL_GPL(dw_spi_remove_host); |
960 | 908 | ||
961 | int dw_spi_suspend_host(struct dw_spi *dws) | 909 | int dw_spi_suspend_host(struct dw_spi *dws) |
962 | { | 910 | { |
@@ -969,7 +917,7 @@ int dw_spi_suspend_host(struct dw_spi *dws) | |||
969 | spi_set_clk(dws, 0); | 917 | spi_set_clk(dws, 0); |
970 | return ret; | 918 | return ret; |
971 | } | 919 | } |
972 | EXPORT_SYMBOL(dw_spi_suspend_host); | 920 | EXPORT_SYMBOL_GPL(dw_spi_suspend_host); |
973 | 921 | ||
974 | int dw_spi_resume_host(struct dw_spi *dws) | 922 | int dw_spi_resume_host(struct dw_spi *dws) |
975 | { | 923 | { |
@@ -981,7 +929,7 @@ int dw_spi_resume_host(struct dw_spi *dws) | |||
981 | dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); | 929 | dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); |
982 | return ret; | 930 | return ret; |
983 | } | 931 | } |
984 | EXPORT_SYMBOL(dw_spi_resume_host); | 932 | EXPORT_SYMBOL_GPL(dw_spi_resume_host); |
985 | 933 | ||
986 | MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); | 934 | MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); |
987 | MODULE_DESCRIPTION("Driver for DesignWare SPI controller core"); | 935 | MODULE_DESCRIPTION("Driver for DesignWare SPI controller core"); |
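The dw_spi.c changes above route DMA through the new dws->dma_ops hooks: dma_init() may fail without aborting the probe, pump_transfers() calls dma_transfer() for mapped messages, and the backend reports completion through the newly exported dw_spi_xfer_done(). A minimal sketch of a backend a bus-glue driver could plug in; the example_* names are invented, only struct dw_spi_dma_ops and dw_spi_xfer_done() come from the driver:

#include "dw_spi.h"

static int example_dma_init(struct dw_spi *dws)
{
	/* a real backend would request channels here; a failure just drops back to PIO */
	dws->dma_inited = 1;
	return 0;
}

static void example_dma_exit(struct dw_spi *dws)
{
	/* release whatever dma_init() acquired */
}

static int example_dma_transfer(struct dw_spi *dws, int cs_change)
{
	/*
	 * A real backend starts the rx and tx channels here and calls
	 * dw_spi_xfer_done(dws) from its completion callback.
	 */
	dw_spi_xfer_done(dws);		/* stub: complete immediately */
	return 0;
}

static struct dw_spi_dma_ops example_dma_ops = {
	.dma_init	= example_dma_init,
	.dma_exit	= example_dma_exit,
	.dma_transfer	= example_dma_transfer,
};

A probe routine would point dws->dma_ops at such a structure before calling dw_spi_add_host(), exactly as dw_spi_mid_init() does with mid_dma_ops in the new dw_spi_mid.c below.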
diff --git a/drivers/spi/dw_spi.h b/drivers/spi/dw_spi.h new file mode 100644 index 000000000000..7a5e78d2a5cb --- /dev/null +++ b/drivers/spi/dw_spi.h | |||
@@ -0,0 +1,232 @@ | |||
1 | #ifndef DW_SPI_HEADER_H | ||
2 | #define DW_SPI_HEADER_H | ||
3 | |||
4 | #include <linux/io.h> | ||
5 | #include <linux/scatterlist.h> | ||
6 | |||
7 | /* Bit fields in CTRLR0 */ | ||
8 | #define SPI_DFS_OFFSET 0 | ||
9 | |||
10 | #define SPI_FRF_OFFSET 4 | ||
11 | #define SPI_FRF_SPI 0x0 | ||
12 | #define SPI_FRF_SSP 0x1 | ||
13 | #define SPI_FRF_MICROWIRE 0x2 | ||
14 | #define SPI_FRF_RESV 0x3 | ||
15 | |||
16 | #define SPI_MODE_OFFSET 6 | ||
17 | #define SPI_SCPH_OFFSET 6 | ||
18 | #define SPI_SCOL_OFFSET 7 | ||
19 | |||
20 | #define SPI_TMOD_OFFSET 8 | ||
21 | #define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) | ||
22 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ | ||
23 | #define SPI_TMOD_TO 0x1 /* xmit only */ | ||
24 | #define SPI_TMOD_RO 0x2 /* recv only */ | ||
25 | #define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ | ||
26 | |||
27 | #define SPI_SLVOE_OFFSET 10 | ||
28 | #define SPI_SRL_OFFSET 11 | ||
29 | #define SPI_CFS_OFFSET 12 | ||
30 | |||
31 | /* Bit fields in SR, 7 bits */ | ||
32 | #define SR_MASK 0x7f /* cover 7 bits */ | ||
33 | #define SR_BUSY (1 << 0) | ||
34 | #define SR_TF_NOT_FULL (1 << 1) | ||
35 | #define SR_TF_EMPT (1 << 2) | ||
36 | #define SR_RF_NOT_EMPT (1 << 3) | ||
37 | #define SR_RF_FULL (1 << 4) | ||
38 | #define SR_TX_ERR (1 << 5) | ||
39 | #define SR_DCOL (1 << 6) | ||
40 | |||
41 | /* Bit fields in ISR, IMR, RISR, 7 bits */ | ||
42 | #define SPI_INT_TXEI (1 << 0) | ||
43 | #define SPI_INT_TXOI (1 << 1) | ||
44 | #define SPI_INT_RXUI (1 << 2) | ||
45 | #define SPI_INT_RXOI (1 << 3) | ||
46 | #define SPI_INT_RXFI (1 << 4) | ||
47 | #define SPI_INT_MSTI (1 << 5) | ||
48 | |||
49 | /* TX RX interrupt level threshold, max can be 256 */ | ||
50 | #define SPI_INT_THRESHOLD 32 | ||
51 | |||
52 | enum dw_ssi_type { | ||
53 | SSI_MOTO_SPI = 0, | ||
54 | SSI_TI_SSP, | ||
55 | SSI_NS_MICROWIRE, | ||
56 | }; | ||
57 | |||
58 | struct dw_spi_reg { | ||
59 | u32 ctrl0; | ||
60 | u32 ctrl1; | ||
61 | u32 ssienr; | ||
62 | u32 mwcr; | ||
63 | u32 ser; | ||
64 | u32 baudr; | ||
65 | u32 txfltr; | ||
66 | u32 rxfltr; | ||
67 | u32 txflr; | ||
68 | u32 rxflr; | ||
69 | u32 sr; | ||
70 | u32 imr; | ||
71 | u32 isr; | ||
72 | u32 risr; | ||
73 | u32 txoicr; | ||
74 | u32 rxoicr; | ||
75 | u32 rxuicr; | ||
76 | u32 msticr; | ||
77 | u32 icr; | ||
78 | u32 dmacr; | ||
79 | u32 dmatdlr; | ||
80 | u32 dmardlr; | ||
81 | u32 idr; | ||
82 | u32 version; | ||
83 | u32 dr; /* Currently operated as 32 bits, | ||
84 | though only the low 16 bits matter */ | ||
85 | } __packed; | ||
86 | |||
87 | struct dw_spi; | ||
88 | struct dw_spi_dma_ops { | ||
89 | int (*dma_init)(struct dw_spi *dws); | ||
90 | void (*dma_exit)(struct dw_spi *dws); | ||
91 | int (*dma_transfer)(struct dw_spi *dws, int cs_change); | ||
92 | }; | ||
93 | |||
94 | struct dw_spi { | ||
95 | struct spi_master *master; | ||
96 | struct spi_device *cur_dev; | ||
97 | struct device *parent_dev; | ||
98 | enum dw_ssi_type type; | ||
99 | |||
100 | void __iomem *regs; | ||
101 | unsigned long paddr; | ||
102 | u32 iolen; | ||
103 | int irq; | ||
104 | u32 fifo_len; /* depth of the FIFO buffer */ | ||
105 | u32 max_freq; /* max bus freq supported */ | ||
106 | |||
107 | u16 bus_num; | ||
108 | u16 num_cs; /* supported slave numbers */ | ||
109 | |||
110 | /* Driver message queue */ | ||
111 | struct workqueue_struct *workqueue; | ||
112 | struct work_struct pump_messages; | ||
113 | spinlock_t lock; | ||
114 | struct list_head queue; | ||
115 | int busy; | ||
116 | int run; | ||
117 | |||
118 | /* Message Transfer pump */ | ||
119 | struct tasklet_struct pump_transfers; | ||
120 | |||
121 | /* Current message transfer state info */ | ||
122 | struct spi_message *cur_msg; | ||
123 | struct spi_transfer *cur_transfer; | ||
124 | struct chip_data *cur_chip; | ||
125 | struct chip_data *prev_chip; | ||
126 | size_t len; | ||
127 | void *tx; | ||
128 | void *tx_end; | ||
129 | void *rx; | ||
130 | void *rx_end; | ||
131 | int dma_mapped; | ||
132 | dma_addr_t rx_dma; | ||
133 | dma_addr_t tx_dma; | ||
134 | size_t rx_map_len; | ||
135 | size_t tx_map_len; | ||
136 | u8 n_bytes; /* current transfer uses 1 or 2 bytes per word */ | ||
137 | u8 max_bits_per_word; /* maximum is 16b */ | ||
138 | u32 dma_width; | ||
139 | int cs_change; | ||
140 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); | ||
141 | void (*cs_control)(u32 command); | ||
142 | |||
143 | /* Dma info */ | ||
144 | int dma_inited; | ||
145 | struct dma_chan *txchan; | ||
146 | struct scatterlist tx_sgl; | ||
147 | struct dma_chan *rxchan; | ||
148 | struct scatterlist rx_sgl; | ||
149 | int dma_chan_done; | ||
150 | struct device *dma_dev; | ||
151 | dma_addr_t dma_addr; /* phy address of the Data register */ | ||
152 | struct dw_spi_dma_ops *dma_ops; | ||
153 | void *dma_priv; /* platform relate info */ | ||
154 | struct pci_dev *dmac; | ||
155 | |||
156 | /* Bus interface info */ | ||
157 | void *priv; | ||
158 | #ifdef CONFIG_DEBUG_FS | ||
159 | struct dentry *debugfs; | ||
160 | #endif | ||
161 | }; | ||
162 | |||
163 | #define dw_readl(dw, name) \ | ||
164 | __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name)) | ||
165 | #define dw_writel(dw, name, val) \ | ||
166 | __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name)) | ||
167 | #define dw_readw(dw, name) \ | ||
168 | __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name)) | ||
169 | #define dw_writew(dw, name, val) \ | ||
170 | __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name)) | ||
171 | |||
172 | static inline void spi_enable_chip(struct dw_spi *dws, int enable) | ||
173 | { | ||
174 | dw_writel(dws, ssienr, (enable ? 1 : 0)); | ||
175 | } | ||
176 | |||
177 | static inline void spi_set_clk(struct dw_spi *dws, u16 div) | ||
178 | { | ||
179 | dw_writel(dws, baudr, div); | ||
180 | } | ||
181 | |||
182 | static inline void spi_chip_sel(struct dw_spi *dws, u16 cs) | ||
183 | { | ||
184 | if (cs > dws->num_cs) | ||
185 | return; | ||
186 | |||
187 | if (dws->cs_control) | ||
188 | dws->cs_control(1); | ||
189 | |||
190 | dw_writel(dws, ser, 1 << cs); | ||
191 | } | ||
192 | |||
193 | /* Disable IRQ bits */ | ||
194 | static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) | ||
195 | { | ||
196 | u32 new_mask; | ||
197 | |||
198 | new_mask = dw_readl(dws, imr) & ~mask; | ||
199 | dw_writel(dws, imr, new_mask); | ||
200 | } | ||
201 | |||
202 | /* Enable IRQ bits */ | ||
203 | static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) | ||
204 | { | ||
205 | u32 new_mask; | ||
206 | |||
207 | new_mask = dw_readl(dws, imr) | mask; | ||
208 | dw_writel(dws, imr, new_mask); | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * Each SPI slave device that works with the dw_spi controller should | ||
213 | * have such a structure claiming its working mode (PIO/DMA etc.), | ||
214 | * which can be saved in the "controller_data" member of the | ||
215 | * struct spi_device. | ||
216 | */ | ||
217 | struct dw_spi_chip { | ||
218 | u8 poll_mode; /* nonzero selects controller polling mode */ | ||
219 | u8 type; /* SPI/SSP/Microwire */ | ||
220 | u8 enable_dma; | ||
221 | void (*cs_control)(u32 command); | ||
222 | }; | ||
223 | |||
224 | extern int dw_spi_add_host(struct dw_spi *dws); | ||
225 | extern void dw_spi_remove_host(struct dw_spi *dws); | ||
226 | extern int dw_spi_suspend_host(struct dw_spi *dws); | ||
227 | extern int dw_spi_resume_host(struct dw_spi *dws); | ||
228 | extern void dw_spi_xfer_done(struct dw_spi *dws); | ||
229 | |||
230 | /* platform related setup */ | ||
231 | extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */ | ||
232 | #endif /* DW_SPI_HEADER_H */ | ||
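As the comment above struct dw_spi_chip says, a slave hands this structure to the core through spi_device->controller_data. A sketch of how a board file might wire that up; the modalias, bus number, chip select and speed here are illustrative values, and the header is assumed to be visible to board code:

#include <linux/spi/spi.h>
#include "dw_spi.h"

static struct dw_spi_chip example_chip = {
	.poll_mode	= 1,		/* use poll_transfer() rather than the TXEI irq */
	.type		= SSI_MOTO_SPI,	/* plain SPI framing */
	.enable_dma	= 0,
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	 = "spidev",	/* hypothetical slave driver */
		.max_speed_hz	 = 1000000,
		.bus_num	 = 0,
		.chip_select	 = 0,
		.controller_data = &example_chip,
	},
};

/* registered once from board init code:
 *	spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info));
 */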
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/dw_spi_mid.c new file mode 100644 index 000000000000..489178243d88 --- /dev/null +++ b/drivers/spi/dw_spi_mid.c | |||
@@ -0,0 +1,224 @@ | |||
1 | /* | ||
2 | * dw_spi_mid.c - special handling for DW core on Intel MID platform | ||
3 | * | ||
4 | * Copyright (c) 2009, Intel Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License along | ||
16 | * with this program; if not, write to the Free Software Foundation, | ||
17 | * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/dmaengine.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/spi/spi.h> | ||
25 | |||
26 | #include "dw_spi.h" | ||
27 | |||
28 | #ifdef CONFIG_SPI_DW_MID_DMA | ||
29 | #include <linux/intel_mid_dma.h> | ||
30 | #include <linux/pci.h> | ||
31 | |||
32 | struct mid_dma { | ||
33 | struct intel_mid_dma_slave dmas_tx; | ||
34 | struct intel_mid_dma_slave dmas_rx; | ||
35 | }; | ||
36 | |||
37 | static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param) | ||
38 | { | ||
39 | struct dw_spi *dws = param; | ||
40 | |||
41 | return dws->dmac && (&dws->dmac->dev == chan->device->dev); | ||
42 | } | ||
43 | |||
44 | static int mid_spi_dma_init(struct dw_spi *dws) | ||
45 | { | ||
46 | struct mid_dma *dw_dma = dws->dma_priv; | ||
47 | struct intel_mid_dma_slave *rxs, *txs; | ||
48 | dma_cap_mask_t mask; | ||
49 | |||
50 | /* | ||
51 | * Get pci device for DMA controller, currently it could only | ||
52 | * be the DMA controller of either Moorestown or Medfield | ||
53 | */ | ||
54 | dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL); | ||
55 | if (!dws->dmac) | ||
56 | dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL); | ||
57 | |||
58 | dma_cap_zero(mask); | ||
59 | dma_cap_set(DMA_SLAVE, mask); | ||
60 | |||
61 | /* 1. Init rx channel */ | ||
62 | dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); | ||
63 | if (!dws->rxchan) | ||
64 | goto err_exit; | ||
65 | rxs = &dw_dma->dmas_rx; | ||
66 | rxs->hs_mode = LNW_DMA_HW_HS; | ||
67 | rxs->cfg_mode = LNW_DMA_PER_TO_MEM; | ||
68 | dws->rxchan->private = rxs; | ||
69 | |||
70 | /* 2. Init tx channel */ | ||
71 | dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws); | ||
72 | if (!dws->txchan) | ||
73 | goto free_rxchan; | ||
74 | txs = &dw_dma->dmas_tx; | ||
75 | txs->hs_mode = LNW_DMA_HW_HS; | ||
76 | txs->cfg_mode = LNW_DMA_MEM_TO_PER; | ||
77 | dws->txchan->private = txs; | ||
78 | |||
79 | dws->dma_inited = 1; | ||
80 | return 0; | ||
81 | |||
82 | free_rxchan: | ||
83 | dma_release_channel(dws->rxchan); | ||
84 | err_exit: | ||
85 | return -1; | ||
86 | |||
87 | } | ||
88 | |||
89 | static void mid_spi_dma_exit(struct dw_spi *dws) | ||
90 | { | ||
91 | dma_release_channel(dws->txchan); | ||
92 | dma_release_channel(dws->rxchan); | ||
93 | } | ||
94 | |||
95 | /* | ||
96 | * dws->dma_chan_done is cleared before the dma transfer starts; | ||
97 | * the rx and tx channel callbacks each increment it by 1. | ||
98 | * Reaching 2 means the whole spi transaction is done. | ||
99 | */ | ||
100 | static void dw_spi_dma_done(void *arg) | ||
101 | { | ||
102 | struct dw_spi *dws = arg; | ||
103 | |||
104 | if (++dws->dma_chan_done != 2) | ||
105 | return; | ||
106 | dw_spi_xfer_done(dws); | ||
107 | } | ||
108 | |||
109 | static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change) | ||
110 | { | ||
111 | struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL; | ||
112 | struct dma_chan *txchan, *rxchan; | ||
113 | struct dma_slave_config txconf, rxconf; | ||
114 | u16 dma_ctrl = 0; | ||
115 | |||
116 | /* 1. setup DMA related registers */ | ||
117 | if (cs_change) { | ||
118 | spi_enable_chip(dws, 0); | ||
119 | dw_writew(dws, dmardlr, 0xf); | ||
120 | dw_writew(dws, dmatdlr, 0x10); | ||
121 | if (dws->tx_dma) | ||
122 | dma_ctrl |= 0x2; | ||
123 | if (dws->rx_dma) | ||
124 | dma_ctrl |= 0x1; | ||
125 | dw_writew(dws, dmacr, dma_ctrl); | ||
126 | spi_enable_chip(dws, 1); | ||
127 | } | ||
128 | |||
129 | dws->dma_chan_done = 0; | ||
130 | txchan = dws->txchan; | ||
131 | rxchan = dws->rxchan; | ||
132 | |||
133 | /* 2. Prepare the TX dma transfer */ | ||
134 | txconf.direction = DMA_TO_DEVICE; | ||
135 | txconf.dst_addr = dws->dma_addr; | ||
136 | txconf.dst_maxburst = LNW_DMA_MSIZE_16; | ||
137 | txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
138 | txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
139 | |||
140 | txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, | ||
141 | (unsigned long) &txconf); | ||
142 | |||
143 | memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl)); | ||
144 | dws->tx_sgl.dma_address = dws->tx_dma; | ||
145 | dws->tx_sgl.length = dws->len; | ||
146 | |||
147 | txdesc = txchan->device->device_prep_slave_sg(txchan, | ||
148 | &dws->tx_sgl, | ||
149 | 1, | ||
150 | DMA_TO_DEVICE, | ||
151 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | ||
152 | txdesc->callback = dw_spi_dma_done; | ||
153 | txdesc->callback_param = dws; | ||
154 | |||
155 | /* 3. Prepare the RX dma transfer */ | ||
156 | rxconf.direction = DMA_FROM_DEVICE; | ||
157 | rxconf.src_addr = dws->dma_addr; | ||
158 | rxconf.src_maxburst = LNW_DMA_MSIZE_16; | ||
159 | rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
160 | rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
161 | |||
162 | rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, | ||
163 | (unsigned long) &rxconf); | ||
164 | |||
165 | memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl)); | ||
166 | dws->rx_sgl.dma_address = dws->rx_dma; | ||
167 | dws->rx_sgl.length = dws->len; | ||
168 | |||
169 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | ||
170 | &dws->rx_sgl, | ||
171 | 1, | ||
172 | DMA_FROM_DEVICE, | ||
173 | DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP); | ||
174 | rxdesc->callback = dw_spi_dma_done; | ||
175 | rxdesc->callback_param = dws; | ||
176 | |||
177 | /* rx must be started before tx because SPI receives while it transmits */ | ||
178 | rxdesc->tx_submit(rxdesc); | ||
179 | txdesc->tx_submit(txdesc); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static struct dw_spi_dma_ops mid_dma_ops = { | ||
184 | .dma_init = mid_spi_dma_init, | ||
185 | .dma_exit = mid_spi_dma_exit, | ||
186 | .dma_transfer = mid_spi_dma_transfer, | ||
187 | }; | ||
188 | #endif | ||
189 | |||
190 | /* Some specific info for SPI0 controller on Moorestown */ | ||
191 | |||
192 | /* HW info for MRST CLk Control Unit, one 32b reg */ | ||
193 | #define MRST_SPI_CLK_BASE 100000000 /* 100m */ | ||
194 | #define MRST_CLK_SPI0_REG 0xff11d86c | ||
195 | #define CLK_SPI_BDIV_OFFSET 0 | ||
196 | #define CLK_SPI_BDIV_MASK 0x00000007 | ||
197 | #define CLK_SPI_CDIV_OFFSET 9 | ||
198 | #define CLK_SPI_CDIV_MASK 0x00000e00 | ||
199 | #define CLK_SPI_DISABLE_OFFSET 8 | ||
200 | |||
201 | int dw_spi_mid_init(struct dw_spi *dws) | ||
202 | { | ||
203 | u32 *clk_reg, clk_cdiv; | ||
204 | |||
205 | clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16); | ||
206 | if (!clk_reg) | ||
207 | return -ENOMEM; | ||
208 | |||
209 | /* get SPI controller operating freq info */ | ||
210 | clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET; | ||
211 | dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1); | ||
212 | iounmap(clk_reg); | ||
213 | |||
214 | dws->num_cs = 16; | ||
215 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | ||
216 | |||
217 | #ifdef CONFIG_SPI_DW_MID_DMA | ||
218 | dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL); | ||
219 | if (!dws->dma_priv) | ||
220 | return -ENOMEM; | ||
221 | dws->dma_ops = &mid_dma_ops; | ||
222 | #endif | ||
223 | return 0; | ||
224 | } | ||
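To make the clock math in dw_spi_mid_init() concrete: MRST_SPI_CLK_BASE is 100 MHz, so a CDIV field that reads back as 3 yields max_freq = 100000000 / (3 + 1) = 25 MHz, the same 25000000 the PCI glue used to hard-code (see the line removed from dw_spi_pci.c below).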
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c index db35bd9c1b24..e0e813dad150 100644 --- a/drivers/spi/dw_spi_mmio.c +++ b/drivers/spi/dw_spi_mmio.c | |||
@@ -9,11 +9,14 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
12 | #include <linux/err.h> | ||
12 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
13 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
15 | #include <linux/spi/dw_spi.h> | ||
16 | #include <linux/spi/spi.h> | 16 | #include <linux/spi/spi.h> |
17 | #include <linux/scatterlist.h> | ||
18 | |||
19 | #include "dw_spi.h" | ||
17 | 20 | ||
18 | #define DRIVER_NAME "dw_spi_mmio" | 21 | #define DRIVER_NAME "dw_spi_mmio" |
19 | 22 | ||
@@ -68,8 +71,8 @@ static int __devinit dw_spi_mmio_probe(struct platform_device *pdev) | |||
68 | } | 71 | } |
69 | 72 | ||
70 | dwsmmio->clk = clk_get(&pdev->dev, NULL); | 73 | dwsmmio->clk = clk_get(&pdev->dev, NULL); |
71 | if (!dwsmmio->clk) { | 74 | if (IS_ERR(dwsmmio->clk)) { |
72 | ret = -ENODEV; | 75 | ret = PTR_ERR(dwsmmio->clk); |
73 | goto err_irq; | 76 | goto err_irq; |
74 | } | 77 | } |
75 | clk_enable(dwsmmio->clk); | 78 | clk_enable(dwsmmio->clk); |
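The clk_get() fix above matters because the clock API reports failure as an ERR_PTR-encoded pointer rather than NULL, so a NULL check misses the error entirely. The general idiom, as a stand-alone sketch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* decode the errno (e.g. -ENOENT) from the pointer */

	clk_enable(clk);
	*out = clk;
	return 0;
}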
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c index 1f52755dc878..ad260aa5e526 100644 --- a/drivers/spi/dw_spi_pci.c +++ b/drivers/spi/dw_spi_pci.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * mrst_spi_pci.c - PCI interface driver for DW SPI Core | 2 | * dw_spi_pci.c - PCI interface driver for DW SPI Core |
3 | * | 3 | * |
4 | * Copyright (c) 2009, Intel Corporation. | 4 | * Copyright (c) 2009, Intel Corporation. |
5 | * | 5 | * |
@@ -20,14 +20,15 @@ | |||
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/spi/dw_spi.h> | ||
24 | #include <linux/spi/spi.h> | 23 | #include <linux/spi/spi.h> |
25 | 24 | ||
25 | #include "dw_spi.h" | ||
26 | |||
26 | #define DRIVER_NAME "dw_spi_pci" | 27 | #define DRIVER_NAME "dw_spi_pci" |
27 | 28 | ||
28 | struct dw_spi_pci { | 29 | struct dw_spi_pci { |
29 | struct pci_dev *pdev; | 30 | struct pci_dev *pdev; |
30 | struct dw_spi dws; | 31 | struct dw_spi dws; |
31 | }; | 32 | }; |
32 | 33 | ||
33 | static int __devinit spi_pci_probe(struct pci_dev *pdev, | 34 | static int __devinit spi_pci_probe(struct pci_dev *pdev, |
@@ -72,9 +73,17 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev, | |||
72 | dws->parent_dev = &pdev->dev; | 73 | dws->parent_dev = &pdev->dev; |
73 | dws->bus_num = 0; | 74 | dws->bus_num = 0; |
74 | dws->num_cs = 4; | 75 | dws->num_cs = 4; |
75 | dws->max_freq = 25000000; /* for Moorestwon */ | ||
76 | dws->irq = pdev->irq; | 76 | dws->irq = pdev->irq; |
77 | dws->fifo_len = 40; /* FIFO has 40 words buffer */ | 77 | |
78 | /* | ||
79 | * Specific handling for Intel MID platforms, like dma setup, | ||
80 | * clock rate, FIFO depth. | ||
81 | */ | ||
82 | if (pdev->device == 0x0800) { | ||
83 | ret = dw_spi_mid_init(dws); | ||
84 | if (ret) | ||
85 | goto err_unmap; | ||
86 | } | ||
78 | 87 | ||
79 | ret = dw_spi_add_host(dws); | 88 | ret = dw_spi_add_host(dws); |
80 | if (ret) | 89 | if (ret) |
@@ -140,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev) | |||
140 | #endif | 149 | #endif |
141 | 150 | ||
142 | static const struct pci_device_id pci_ids[] __devinitdata = { | 151 | static const struct pci_device_id pci_ids[] __devinitdata = { |
143 | /* Intel Moorestown platform SPI controller 0 */ | 152 | /* Intel MID platform SPI controller 0 */ |
144 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, | 153 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, |
145 | {}, | 154 | {}, |
146 | }; | 155 | }; |
diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/ep93xx_spi.c index 0ba35df9a6df..d3570071e98f 100644 --- a/drivers/spi/ep93xx_spi.c +++ b/drivers/spi/ep93xx_spi.c | |||
@@ -512,7 +512,7 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi) | |||
512 | * | 512 | * |
513 | * This function processes one SPI transfer given in @t. Function waits until | 513 | * This function processes one SPI transfer given in @t. Function waits until |
514 | * transfer is complete (may sleep) and updates @msg->status based on whether | 514 | * transfer is complete (may sleep) and updates @msg->status based on whether |
515 | * transfer was succesfully processed or not. | 515 | * transfer was successfully processed or not. |
516 | */ | 516 | */ |
517 | static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, | 517 | static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi, |
518 | struct spi_message *msg, | 518 | struct spi_message *msg, |
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c index 77d9e7ee8b27..6a5b4238fb6b 100644 --- a/drivers/spi/mpc512x_psc_spi.c +++ b/drivers/spi/mpc512x_psc_spi.c | |||
@@ -507,8 +507,7 @@ static int __devexit mpc512x_psc_spi_do_remove(struct device *dev) | |||
507 | return 0; | 507 | return 0; |
508 | } | 508 | } |
509 | 509 | ||
510 | static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op, | 510 | static int __devinit mpc512x_psc_spi_of_probe(struct platform_device *op) |
511 | const struct of_device_id *match) | ||
512 | { | 511 | { |
513 | const u32 *regaddr_p; | 512 | const u32 *regaddr_p; |
514 | u64 regaddr64, size64; | 513 | u64 regaddr64, size64; |
@@ -551,7 +550,7 @@ static struct of_device_id mpc512x_psc_spi_of_match[] = { | |||
551 | 550 | ||
552 | MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); | 551 | MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match); |
553 | 552 | ||
554 | static struct of_platform_driver mpc512x_psc_spi_of_driver = { | 553 | static struct platform_driver mpc512x_psc_spi_of_driver = { |
555 | .probe = mpc512x_psc_spi_of_probe, | 554 | .probe = mpc512x_psc_spi_of_probe, |
556 | .remove = __devexit_p(mpc512x_psc_spi_of_remove), | 555 | .remove = __devexit_p(mpc512x_psc_spi_of_remove), |
557 | .driver = { | 556 | .driver = { |
@@ -563,13 +562,13 @@ static struct of_platform_driver mpc512x_psc_spi_of_driver = { | |||
563 | 562 | ||
564 | static int __init mpc512x_psc_spi_init(void) | 563 | static int __init mpc512x_psc_spi_init(void) |
565 | { | 564 | { |
566 | return of_register_platform_driver(&mpc512x_psc_spi_of_driver); | 565 | return platform_driver_register(&mpc512x_psc_spi_of_driver); |
567 | } | 566 | } |
568 | module_init(mpc512x_psc_spi_init); | 567 | module_init(mpc512x_psc_spi_init); |
569 | 568 | ||
570 | static void __exit mpc512x_psc_spi_exit(void) | 569 | static void __exit mpc512x_psc_spi_exit(void) |
571 | { | 570 | { |
572 | of_unregister_platform_driver(&mpc512x_psc_spi_of_driver); | 571 | platform_driver_unregister(&mpc512x_psc_spi_of_driver); |
573 | } | 572 | } |
574 | module_exit(mpc512x_psc_spi_exit); | 573 | module_exit(mpc512x_psc_spi_exit); |
575 | 574 | ||
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 983fbbfce76e..e30baf0852ac 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c | |||
@@ -363,7 +363,7 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) | |||
363 | } | 363 | } |
364 | 364 | ||
365 | /* bus_num is used only for the case dev->platform_data == NULL */ | 365 | /* bus_num is used only for the case dev->platform_data == NULL */ |
366 | static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, | 366 | static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, |
367 | u32 size, unsigned int irq, s16 bus_num) | 367 | u32 size, unsigned int irq, s16 bus_num) |
368 | { | 368 | { |
369 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 369 | struct fsl_spi_platform_data *pdata = dev->platform_data; |
@@ -450,23 +450,7 @@ free_master: | |||
450 | return ret; | 450 | return ret; |
451 | } | 451 | } |
452 | 452 | ||
453 | static int __exit mpc52xx_psc_spi_do_remove(struct device *dev) | 453 | static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op) |
454 | { | ||
455 | struct spi_master *master = dev_get_drvdata(dev); | ||
456 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); | ||
457 | |||
458 | flush_workqueue(mps->workqueue); | ||
459 | destroy_workqueue(mps->workqueue); | ||
460 | spi_unregister_master(master); | ||
461 | free_irq(mps->irq, mps); | ||
462 | if (mps->psc) | ||
463 | iounmap(mps->psc); | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op, | ||
469 | const struct of_device_id *match) | ||
470 | { | 454 | { |
471 | const u32 *regaddr_p; | 455 | const u32 *regaddr_p; |
472 | u64 regaddr64, size64; | 456 | u64 regaddr64, size64; |
@@ -495,9 +479,19 @@ static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op, | |||
495 | irq_of_parse_and_map(op->dev.of_node, 0), id); | 479 | irq_of_parse_and_map(op->dev.of_node, 0), id); |
496 | } | 480 | } |
497 | 481 | ||
498 | static int __exit mpc52xx_psc_spi_of_remove(struct platform_device *op) | 482 | static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op) |
499 | { | 483 | { |
500 | return mpc52xx_psc_spi_do_remove(&op->dev); | 484 | struct spi_master *master = dev_get_drvdata(&op->dev); |
485 | struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master); | ||
486 | |||
487 | flush_workqueue(mps->workqueue); | ||
488 | destroy_workqueue(mps->workqueue); | ||
489 | spi_unregister_master(master); | ||
490 | free_irq(mps->irq, mps); | ||
491 | if (mps->psc) | ||
492 | iounmap(mps->psc); | ||
493 | |||
494 | return 0; | ||
501 | } | 495 | } |
502 | 496 | ||
503 | static const struct of_device_id mpc52xx_psc_spi_of_match[] = { | 497 | static const struct of_device_id mpc52xx_psc_spi_of_match[] = { |
@@ -508,9 +502,9 @@ static const struct of_device_id mpc52xx_psc_spi_of_match[] = { | |||
508 | 502 | ||
509 | MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); | 503 | MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match); |
510 | 504 | ||
511 | static struct of_platform_driver mpc52xx_psc_spi_of_driver = { | 505 | static struct platform_driver mpc52xx_psc_spi_of_driver = { |
512 | .probe = mpc52xx_psc_spi_of_probe, | 506 | .probe = mpc52xx_psc_spi_of_probe, |
513 | .remove = __exit_p(mpc52xx_psc_spi_of_remove), | 507 | .remove = __devexit_p(mpc52xx_psc_spi_of_remove), |
514 | .driver = { | 508 | .driver = { |
515 | .name = "mpc52xx-psc-spi", | 509 | .name = "mpc52xx-psc-spi", |
516 | .owner = THIS_MODULE, | 510 | .owner = THIS_MODULE, |
@@ -520,13 +514,13 @@ static struct of_platform_driver mpc52xx_psc_spi_of_driver = { | |||
520 | 514 | ||
521 | static int __init mpc52xx_psc_spi_init(void) | 515 | static int __init mpc52xx_psc_spi_init(void) |
522 | { | 516 | { |
523 | return of_register_platform_driver(&mpc52xx_psc_spi_of_driver); | 517 | return platform_driver_register(&mpc52xx_psc_spi_of_driver); |
524 | } | 518 | } |
525 | module_init(mpc52xx_psc_spi_init); | 519 | module_init(mpc52xx_psc_spi_init); |
526 | 520 | ||
527 | static void __exit mpc52xx_psc_spi_exit(void) | 521 | static void __exit mpc52xx_psc_spi_exit(void) |
528 | { | 522 | { |
529 | of_unregister_platform_driver(&mpc52xx_psc_spi_of_driver); | 523 | platform_driver_unregister(&mpc52xx_psc_spi_of_driver); |
530 | } | 524 | } |
531 | module_exit(mpc52xx_psc_spi_exit); | 525 | module_exit(mpc52xx_psc_spi_exit); |
532 | 526 | ||
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index ec9f0b1bf864..015a974bed72 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c | |||
@@ -390,8 +390,7 @@ static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m) | |||
390 | /* | 390 | /* |
391 | * OF Platform Bus Binding | 391 | * OF Platform Bus Binding |
392 | */ | 392 | */ |
393 | static int __devinit mpc52xx_spi_probe(struct platform_device *op, | 393 | static int __devinit mpc52xx_spi_probe(struct platform_device *op) |
394 | const struct of_device_id *match) | ||
395 | { | 394 | { |
396 | struct spi_master *master; | 395 | struct spi_master *master; |
397 | struct mpc52xx_spi *ms; | 396 | struct mpc52xx_spi *ms; |
@@ -556,25 +555,25 @@ static const struct of_device_id mpc52xx_spi_match[] __devinitconst = { | |||
556 | }; | 555 | }; |
557 | MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); | 556 | MODULE_DEVICE_TABLE(of, mpc52xx_spi_match); |
558 | 557 | ||
559 | static struct of_platform_driver mpc52xx_spi_of_driver = { | 558 | static struct platform_driver mpc52xx_spi_of_driver = { |
560 | .driver = { | 559 | .driver = { |
561 | .name = "mpc52xx-spi", | 560 | .name = "mpc52xx-spi", |
562 | .owner = THIS_MODULE, | 561 | .owner = THIS_MODULE, |
563 | .of_match_table = mpc52xx_spi_match, | 562 | .of_match_table = mpc52xx_spi_match, |
564 | }, | 563 | }, |
565 | .probe = mpc52xx_spi_probe, | 564 | .probe = mpc52xx_spi_probe, |
566 | .remove = __exit_p(mpc52xx_spi_remove), | 565 | .remove = __devexit_p(mpc52xx_spi_remove), |
567 | }; | 566 | }; |
568 | 567 | ||
569 | static int __init mpc52xx_spi_init(void) | 568 | static int __init mpc52xx_spi_init(void) |
570 | { | 569 | { |
571 | return of_register_platform_driver(&mpc52xx_spi_of_driver); | 570 | return platform_driver_register(&mpc52xx_spi_of_driver); |
572 | } | 571 | } |
573 | module_init(mpc52xx_spi_init); | 572 | module_init(mpc52xx_spi_init); |
574 | 573 | ||
575 | static void __exit mpc52xx_spi_exit(void) | 574 | static void __exit mpc52xx_spi_exit(void) |
576 | { | 575 | { |
577 | of_unregister_platform_driver(&mpc52xx_spi_of_driver); | 576 | platform_driver_unregister(&mpc52xx_spi_of_driver); |
578 | } | 577 | } |
579 | module_exit(mpc52xx_spi_exit); | 578 | module_exit(mpc52xx_spi_exit); |
580 | 579 | ||
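The mpc512x, mpc52xx_psc and mpc52xx conversions above all follow the same recipe for retiring of_platform_driver: the probe drops its const struct of_device_id *match argument, the driver becomes a plain platform_driver carrying .of_match_table, and registration switches to platform_driver_register(). A condensed sketch of the resulting shape, with placeholder names and compatible string:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "example,spi" },	/* placeholder */
	{},
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int __devinit example_probe(struct platform_device *op)
{
	/* the matched entry is no longer passed in; if needed, call
	 * of_match_device(example_of_match, &op->dev) instead */
	return 0;
}

static int __devexit example_remove(struct platform_device *op)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),
	.driver	= {
		.name		= "example-spi",
		.owner		= THIS_MODULE,
		.of_match_table	= example_of_match,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}
module_exit(example_exit);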
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index b3a94ca0a75a..969cdd2fe124 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2005, 2006 Nokia Corporation | 4 | * Copyright (C) 2005, 2006 Nokia Corporation |
5 | * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and | 5 | * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and |
6 | * Juha Yrjölä <juha.yrjola@nokia.com> | 6 | * Juha Yrjölä <juha.yrjola@nokia.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 9 | * it under the terms of the GNU General Public License as published by |
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/clk.h> | 33 | #include <linux/clk.h> |
34 | #include <linux/io.h> | 34 | #include <linux/io.h> |
35 | #include <linux/slab.h> | 35 | #include <linux/slab.h> |
36 | #include <linux/pm_runtime.h> | ||
36 | 37 | ||
37 | #include <linux/spi/spi.h> | 38 | #include <linux/spi/spi.h> |
38 | 39 | ||
@@ -46,7 +47,6 @@ | |||
46 | #define OMAP2_MCSPI_MAX_CTRL 4 | 47 | #define OMAP2_MCSPI_MAX_CTRL 4 |
47 | 48 | ||
48 | #define OMAP2_MCSPI_REVISION 0x00 | 49 | #define OMAP2_MCSPI_REVISION 0x00 |
49 | #define OMAP2_MCSPI_SYSCONFIG 0x10 | ||
50 | #define OMAP2_MCSPI_SYSSTATUS 0x14 | 50 | #define OMAP2_MCSPI_SYSSTATUS 0x14 |
51 | #define OMAP2_MCSPI_IRQSTATUS 0x18 | 51 | #define OMAP2_MCSPI_IRQSTATUS 0x18 |
52 | #define OMAP2_MCSPI_IRQENABLE 0x1c | 52 | #define OMAP2_MCSPI_IRQENABLE 0x1c |
@@ -63,13 +63,6 @@ | |||
63 | 63 | ||
64 | /* per-register bitmasks: */ | 64 | /* per-register bitmasks: */ |
65 | 65 | ||
66 | #define OMAP2_MCSPI_SYSCONFIG_SMARTIDLE BIT(4) | ||
67 | #define OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP BIT(2) | ||
68 | #define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE BIT(0) | ||
69 | #define OMAP2_MCSPI_SYSCONFIG_SOFTRESET BIT(1) | ||
70 | |||
71 | #define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0) | ||
72 | |||
73 | #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) | 66 | #define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0) |
74 | #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) | 67 | #define OMAP2_MCSPI_MODULCTRL_MS BIT(2) |
75 | #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) | 68 | #define OMAP2_MCSPI_MODULCTRL_STEST BIT(3) |
@@ -122,13 +115,12 @@ struct omap2_mcspi { | |||
122 | spinlock_t lock; | 115 | spinlock_t lock; |
123 | struct list_head msg_queue; | 116 | struct list_head msg_queue; |
124 | struct spi_master *master; | 117 | struct spi_master *master; |
125 | struct clk *ick; | ||
126 | struct clk *fck; | ||
127 | /* Virtual base address of the controller */ | 118 | /* Virtual base address of the controller */ |
128 | void __iomem *base; | 119 | void __iomem *base; |
129 | unsigned long phys; | 120 | unsigned long phys; |
130 | /* SPI1 has 4 channels, while SPI2 has 2 */ | 121 | /* SPI1 has 4 channels, while SPI2 has 2 */ |
131 | struct omap2_mcspi_dma *dma_channels; | 122 | struct omap2_mcspi_dma *dma_channels; |
123 | struct device *dev; | ||
132 | }; | 124 | }; |
133 | 125 | ||
134 | struct omap2_mcspi_cs { | 126 | struct omap2_mcspi_cs { |
@@ -144,7 +136,6 @@ struct omap2_mcspi_cs { | |||
144 | * corresponding registers are modified. | 136 | * corresponding registers are modified. |
145 | */ | 137 | */ |
146 | struct omap2_mcspi_regs { | 138 | struct omap2_mcspi_regs { |
147 | u32 sysconfig; | ||
148 | u32 modulctrl; | 139 | u32 modulctrl; |
149 | u32 wakeupenable; | 140 | u32 wakeupenable; |
150 | struct list_head cs; | 141 | struct list_head cs; |
@@ -268,9 +259,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) | |||
268 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, | 259 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, |
269 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); | 260 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].modulctrl); |
270 | 261 | ||
271 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_SYSCONFIG, | ||
272 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].sysconfig); | ||
273 | |||
274 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, | 262 | mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, |
275 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); | 263 | omap2_mcspi_ctx[spi_cntrl->bus_num - 1].wakeupenable); |
276 | 264 | ||
@@ -280,19 +268,24 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi) | |||
280 | } | 268 | } |
281 | static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) | 269 | static void omap2_mcspi_disable_clocks(struct omap2_mcspi *mcspi) |
282 | { | 270 | { |
283 | clk_disable(mcspi->ick); | 271 | pm_runtime_put_sync(mcspi->dev); |
284 | clk_disable(mcspi->fck); | ||
285 | } | 272 | } |
286 | 273 | ||
287 | static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) | 274 | static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) |
288 | { | 275 | { |
289 | if (clk_enable(mcspi->ick)) | 276 | return pm_runtime_get_sync(mcspi->dev); |
290 | return -ENODEV; | 277 | } |
291 | if (clk_enable(mcspi->fck)) | ||
292 | return -ENODEV; | ||
293 | 278 | ||
294 | omap2_mcspi_restore_ctx(mcspi); | 279 | static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) |
280 | { | ||
281 | unsigned long timeout; | ||
295 | 282 | ||
283 | timeout = jiffies + msecs_to_jiffies(1000); | ||
284 | while (!(__raw_readl(reg) & bit)) { | ||
285 | if (time_after(jiffies, timeout)) | ||
286 | return -1; | ||
287 | cpu_relax(); | ||
288 | } | ||
296 | return 0; | 289 | return 0; |
297 | } | 290 | } |
298 | 291 | ||
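mcspi_wait_for_reg_bit(), moved up in this hunk so the DMA path can use it too, polls a channel status register against a one second deadline built from jiffies and checked with time_after(). That comparison is wrap-safe because it is done on the signed difference of the two counters rather than on their raw values. A minimal user-space sketch of the same idea (the in-kernel time_after() additionally typechecks its arguments):

#include <assert.h>
#include <limits.h>

/* Wrap-safe "a is later than b", the core of the kernel's time_after(). */
static int after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long deadline = ULONG_MAX - 5;	/* counter about to wrap    */
	unsigned long now = deadline + 10;	/* wrapped past the deadline */

	assert(after(now, deadline));		/* correctly seen as late    */
	assert(!(now > deadline));		/* a naive compare gets it wrong */
	return 0;
}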
@@ -305,15 +298,18 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) | |||
305 | unsigned int count, c; | 298 | unsigned int count, c; |
306 | unsigned long base, tx_reg, rx_reg; | 299 | unsigned long base, tx_reg, rx_reg; |
307 | int word_len, data_type, element_count; | 300 | int word_len, data_type, element_count; |
308 | int elements; | 301 | int elements = 0; |
309 | u32 l; | 302 | u32 l; |
310 | u8 * rx; | 303 | u8 * rx; |
311 | const u8 * tx; | 304 | const u8 * tx; |
305 | void __iomem *chstat_reg; | ||
312 | 306 | ||
313 | mcspi = spi_master_get_devdata(spi->master); | 307 | mcspi = spi_master_get_devdata(spi->master); |
314 | mcspi_dma = &mcspi->dma_channels[spi->chip_select]; | 308 | mcspi_dma = &mcspi->dma_channels[spi->chip_select]; |
315 | l = mcspi_cached_chconf0(spi); | 309 | l = mcspi_cached_chconf0(spi); |
316 | 310 | ||
311 | chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; | ||
312 | |||
317 | count = xfer->len; | 313 | count = xfer->len; |
318 | c = count; | 314 | c = count; |
319 | word_len = cs->word_len; | 315 | word_len = cs->word_len; |
@@ -381,12 +377,22 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) | |||
381 | 377 | ||
382 | if (tx != NULL) { | 378 | if (tx != NULL) { |
383 | wait_for_completion(&mcspi_dma->dma_tx_completion); | 379 | wait_for_completion(&mcspi_dma->dma_tx_completion); |
384 | dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE); | 380 | dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE); |
381 | |||
382 | /* for TX_ONLY mode, be sure all words have shifted out */ | ||
383 | if (rx == NULL) { | ||
384 | if (mcspi_wait_for_reg_bit(chstat_reg, | ||
385 | OMAP2_MCSPI_CHSTAT_TXS) < 0) | ||
386 | dev_err(&spi->dev, "TXS timed out\n"); | ||
387 | else if (mcspi_wait_for_reg_bit(chstat_reg, | ||
388 | OMAP2_MCSPI_CHSTAT_EOT) < 0) | ||
389 | dev_err(&spi->dev, "EOT timed out\n"); | ||
390 | } | ||
385 | } | 391 | } |
386 | 392 | ||
387 | if (rx != NULL) { | 393 | if (rx != NULL) { |
388 | wait_for_completion(&mcspi_dma->dma_rx_completion); | 394 | wait_for_completion(&mcspi_dma->dma_rx_completion); |
389 | dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); | 395 | dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE); |
390 | omap2_mcspi_set_enable(spi, 0); | 396 | omap2_mcspi_set_enable(spi, 0); |
391 | 397 | ||
392 | if (l & OMAP2_MCSPI_CHCONF_TURBO) { | 398 | if (l & OMAP2_MCSPI_CHCONF_TURBO) { |
@@ -435,19 +441,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) | |||
435 | return count; | 441 | return count; |
436 | } | 442 | } |
437 | 443 | ||
438 | static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) | ||
439 | { | ||
440 | unsigned long timeout; | ||
441 | |||
442 | timeout = jiffies + msecs_to_jiffies(1000); | ||
443 | while (!(__raw_readl(reg) & bit)) { | ||
444 | if (time_after(jiffies, timeout)) | ||
445 | return -1; | ||
446 | cpu_relax(); | ||
447 | } | ||
448 | return 0; | ||
449 | } | ||
450 | |||
451 | static unsigned | 444 | static unsigned |
452 | omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | 445 | omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) |
453 | { | 446 | { |
@@ -474,6 +467,9 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
474 | rx_reg = base + OMAP2_MCSPI_RX0; | 467 | rx_reg = base + OMAP2_MCSPI_RX0; |
475 | chstat_reg = base + OMAP2_MCSPI_CHSTAT0; | 468 | chstat_reg = base + OMAP2_MCSPI_CHSTAT0; |
476 | 469 | ||
470 | if (c < (word_len>>3)) | ||
471 | return 0; | ||
472 | |||
477 | if (word_len <= 8) { | 473 | if (word_len <= 8) { |
478 | u8 *rx; | 474 | u8 *rx; |
479 | const u8 *tx; | 475 | const u8 *tx; |
@@ -489,10 +485,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
489 | dev_err(&spi->dev, "TXS timed out\n"); | 485 | dev_err(&spi->dev, "TXS timed out\n"); |
490 | goto out; | 486 | goto out; |
491 | } | 487 | } |
492 | #ifdef VERBOSE | 488 | dev_vdbg(&spi->dev, "write-%d %02x\n", |
493 | dev_dbg(&spi->dev, "write-%d %02x\n", | ||
494 | word_len, *tx); | 489 | word_len, *tx); |
495 | #endif | ||
496 | __raw_writel(*tx++, tx_reg); | 490 | __raw_writel(*tx++, tx_reg); |
497 | } | 491 | } |
498 | if (rx != NULL) { | 492 | if (rx != NULL) { |
@@ -506,10 +500,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
506 | (l & OMAP2_MCSPI_CHCONF_TURBO)) { | 500 | (l & OMAP2_MCSPI_CHCONF_TURBO)) { |
507 | omap2_mcspi_set_enable(spi, 0); | 501 | omap2_mcspi_set_enable(spi, 0); |
508 | *rx++ = __raw_readl(rx_reg); | 502 | *rx++ = __raw_readl(rx_reg); |
509 | #ifdef VERBOSE | 503 | dev_vdbg(&spi->dev, "read-%d %02x\n", |
510 | dev_dbg(&spi->dev, "read-%d %02x\n", | ||
511 | word_len, *(rx - 1)); | 504 | word_len, *(rx - 1)); |
512 | #endif | ||
513 | if (mcspi_wait_for_reg_bit(chstat_reg, | 505 | if (mcspi_wait_for_reg_bit(chstat_reg, |
514 | OMAP2_MCSPI_CHSTAT_RXS) < 0) { | 506 | OMAP2_MCSPI_CHSTAT_RXS) < 0) { |
515 | dev_err(&spi->dev, | 507 | dev_err(&spi->dev, |
@@ -522,10 +514,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
522 | } | 514 | } |
523 | 515 | ||
524 | *rx++ = __raw_readl(rx_reg); | 516 | *rx++ = __raw_readl(rx_reg); |
525 | #ifdef VERBOSE | 517 | dev_vdbg(&spi->dev, "read-%d %02x\n", |
526 | dev_dbg(&spi->dev, "read-%d %02x\n", | ||
527 | word_len, *(rx - 1)); | 518 | word_len, *(rx - 1)); |
528 | #endif | ||
529 | } | 519 | } |
530 | } while (c); | 520 | } while (c); |
531 | } else if (word_len <= 16) { | 521 | } else if (word_len <= 16) { |
@@ -542,10 +532,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
542 | dev_err(&spi->dev, "TXS timed out\n"); | 532 | dev_err(&spi->dev, "TXS timed out\n"); |
543 | goto out; | 533 | goto out; |
544 | } | 534 | } |
545 | #ifdef VERBOSE | 535 | dev_vdbg(&spi->dev, "write-%d %04x\n", |
546 | dev_dbg(&spi->dev, "write-%d %04x\n", | ||
547 | word_len, *tx); | 536 | word_len, *tx); |
548 | #endif | ||
549 | __raw_writel(*tx++, tx_reg); | 537 | __raw_writel(*tx++, tx_reg); |
550 | } | 538 | } |
551 | if (rx != NULL) { | 539 | if (rx != NULL) { |
@@ -559,10 +547,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
559 | (l & OMAP2_MCSPI_CHCONF_TURBO)) { | 547 | (l & OMAP2_MCSPI_CHCONF_TURBO)) { |
560 | omap2_mcspi_set_enable(spi, 0); | 548 | omap2_mcspi_set_enable(spi, 0); |
561 | *rx++ = __raw_readl(rx_reg); | 549 | *rx++ = __raw_readl(rx_reg); |
562 | #ifdef VERBOSE | 550 | dev_vdbg(&spi->dev, "read-%d %04x\n", |
563 | dev_dbg(&spi->dev, "read-%d %04x\n", | ||
564 | word_len, *(rx - 1)); | 551 | word_len, *(rx - 1)); |
565 | #endif | ||
566 | if (mcspi_wait_for_reg_bit(chstat_reg, | 552 | if (mcspi_wait_for_reg_bit(chstat_reg, |
567 | OMAP2_MCSPI_CHSTAT_RXS) < 0) { | 553 | OMAP2_MCSPI_CHSTAT_RXS) < 0) { |
568 | dev_err(&spi->dev, | 554 | dev_err(&spi->dev, |
@@ -575,12 +561,10 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
575 | } | 561 | } |
576 | 562 | ||
577 | *rx++ = __raw_readl(rx_reg); | 563 | *rx++ = __raw_readl(rx_reg); |
578 | #ifdef VERBOSE | 564 | dev_vdbg(&spi->dev, "read-%d %04x\n", |
579 | dev_dbg(&spi->dev, "read-%d %04x\n", | ||
580 | word_len, *(rx - 1)); | 565 | word_len, *(rx - 1)); |
581 | #endif | ||
582 | } | 566 | } |
583 | } while (c); | 567 | } while (c >= 2); |
584 | } else if (word_len <= 32) { | 568 | } else if (word_len <= 32) { |
585 | u32 *rx; | 569 | u32 *rx; |
586 | const u32 *tx; | 570 | const u32 *tx; |
@@ -595,10 +579,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
595 | dev_err(&spi->dev, "TXS timed out\n"); | 579 | dev_err(&spi->dev, "TXS timed out\n"); |
596 | goto out; | 580 | goto out; |
597 | } | 581 | } |
598 | #ifdef VERBOSE | 582 | dev_vdbg(&spi->dev, "write-%d %08x\n", |
599 | dev_dbg(&spi->dev, "write-%d %08x\n", | ||
600 | word_len, *tx); | 583 | word_len, *tx); |
601 | #endif | ||
602 | __raw_writel(*tx++, tx_reg); | 584 | __raw_writel(*tx++, tx_reg); |
603 | } | 585 | } |
604 | if (rx != NULL) { | 586 | if (rx != NULL) { |
@@ -612,10 +594,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
612 | (l & OMAP2_MCSPI_CHCONF_TURBO)) { | 594 | (l & OMAP2_MCSPI_CHCONF_TURBO)) { |
613 | omap2_mcspi_set_enable(spi, 0); | 595 | omap2_mcspi_set_enable(spi, 0); |
614 | *rx++ = __raw_readl(rx_reg); | 596 | *rx++ = __raw_readl(rx_reg); |
615 | #ifdef VERBOSE | 597 | dev_vdbg(&spi->dev, "read-%d %08x\n", |
616 | dev_dbg(&spi->dev, "read-%d %08x\n", | ||
617 | word_len, *(rx - 1)); | 598 | word_len, *(rx - 1)); |
618 | #endif | ||
619 | if (mcspi_wait_for_reg_bit(chstat_reg, | 599 | if (mcspi_wait_for_reg_bit(chstat_reg, |
620 | OMAP2_MCSPI_CHSTAT_RXS) < 0) { | 600 | OMAP2_MCSPI_CHSTAT_RXS) < 0) { |
621 | dev_err(&spi->dev, | 601 | dev_err(&spi->dev, |
@@ -628,12 +608,10 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
628 | } | 608 | } |
629 | 609 | ||
630 | *rx++ = __raw_readl(rx_reg); | 610 | *rx++ = __raw_readl(rx_reg); |
631 | #ifdef VERBOSE | 611 | dev_vdbg(&spi->dev, "read-%d %08x\n", |
632 | dev_dbg(&spi->dev, "read-%d %08x\n", | ||
633 | word_len, *(rx - 1)); | 612 | word_len, *(rx - 1)); |
634 | #endif | ||
635 | } | 613 | } |
636 | } while (c); | 614 | } while (c >= 4); |
637 | } | 615 | } |
638 | 616 | ||
639 | /* for TX_ONLY mode, be sure all words have shifted out */ | 617 | /* for TX_ONLY mode, be sure all words have shifted out */ |
@@ -644,12 +622,29 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) | |||
644 | } else if (mcspi_wait_for_reg_bit(chstat_reg, | 622 | } else if (mcspi_wait_for_reg_bit(chstat_reg, |
645 | OMAP2_MCSPI_CHSTAT_EOT) < 0) | 623 | OMAP2_MCSPI_CHSTAT_EOT) < 0) |
646 | dev_err(&spi->dev, "EOT timed out\n"); | 624 | dev_err(&spi->dev, "EOT timed out\n"); |
625 | |||
626 | /* Disable the channel to purge RX data received during a | ||
627 | * TX_ONLY transfer; otherwise that stale RX data would corrupt | ||
628 | * the directly following RX_ONLY transfer. | ||
629 | */ | ||
630 | omap2_mcspi_set_enable(spi, 0); | ||
647 | } | 631 | } |
648 | out: | 632 | out: |
649 | omap2_mcspi_set_enable(spi, 1); | 633 | omap2_mcspi_set_enable(spi, 1); |
650 | return count - c; | 634 | return count - c; |
651 | } | 635 | } |
652 | 636 | ||
637 | static u32 omap2_mcspi_calc_divisor(u32 speed_hz) | ||
638 | { | ||
639 | u32 div; | ||
640 | |||
641 | for (div = 0; div < 15; div++) | ||
642 | if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div)) | ||
643 | return div; | ||
644 | |||
645 | return 15; | ||
646 | } | ||
647 | |||
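omap2_mcspi_calc_divisor(), added here, walks the power-of-two divisors and returns the smallest one whose output clock does not exceed the requested rate, falling back to the maximum divisor of 15. A self-contained sketch of the same search, assuming the 48 MHz functional clock this controller uses (OMAP2_MCSPI_MAX_FREQ in the driver):

#include <stdio.h>

#define MCSPI_MAX_FREQ 48000000u	/* assumed functional clock, Hz */

/* Same search as omap2_mcspi_calc_divisor() in the hunk above. */
static unsigned int calc_divisor(unsigned int speed_hz)
{
	unsigned int div;

	for (div = 0; div < 15; div++)
		if (speed_hz >= (MCSPI_MAX_FREQ >> div))
			return div;
	return 15;
}

int main(void)
{
	unsigned int req[] = { 48000000, 10000000, 1000000, 1500 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("requested %8u Hz -> div %2u -> actual %8u Hz\n",
		       req[i], calc_divisor(req[i]),
		       MCSPI_MAX_FREQ >> calc_divisor(req[i]));
	return 0;
}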
653 | /* called only when no transfer is active to this device */ | 648 | /* called only when no transfer is active to this device */ |
654 | static int omap2_mcspi_setup_transfer(struct spi_device *spi, | 649 | static int omap2_mcspi_setup_transfer(struct spi_device *spi, |
655 | struct spi_transfer *t) | 650 | struct spi_transfer *t) |
@@ -672,12 +667,8 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, | |||
672 | if (t && t->speed_hz) | 667 | if (t && t->speed_hz) |
673 | speed_hz = t->speed_hz; | 668 | speed_hz = t->speed_hz; |
674 | 669 | ||
675 | if (speed_hz) { | 670 | speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ); |
676 | while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div)) | 671 | div = omap2_mcspi_calc_divisor(speed_hz); |
677 | > speed_hz) | ||
678 | div++; | ||
679 | } else | ||
680 | div = 15; | ||
681 | 672 | ||
682 | l = mcspi_cached_chconf0(spi); | 673 | l = mcspi_cached_chconf0(spi); |
683 | 674 | ||
@@ -714,7 +705,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, | |||
714 | mcspi_write_chconf0(spi, l); | 705 | mcspi_write_chconf0(spi, l); |
715 | 706 | ||
716 | dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", | 707 | dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n", |
717 | OMAP2_MCSPI_MAX_FREQ / (1 << div), | 708 | OMAP2_MCSPI_MAX_FREQ >> div, |
718 | (spi->mode & SPI_CPHA) ? "trailing" : "leading", | 709 | (spi->mode & SPI_CPHA) ? "trailing" : "leading", |
719 | (spi->mode & SPI_CPOL) ? "inverted" : "normal"); | 710 | (spi->mode & SPI_CPOL) ? "inverted" : "normal"); |
720 | 711 | ||
@@ -818,8 +809,9 @@ static int omap2_mcspi_setup(struct spi_device *spi) | |||
818 | return ret; | 809 | return ret; |
819 | } | 810 | } |
820 | 811 | ||
821 | if (omap2_mcspi_enable_clocks(mcspi)) | 812 | ret = omap2_mcspi_enable_clocks(mcspi); |
822 | return -ENODEV; | 813 | if (ret < 0) |
814 | return ret; | ||
823 | 815 | ||
824 | ret = omap2_mcspi_setup_transfer(spi, NULL); | 816 | ret = omap2_mcspi_setup_transfer(spi, NULL); |
825 | omap2_mcspi_disable_clocks(mcspi); | 817 | omap2_mcspi_disable_clocks(mcspi); |
@@ -862,10 +854,11 @@ static void omap2_mcspi_work(struct work_struct *work) | |||
862 | struct omap2_mcspi *mcspi; | 854 | struct omap2_mcspi *mcspi; |
863 | 855 | ||
864 | mcspi = container_of(work, struct omap2_mcspi, work); | 856 | mcspi = container_of(work, struct omap2_mcspi, work); |
865 | spin_lock_irq(&mcspi->lock); | ||
866 | 857 | ||
867 | if (omap2_mcspi_enable_clocks(mcspi)) | 858 | if (omap2_mcspi_enable_clocks(mcspi) < 0) |
868 | goto out; | 859 | return; |
860 | |||
861 | spin_lock_irq(&mcspi->lock); | ||
869 | 862 | ||
870 | /* We only enable one channel at a time -- the one whose message is | 863 | /* We only enable one channel at a time -- the one whose message is |
871 | * at the head of the queue -- although this controller would gladly | 864 | * at the head of the queue -- although this controller would gladly |
@@ -978,10 +971,9 @@ static void omap2_mcspi_work(struct work_struct *work) | |||
978 | spin_lock_irq(&mcspi->lock); | 971 | spin_lock_irq(&mcspi->lock); |
979 | } | 972 | } |
980 | 973 | ||
981 | omap2_mcspi_disable_clocks(mcspi); | ||
982 | |||
983 | out: | ||
984 | spin_unlock_irq(&mcspi->lock); | 974 | spin_unlock_irq(&mcspi->lock); |
975 | |||
976 | omap2_mcspi_disable_clocks(mcspi); | ||
985 | } | 977 | } |
986 | 978 | ||
987 | static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | 979 | static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) |
@@ -1014,21 +1006,16 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
1014 | t->bits_per_word); | 1006 | t->bits_per_word); |
1015 | return -EINVAL; | 1007 | return -EINVAL; |
1016 | } | 1008 | } |
1017 | if (t->speed_hz && t->speed_hz < OMAP2_MCSPI_MAX_FREQ/(1<<16)) { | 1009 | if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) { |
1018 | dev_dbg(&spi->dev, "%d Hz max exceeds %d\n", | 1010 | dev_dbg(&spi->dev, "speed_hz %d below minimum %d Hz\n", |
1019 | t->speed_hz, | 1011 | t->speed_hz, |
1020 | OMAP2_MCSPI_MAX_FREQ/(1<<16)); | 1012 | OMAP2_MCSPI_MAX_FREQ >> 15); |
1021 | return -EINVAL; | 1013 | return -EINVAL; |
1022 | } | 1014 | } |
1023 | 1015 | ||
1024 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) | 1016 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) |
1025 | continue; | 1017 | continue; |
1026 | 1018 | ||
1027 | /* Do DMA mapping "early" for better error reporting and | ||
1028 | * dcache use. Note that if dma_unmap_single() ever starts | ||
1029 | * to do real work on ARM, we'd need to clean up mappings | ||
1030 | * for previous transfers on *ALL* exits of this loop... | ||
1031 | */ | ||
1032 | if (tx_buf != NULL) { | 1019 | if (tx_buf != NULL) { |
1033 | t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, | 1020 | t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, |
1034 | len, DMA_TO_DEVICE); | 1021 | len, DMA_TO_DEVICE); |
@@ -1045,7 +1032,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
1045 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", | 1032 | dev_dbg(&spi->dev, "dma %cX %d bytes error\n", |
1046 | 'R', len); | 1033 | 'R', len); |
1047 | if (tx_buf != NULL) | 1034 | if (tx_buf != NULL) |
1048 | dma_unmap_single(NULL, t->tx_dma, | 1035 | dma_unmap_single(&spi->dev, t->tx_dma, |
1049 | len, DMA_TO_DEVICE); | 1036 | len, DMA_TO_DEVICE); |
1050 | return -EINVAL; | 1037 | return -EINVAL; |
1051 | } | 1038 | } |
@@ -1062,25 +1049,15 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m) | |||
1062 | return 0; | 1049 | return 0; |
1063 | } | 1050 | } |
1064 | 1051 | ||
1065 | static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) | 1052 | static int __init omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) |
1066 | { | 1053 | { |
1067 | struct spi_master *master = mcspi->master; | 1054 | struct spi_master *master = mcspi->master; |
1068 | u32 tmp; | 1055 | u32 tmp; |
1056 | int ret = 0; | ||
1069 | 1057 | ||
1070 | if (omap2_mcspi_enable_clocks(mcspi)) | 1058 | ret = omap2_mcspi_enable_clocks(mcspi); |
1071 | return -1; | 1059 | if (ret < 0) |
1072 | 1060 | return ret; | |
1073 | mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, | ||
1074 | OMAP2_MCSPI_SYSCONFIG_SOFTRESET); | ||
1075 | do { | ||
1076 | tmp = mcspi_read_reg(master, OMAP2_MCSPI_SYSSTATUS); | ||
1077 | } while (!(tmp & OMAP2_MCSPI_SYSSTATUS_RESETDONE)); | ||
1078 | |||
1079 | tmp = OMAP2_MCSPI_SYSCONFIG_AUTOIDLE | | ||
1080 | OMAP2_MCSPI_SYSCONFIG_ENAWAKEUP | | ||
1081 | OMAP2_MCSPI_SYSCONFIG_SMARTIDLE; | ||
1082 | mcspi_write_reg(master, OMAP2_MCSPI_SYSCONFIG, tmp); | ||
1083 | omap2_mcspi_ctx[master->bus_num - 1].sysconfig = tmp; | ||
1084 | 1061 | ||
1085 | tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; | 1062 | tmp = OMAP2_MCSPI_WAKEUPENABLE_WKEN; |
1086 | mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); | 1063 | mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE, tmp); |
@@ -1091,91 +1068,26 @@ static int __init omap2_mcspi_reset(struct omap2_mcspi *mcspi) | |||
1091 | return 0; | 1068 | return 0; |
1092 | } | 1069 | } |
1093 | 1070 | ||
1094 | static u8 __initdata spi1_rxdma_id [] = { | 1071 | static int omap_mcspi_runtime_resume(struct device *dev) |
1095 | OMAP24XX_DMA_SPI1_RX0, | 1072 | { |
1096 | OMAP24XX_DMA_SPI1_RX1, | 1073 | struct omap2_mcspi *mcspi; |
1097 | OMAP24XX_DMA_SPI1_RX2, | 1074 | struct spi_master *master; |
1098 | OMAP24XX_DMA_SPI1_RX3, | ||
1099 | }; | ||
1100 | |||
1101 | static u8 __initdata spi1_txdma_id [] = { | ||
1102 | OMAP24XX_DMA_SPI1_TX0, | ||
1103 | OMAP24XX_DMA_SPI1_TX1, | ||
1104 | OMAP24XX_DMA_SPI1_TX2, | ||
1105 | OMAP24XX_DMA_SPI1_TX3, | ||
1106 | }; | ||
1107 | |||
1108 | static u8 __initdata spi2_rxdma_id[] = { | ||
1109 | OMAP24XX_DMA_SPI2_RX0, | ||
1110 | OMAP24XX_DMA_SPI2_RX1, | ||
1111 | }; | ||
1112 | |||
1113 | static u8 __initdata spi2_txdma_id[] = { | ||
1114 | OMAP24XX_DMA_SPI2_TX0, | ||
1115 | OMAP24XX_DMA_SPI2_TX1, | ||
1116 | }; | ||
1117 | |||
1118 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ | ||
1119 | || defined(CONFIG_ARCH_OMAP4) | ||
1120 | static u8 __initdata spi3_rxdma_id[] = { | ||
1121 | OMAP24XX_DMA_SPI3_RX0, | ||
1122 | OMAP24XX_DMA_SPI3_RX1, | ||
1123 | }; | ||
1124 | 1075 | ||
1125 | static u8 __initdata spi3_txdma_id[] = { | 1076 | master = dev_get_drvdata(dev); |
1126 | OMAP24XX_DMA_SPI3_TX0, | 1077 | mcspi = spi_master_get_devdata(master); |
1127 | OMAP24XX_DMA_SPI3_TX1, | 1078 | omap2_mcspi_restore_ctx(mcspi); |
1128 | }; | ||
1129 | #endif | ||
1130 | 1079 | ||
1131 | #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) | 1080 | return 0; |
1132 | static u8 __initdata spi4_rxdma_id[] = { | 1081 | } |
1133 | OMAP34XX_DMA_SPI4_RX0, | ||
1134 | }; | ||
1135 | 1082 | ||
1136 | static u8 __initdata spi4_txdma_id[] = { | ||
1137 | OMAP34XX_DMA_SPI4_TX0, | ||
1138 | }; | ||
1139 | #endif | ||
1140 | 1083 | ||
1141 | static int __init omap2_mcspi_probe(struct platform_device *pdev) | 1084 | static int __init omap2_mcspi_probe(struct platform_device *pdev) |
1142 | { | 1085 | { |
1143 | struct spi_master *master; | 1086 | struct spi_master *master; |
1087 | struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data; | ||
1144 | struct omap2_mcspi *mcspi; | 1088 | struct omap2_mcspi *mcspi; |
1145 | struct resource *r; | 1089 | struct resource *r; |
1146 | int status = 0, i; | 1090 | int status = 0, i; |
1147 | const u8 *rxdma_id, *txdma_id; | ||
1148 | unsigned num_chipselect; | ||
1149 | |||
1150 | switch (pdev->id) { | ||
1151 | case 1: | ||
1152 | rxdma_id = spi1_rxdma_id; | ||
1153 | txdma_id = spi1_txdma_id; | ||
1154 | num_chipselect = 4; | ||
1155 | break; | ||
1156 | case 2: | ||
1157 | rxdma_id = spi2_rxdma_id; | ||
1158 | txdma_id = spi2_txdma_id; | ||
1159 | num_chipselect = 2; | ||
1160 | break; | ||
1161 | #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \ | ||
1162 | || defined(CONFIG_ARCH_OMAP4) | ||
1163 | case 3: | ||
1164 | rxdma_id = spi3_rxdma_id; | ||
1165 | txdma_id = spi3_txdma_id; | ||
1166 | num_chipselect = 2; | ||
1167 | break; | ||
1168 | #endif | ||
1169 | #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) | ||
1170 | case 4: | ||
1171 | rxdma_id = spi4_rxdma_id; | ||
1172 | txdma_id = spi4_txdma_id; | ||
1173 | num_chipselect = 1; | ||
1174 | break; | ||
1175 | #endif | ||
1176 | default: | ||
1177 | return -EINVAL; | ||
1178 | } | ||
1179 | 1091 | ||
1180 | master = spi_alloc_master(&pdev->dev, sizeof *mcspi); | 1092 | master = spi_alloc_master(&pdev->dev, sizeof *mcspi); |
1181 | if (master == NULL) { | 1093 | if (master == NULL) { |
@@ -1192,7 +1104,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) | |||
1192 | master->setup = omap2_mcspi_setup; | 1104 | master->setup = omap2_mcspi_setup; |
1193 | master->transfer = omap2_mcspi_transfer; | 1105 | master->transfer = omap2_mcspi_transfer; |
1194 | master->cleanup = omap2_mcspi_cleanup; | 1106 | master->cleanup = omap2_mcspi_cleanup; |
1195 | master->num_chipselect = num_chipselect; | 1107 | master->num_chipselect = pdata->num_cs; |
1196 | 1108 | ||
1197 | dev_set_drvdata(&pdev->dev, master); | 1109 | dev_set_drvdata(&pdev->dev, master); |
1198 | 1110 | ||
@@ -1210,49 +1122,62 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) | |||
1210 | goto err1; | 1122 | goto err1; |
1211 | } | 1123 | } |
1212 | 1124 | ||
1125 | r->start += pdata->regs_offset; | ||
1126 | r->end += pdata->regs_offset; | ||
1213 | mcspi->phys = r->start; | 1127 | mcspi->phys = r->start; |
1214 | mcspi->base = ioremap(r->start, r->end - r->start + 1); | 1128 | mcspi->base = ioremap(r->start, r->end - r->start + 1); |
1215 | if (!mcspi->base) { | 1129 | if (!mcspi->base) { |
1216 | dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); | 1130 | dev_dbg(&pdev->dev, "can't ioremap MCSPI\n"); |
1217 | status = -ENOMEM; | 1131 | status = -ENOMEM; |
1218 | goto err1aa; | 1132 | goto err2; |
1219 | } | 1133 | } |
1220 | 1134 | ||
1135 | mcspi->dev = &pdev->dev; | ||
1221 | INIT_WORK(&mcspi->work, omap2_mcspi_work); | 1136 | INIT_WORK(&mcspi->work, omap2_mcspi_work); |
1222 | 1137 | ||
1223 | spin_lock_init(&mcspi->lock); | 1138 | spin_lock_init(&mcspi->lock); |
1224 | INIT_LIST_HEAD(&mcspi->msg_queue); | 1139 | INIT_LIST_HEAD(&mcspi->msg_queue); |
1225 | INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); | 1140 | INIT_LIST_HEAD(&omap2_mcspi_ctx[master->bus_num - 1].cs); |
1226 | 1141 | ||
1227 | mcspi->ick = clk_get(&pdev->dev, "ick"); | ||
1228 | if (IS_ERR(mcspi->ick)) { | ||
1229 | dev_dbg(&pdev->dev, "can't get mcspi_ick\n"); | ||
1230 | status = PTR_ERR(mcspi->ick); | ||
1231 | goto err1a; | ||
1232 | } | ||
1233 | mcspi->fck = clk_get(&pdev->dev, "fck"); | ||
1234 | if (IS_ERR(mcspi->fck)) { | ||
1235 | dev_dbg(&pdev->dev, "can't get mcspi_fck\n"); | ||
1236 | status = PTR_ERR(mcspi->fck); | ||
1237 | goto err2; | ||
1238 | } | ||
1239 | |||
1240 | mcspi->dma_channels = kcalloc(master->num_chipselect, | 1142 | mcspi->dma_channels = kcalloc(master->num_chipselect, |
1241 | sizeof(struct omap2_mcspi_dma), | 1143 | sizeof(struct omap2_mcspi_dma), |
1242 | GFP_KERNEL); | 1144 | GFP_KERNEL); |
1243 | 1145 | ||
1244 | if (mcspi->dma_channels == NULL) | 1146 | if (mcspi->dma_channels == NULL) |
1245 | goto err3; | 1147 | goto err2; |
1148 | |||
1149 | for (i = 0; i < master->num_chipselect; i++) { | ||
1150 | char dma_ch_name[14]; | ||
1151 | struct resource *dma_res; | ||
1152 | |||
1153 | sprintf(dma_ch_name, "rx%d", i); | ||
1154 | dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, | ||
1155 | dma_ch_name); | ||
1156 | if (!dma_res) { | ||
1157 | dev_dbg(&pdev->dev, "cannot get DMA RX channel\n"); | ||
1158 | status = -ENODEV; | ||
1159 | break; | ||
1160 | } | ||
1246 | 1161 | ||
1247 | for (i = 0; i < num_chipselect; i++) { | ||
1248 | mcspi->dma_channels[i].dma_rx_channel = -1; | 1162 | mcspi->dma_channels[i].dma_rx_channel = -1; |
1249 | mcspi->dma_channels[i].dma_rx_sync_dev = rxdma_id[i]; | 1163 | mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; |
1164 | sprintf(dma_ch_name, "tx%d", i); | ||
1165 | dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, | ||
1166 | dma_ch_name); | ||
1167 | if (!dma_res) { | ||
1168 | dev_dbg(&pdev->dev, "cannot get DMA TX channel\n"); | ||
1169 | status = -ENODEV; | ||
1170 | break; | ||
1171 | } | ||
1172 | |||
1250 | mcspi->dma_channels[i].dma_tx_channel = -1; | 1173 | mcspi->dma_channels[i].dma_tx_channel = -1; |
1251 | mcspi->dma_channels[i].dma_tx_sync_dev = txdma_id[i]; | 1174 | mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; |
1252 | } | 1175 | } |
1253 | 1176 | ||
1254 | if (omap2_mcspi_reset(mcspi) < 0) | 1177 | pm_runtime_enable(&pdev->dev); |
1255 | goto err4; | 1178 | |
1179 | if (status || omap2_mcspi_master_setup(mcspi) < 0) | ||
1180 | goto err3; | ||
1256 | 1181 | ||
1257 | status = spi_register_master(master); | 1182 | status = spi_register_master(master); |
1258 | if (status < 0) | 1183 | if (status < 0) |
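The probe path above no longer hard-codes per-SoC DMA request tables; instead it asks the platform device for IORESOURCE_DMA entries named "rx0"/"tx0", "rx1"/"tx1" and so on, one pair per chip select. A hedged sketch of what the matching resource array might look like on the platform-device side; the base address and request-line numbers here are illustrative only, not taken from this patch:

#include <linux/ioport.h>

static struct resource foo_mcspi_resources[] = {
	{ .start = 0x48098000, .end = 0x480981ff, .flags = IORESOURCE_MEM },
	{ .name = "rx0", .start = 35, .end = 35, .flags = IORESOURCE_DMA },
	{ .name = "tx0", .start = 36, .end = 36, .flags = IORESOURCE_DMA },
};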
@@ -1261,17 +1186,13 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev) | |||
1261 | return status; | 1186 | return status; |
1262 | 1187 | ||
1263 | err4: | 1188 | err4: |
1264 | kfree(mcspi->dma_channels); | 1189 | spi_master_put(master); |
1265 | err3: | 1190 | err3: |
1266 | clk_put(mcspi->fck); | 1191 | kfree(mcspi->dma_channels); |
1267 | err2: | 1192 | err2: |
1268 | clk_put(mcspi->ick); | ||
1269 | err1a: | ||
1270 | iounmap(mcspi->base); | ||
1271 | err1aa: | ||
1272 | release_mem_region(r->start, (r->end - r->start) + 1); | 1193 | release_mem_region(r->start, (r->end - r->start) + 1); |
1194 | iounmap(mcspi->base); | ||
1273 | err1: | 1195 | err1: |
1274 | spi_master_put(master); | ||
1275 | return status; | 1196 | return status; |
1276 | } | 1197 | } |
1277 | 1198 | ||
@@ -1287,9 +1208,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev) | |||
1287 | mcspi = spi_master_get_devdata(master); | 1208 | mcspi = spi_master_get_devdata(master); |
1288 | dma_channels = mcspi->dma_channels; | 1209 | dma_channels = mcspi->dma_channels; |
1289 | 1210 | ||
1290 | clk_put(mcspi->fck); | 1211 | omap2_mcspi_disable_clocks(mcspi); |
1291 | clk_put(mcspi->ick); | ||
1292 | |||
1293 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1212 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1294 | release_mem_region(r->start, (r->end - r->start) + 1); | 1213 | release_mem_region(r->start, (r->end - r->start) + 1); |
1295 | 1214 | ||
@@ -1304,10 +1223,50 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev) | |||
1304 | /* work with hotplug and coldplug */ | 1223 | /* work with hotplug and coldplug */ |
1305 | MODULE_ALIAS("platform:omap2_mcspi"); | 1224 | MODULE_ALIAS("platform:omap2_mcspi"); |
1306 | 1225 | ||
1226 | #ifdef CONFIG_SUSPEND | ||
1227 | /* | ||
1228 | * When SPI wake up from off-mode, CS is in activate state. If it was in | ||
1229 | * unactive state when driver was suspend, then force it to unactive state at | ||
1230 | * wake up. | ||
1231 | */ | ||
1232 | static int omap2_mcspi_resume(struct device *dev) | ||
1233 | { | ||
1234 | struct spi_master *master = dev_get_drvdata(dev); | ||
1235 | struct omap2_mcspi *mcspi = spi_master_get_devdata(master); | ||
1236 | struct omap2_mcspi_cs *cs; | ||
1237 | |||
1238 | omap2_mcspi_enable_clocks(mcspi); | ||
1239 | list_for_each_entry(cs, &omap2_mcspi_ctx[master->bus_num - 1].cs, | ||
1240 | node) { | ||
1241 | if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) { | ||
1242 | |||
1243 | /* | ||
1244 | * We need to toggle the CS state so that the OMAP | ||
1245 | * takes this change into account. | ||
1246 | */ | ||
1247 | MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 1); | ||
1248 | __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); | ||
1249 | MOD_REG_BIT(cs->chconf0, OMAP2_MCSPI_CHCONF_FORCE, 0); | ||
1250 | __raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0); | ||
1251 | } | ||
1252 | } | ||
1253 | omap2_mcspi_disable_clocks(mcspi); | ||
1254 | return 0; | ||
1255 | } | ||
1256 | #else | ||
1257 | #define omap2_mcspi_resume NULL | ||
1258 | #endif | ||
1259 | |||
1260 | static const struct dev_pm_ops omap2_mcspi_pm_ops = { | ||
1261 | .resume = omap2_mcspi_resume, | ||
1262 | .runtime_resume = omap_mcspi_runtime_resume, | ||
1263 | }; | ||
1264 | |||
1307 | static struct platform_driver omap2_mcspi_driver = { | 1265 | static struct platform_driver omap2_mcspi_driver = { |
1308 | .driver = { | 1266 | .driver = { |
1309 | .name = "omap2_mcspi", | 1267 | .name = "omap2_mcspi", |
1310 | .owner = THIS_MODULE, | 1268 | .owner = THIS_MODULE, |
1269 | .pm = &omap2_mcspi_pm_ops | ||
1311 | }, | 1270 | }, |
1312 | .remove = __exit_p(omap2_mcspi_remove), | 1271 | .remove = __exit_p(omap2_mcspi_remove), |
1313 | }; | 1272 | }; |
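Taken together, the omap2_mcspi hunks replace explicit ick/fck clock management with runtime PM: pm_runtime_enable() in probe, pm_runtime_get_sync()/pm_runtime_put_sync() around hardware access, and a runtime_resume callback that restores the cached context because the power domain may have been switched off in between. A minimal sketch of that shape, using hypothetical "foo" names rather than this driver's:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_resume(struct device *dev)
{
	/* hardware may have lost state; restore cached registers here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_resume = foo_runtime_resume,
};

static int foo_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);	/* let the core gate this device */
	return 0;
}

static int foo_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* power/clocks on, ctx restored */

	if (ret < 0)
		return ret;
	/* ... touch registers ... */
	pm_runtime_put_sync(dev);		/* allow gating again */
	return 0;
}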
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index 3aea50da7b29..0b677dc041ad 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c | |||
@@ -404,7 +404,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) | |||
404 | goto msg_rejected; | 404 | goto msg_rejected; |
405 | } | 405 | } |
406 | 406 | ||
407 | if ((t != NULL) && t->bits_per_word) | 407 | if (t->bits_per_word) |
408 | bits_per_word = t->bits_per_word; | 408 | bits_per_word = t->bits_per_word; |
409 | 409 | ||
410 | if ((bits_per_word != 8) && (bits_per_word != 16)) { | 410 | if ((bits_per_word != 8) && (bits_per_word != 16)) { |
@@ -415,7 +415,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) | |||
415 | goto msg_rejected; | 415 | goto msg_rejected; |
416 | } | 416 | } |
417 | /*make sure buffer length is even when working in 16 bit mode*/ | 417 | /*make sure buffer length is even when working in 16 bit mode*/ |
418 | if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) { | 418 | if ((t->bits_per_word == 16) && (t->len & 1)) { |
419 | dev_err(&spi->dev, | 419 | dev_err(&spi->dev, |
420 | "message rejected : " | 420 | "message rejected : " |
421 | "odd data length (%d) while in 16 bit mode\n", | 421 | "odd data length (%d) while in 16 bit mode\n", |
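The orion_spi hunks drop "t != NULL" tests that were dead code: t is produced by list_for_each_entry() over m->transfers, which only yields pointers to spi_transfer structures embedded in real list nodes, so it can never be NULL inside the loop body. A small sketch of the iteration shape being assumed:

#include <linux/spi/spi.h>

/* Sketch: walking a message's transfers the way orion_spi_transfer() does;
 * inside the loop "t" always points at a real spi_transfer. */
static unsigned int count_transfers(struct spi_message *m)
{
	struct spi_transfer *t;
	unsigned int n = 0;

	list_for_each_entry(t, &m->transfers, transfer_list)
		n++;
	return n;
}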
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c index e76b1afafe07..dc25bee8d33f 100644 --- a/drivers/spi/pxa2xx_spi.c +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -23,11 +23,11 @@ | |||
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/spi/pxa2xx_spi.h> | ||
26 | #include <linux/dma-mapping.h> | 27 | #include <linux/dma-mapping.h> |
27 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
28 | #include <linux/workqueue.h> | 29 | #include <linux/workqueue.h> |
29 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
30 | #include <linux/clk.h> | ||
31 | #include <linux/gpio.h> | 31 | #include <linux/gpio.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | 33 | ||
@@ -35,9 +35,6 @@ | |||
35 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
36 | #include <asm/delay.h> | 36 | #include <asm/delay.h> |
37 | 37 | ||
38 | #include <mach/dma.h> | ||
39 | #include <plat/ssp.h> | ||
40 | #include <mach/pxa2xx_spi.h> | ||
41 | 38 | ||
42 | MODULE_AUTHOR("Stephen Street"); | 39 | MODULE_AUTHOR("Stephen Street"); |
43 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); | 40 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); |
@@ -46,8 +43,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
46 | 43 | ||
47 | #define MAX_BUSES 3 | 44 | #define MAX_BUSES 3 |
48 | 45 | ||
49 | #define RX_THRESH_DFLT 8 | ||
50 | #define TX_THRESH_DFLT 8 | ||
51 | #define TIMOUT_DFLT 1000 | 46 | #define TIMOUT_DFLT 1000 |
52 | 47 | ||
53 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | 48 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) |
@@ -168,7 +163,10 @@ struct chip_data { | |||
168 | u8 enable_dma; | 163 | u8 enable_dma; |
169 | u8 bits_per_word; | 164 | u8 bits_per_word; |
170 | u32 speed_hz; | 165 | u32 speed_hz; |
171 | int gpio_cs; | 166 | union { |
167 | int gpio_cs; | ||
168 | unsigned int frm; | ||
169 | }; | ||
172 | int gpio_cs_inverted; | 170 | int gpio_cs_inverted; |
173 | int (*write)(struct driver_data *drv_data); | 171 | int (*write)(struct driver_data *drv_data); |
174 | int (*read)(struct driver_data *drv_data); | 172 | int (*read)(struct driver_data *drv_data); |
@@ -181,6 +179,11 @@ static void cs_assert(struct driver_data *drv_data) | |||
181 | { | 179 | { |
182 | struct chip_data *chip = drv_data->cur_chip; | 180 | struct chip_data *chip = drv_data->cur_chip; |
183 | 181 | ||
182 | if (drv_data->ssp_type == CE4100_SSP) { | ||
183 | write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); | ||
184 | return; | ||
185 | } | ||
186 | |||
184 | if (chip->cs_control) { | 187 | if (chip->cs_control) { |
185 | chip->cs_control(PXA2XX_CS_ASSERT); | 188 | chip->cs_control(PXA2XX_CS_ASSERT); |
186 | return; | 189 | return; |
@@ -194,6 +197,9 @@ static void cs_deassert(struct driver_data *drv_data) | |||
194 | { | 197 | { |
195 | struct chip_data *chip = drv_data->cur_chip; | 198 | struct chip_data *chip = drv_data->cur_chip; |
196 | 199 | ||
200 | if (drv_data->ssp_type == CE4100_SSP) | ||
201 | return; | ||
202 | |||
197 | if (chip->cs_control) { | 203 | if (chip->cs_control) { |
198 | chip->cs_control(PXA2XX_CS_DEASSERT); | 204 | chip->cs_control(PXA2XX_CS_DEASSERT); |
199 | return; | 205 | return; |
@@ -203,6 +209,25 @@ static void cs_deassert(struct driver_data *drv_data) | |||
203 | gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); | 209 | gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); |
204 | } | 210 | } |
205 | 211 | ||
212 | static void write_SSSR_CS(struct driver_data *drv_data, u32 val) | ||
213 | { | ||
214 | void __iomem *reg = drv_data->ioaddr; | ||
215 | |||
216 | if (drv_data->ssp_type == CE4100_SSP) | ||
217 | val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; | ||
218 | |||
219 | write_SSSR(val, reg); | ||
220 | } | ||
221 | |||
222 | static int pxa25x_ssp_comp(struct driver_data *drv_data) | ||
223 | { | ||
224 | if (drv_data->ssp_type == PXA25x_SSP) | ||
225 | return 1; | ||
226 | if (drv_data->ssp_type == CE4100_SSP) | ||
227 | return 1; | ||
228 | return 0; | ||
229 | } | ||
230 | |||
206 | static int flush(struct driver_data *drv_data) | 231 | static int flush(struct driver_data *drv_data) |
207 | { | 232 | { |
208 | unsigned long limit = loops_per_jiffy << 1; | 233 | unsigned long limit = loops_per_jiffy << 1; |
@@ -214,7 +239,7 @@ static int flush(struct driver_data *drv_data) | |||
214 | read_SSDR(reg); | 239 | read_SSDR(reg); |
215 | } | 240 | } |
216 | } while ((read_SSSR(reg) & SSSR_BSY) && --limit); | 241 | } while ((read_SSSR(reg) & SSSR_BSY) && --limit); |
217 | write_SSSR(SSSR_ROR, reg); | 242 | write_SSSR_CS(drv_data, SSSR_ROR); |
218 | 243 | ||
219 | return limit; | 244 | return limit; |
220 | } | 245 | } |
@@ -224,7 +249,7 @@ static int null_writer(struct driver_data *drv_data) | |||
224 | void __iomem *reg = drv_data->ioaddr; | 249 | void __iomem *reg = drv_data->ioaddr; |
225 | u8 n_bytes = drv_data->n_bytes; | 250 | u8 n_bytes = drv_data->n_bytes; |
226 | 251 | ||
227 | if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) | 252 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) |
228 | || (drv_data->tx == drv_data->tx_end)) | 253 | || (drv_data->tx == drv_data->tx_end)) |
229 | return 0; | 254 | return 0; |
230 | 255 | ||
@@ -252,7 +277,7 @@ static int u8_writer(struct driver_data *drv_data) | |||
252 | { | 277 | { |
253 | void __iomem *reg = drv_data->ioaddr; | 278 | void __iomem *reg = drv_data->ioaddr; |
254 | 279 | ||
255 | if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) | 280 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) |
256 | || (drv_data->tx == drv_data->tx_end)) | 281 | || (drv_data->tx == drv_data->tx_end)) |
257 | return 0; | 282 | return 0; |
258 | 283 | ||
@@ -279,7 +304,7 @@ static int u16_writer(struct driver_data *drv_data) | |||
279 | { | 304 | { |
280 | void __iomem *reg = drv_data->ioaddr; | 305 | void __iomem *reg = drv_data->ioaddr; |
281 | 306 | ||
282 | if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) | 307 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) |
283 | || (drv_data->tx == drv_data->tx_end)) | 308 | || (drv_data->tx == drv_data->tx_end)) |
284 | return 0; | 309 | return 0; |
285 | 310 | ||
@@ -306,7 +331,7 @@ static int u32_writer(struct driver_data *drv_data) | |||
306 | { | 331 | { |
307 | void __iomem *reg = drv_data->ioaddr; | 332 | void __iomem *reg = drv_data->ioaddr; |
308 | 333 | ||
309 | if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) | 334 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) |
310 | || (drv_data->tx == drv_data->tx_end)) | 335 | || (drv_data->tx == drv_data->tx_end)) |
311 | return 0; | 336 | return 0; |
312 | 337 | ||
@@ -507,9 +532,9 @@ static void dma_error_stop(struct driver_data *drv_data, const char *msg) | |||
507 | /* Stop and reset */ | 532 | /* Stop and reset */ |
508 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | 533 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; |
509 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | 534 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; |
510 | write_SSSR(drv_data->clear_sr, reg); | 535 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
511 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | 536 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); |
512 | if (drv_data->ssp_type != PXA25x_SSP) | 537 | if (!pxa25x_ssp_comp(drv_data)) |
513 | write_SSTO(0, reg); | 538 | write_SSTO(0, reg); |
514 | flush(drv_data); | 539 | flush(drv_data); |
515 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 540 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); |
@@ -529,7 +554,7 @@ static void dma_transfer_complete(struct driver_data *drv_data) | |||
529 | 554 | ||
530 | /* Clear and disable interrupts on SSP and DMA channels*/ | 555 | /* Clear and disable interrupts on SSP and DMA channels*/ |
531 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | 556 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); |
532 | write_SSSR(drv_data->clear_sr, reg); | 557 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
533 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | 558 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; |
534 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | 559 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; |
535 | 560 | ||
@@ -622,7 +647,7 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data) | |||
622 | 647 | ||
623 | /* Clear and disable timeout interrupt, do the rest in | 648 | /* Clear and disable timeout interrupt, do the rest in |
624 | * dma_transfer_complete */ | 649 | * dma_transfer_complete */ |
625 | if (drv_data->ssp_type != PXA25x_SSP) | 650 | if (!pxa25x_ssp_comp(drv_data)) |
626 | write_SSTO(0, reg); | 651 | write_SSTO(0, reg); |
627 | 652 | ||
628 | /* finish this transfer, start the next */ | 653 | /* finish this transfer, start the next */ |
@@ -635,14 +660,26 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data) | |||
635 | return IRQ_NONE; | 660 | return IRQ_NONE; |
636 | } | 661 | } |
637 | 662 | ||
663 | static void reset_sccr1(struct driver_data *drv_data) | ||
664 | { | ||
665 | void __iomem *reg = drv_data->ioaddr; | ||
666 | struct chip_data *chip = drv_data->cur_chip; | ||
667 | u32 sccr1_reg; | ||
668 | |||
669 | sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; | ||
670 | sccr1_reg &= ~SSCR1_RFT; | ||
671 | sccr1_reg |= chip->threshold; | ||
672 | write_SSCR1(sccr1_reg, reg); | ||
673 | } | ||
674 | |||
638 | static void int_error_stop(struct driver_data *drv_data, const char* msg) | 675 | static void int_error_stop(struct driver_data *drv_data, const char* msg) |
639 | { | 676 | { |
640 | void __iomem *reg = drv_data->ioaddr; | 677 | void __iomem *reg = drv_data->ioaddr; |
641 | 678 | ||
642 | /* Stop and reset SSP */ | 679 | /* Stop and reset SSP */ |
643 | write_SSSR(drv_data->clear_sr, reg); | 680 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
644 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | 681 | reset_sccr1(drv_data); |
645 | if (drv_data->ssp_type != PXA25x_SSP) | 682 | if (!pxa25x_ssp_comp(drv_data)) |
646 | write_SSTO(0, reg); | 683 | write_SSTO(0, reg); |
647 | flush(drv_data); | 684 | flush(drv_data); |
648 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 685 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); |
@@ -658,12 +695,12 @@ static void int_transfer_complete(struct driver_data *drv_data) | |||
658 | void __iomem *reg = drv_data->ioaddr; | 695 | void __iomem *reg = drv_data->ioaddr; |
659 | 696 | ||
660 | /* Stop SSP */ | 697 | /* Stop SSP */ |
661 | write_SSSR(drv_data->clear_sr, reg); | 698 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
662 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | 699 | reset_sccr1(drv_data); |
663 | if (drv_data->ssp_type != PXA25x_SSP) | 700 | if (!pxa25x_ssp_comp(drv_data)) |
664 | write_SSTO(0, reg); | 701 | write_SSTO(0, reg); |
665 | 702 | ||
666 | /* Update total byte transfered return count actual bytes read */ | 703 | /* Update total byte transferred return count actual bytes read */ |
667 | drv_data->cur_msg->actual_length += drv_data->len - | 704 | drv_data->cur_msg->actual_length += drv_data->len - |
668 | (drv_data->rx_end - drv_data->rx); | 705 | (drv_data->rx_end - drv_data->rx); |
669 | 706 | ||
@@ -714,24 +751,34 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | |||
714 | } | 751 | } |
715 | 752 | ||
716 | if (drv_data->tx == drv_data->tx_end) { | 753 | if (drv_data->tx == drv_data->tx_end) { |
717 | write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); | 754 | u32 bytes_left; |
718 | /* PXA25x_SSP has no timeout, read trailing bytes */ | 755 | u32 sccr1_reg; |
719 | if (drv_data->ssp_type == PXA25x_SSP) { | 756 | |
720 | if (!wait_ssp_rx_stall(reg)) | 757 | sccr1_reg = read_SSCR1(reg); |
721 | { | 758 | sccr1_reg &= ~SSCR1_TIE; |
722 | int_error_stop(drv_data, "interrupt_transfer: " | 759 | |
723 | "rx stall failed"); | 760 | /* |
724 | return IRQ_HANDLED; | 761 | * PXA25x_SSP has no timeout, set up rx threshould for the |
725 | } | 762 | * PXA25x_SSP has no timeout, set up rx threshold for the |
726 | if (!drv_data->read(drv_data)) | 763 | */ |
727 | { | 764 | if (pxa25x_ssp_comp(drv_data)) { |
728 | int_error_stop(drv_data, | 765 | |
729 | "interrupt_transfer: " | 766 | sccr1_reg &= ~SSCR1_RFT; |
730 | "trailing byte read failed"); | 767 | |
731 | return IRQ_HANDLED; | 768 | bytes_left = drv_data->rx_end - drv_data->rx; |
769 | switch (drv_data->n_bytes) { | ||
770 | case 4: | ||
771 | bytes_left >>= 1; | ||
772 | case 2: | ||
773 | bytes_left >>= 1; | ||
732 | } | 774 | } |
733 | int_transfer_complete(drv_data); | 775 | |
776 | if (bytes_left > RX_THRESH_DFLT) | ||
777 | bytes_left = RX_THRESH_DFLT; | ||
778 | |||
779 | sccr1_reg |= SSCR1_RxTresh(bytes_left); | ||
734 | } | 780 | } |
781 | write_SSCR1(sccr1_reg, reg); | ||
735 | } | 782 | } |
736 | 783 | ||
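The reworked interrupt_transfer() no longer drains trailing bytes by hand on PXA25x-class ports; it reprograms the RX FIFO threshold with the number of words still expected, capped at RX_THRESH_DFLT. The switch converts the remaining byte count to words by falling through on purpose (4-byte words shift twice, 2-byte words once). A hedged sketch of that conversion on its own:

/* Byte count -> word count for n_bytes in {1, 2, 4}, using the same
 * intentional switch fall-through as the hunk above. */
static unsigned int bytes_to_words(unsigned int bytes, unsigned int n_bytes)
{
	switch (n_bytes) {
	case 4:
		bytes >>= 1;	/* fall through */
	case 2:
		bytes >>= 1;
	}
	return bytes;		/* n_bytes == 1 leaves the count unchanged */
}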
737 | /* We did something */ | 784 | /* We did something */ |
@@ -742,14 +789,26 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
742 | { | 789 | { |
743 | struct driver_data *drv_data = dev_id; | 790 | struct driver_data *drv_data = dev_id; |
744 | void __iomem *reg = drv_data->ioaddr; | 791 | void __iomem *reg = drv_data->ioaddr; |
792 | u32 sccr1_reg = read_SSCR1(reg); | ||
793 | u32 mask = drv_data->mask_sr; | ||
794 | u32 status; | ||
795 | |||
796 | status = read_SSSR(reg); | ||
797 | |||
798 | /* Ignore possible writes if we don't need to write */ | ||
799 | if (!(sccr1_reg & SSCR1_TIE)) | ||
800 | mask &= ~SSSR_TFS; | ||
801 | |||
802 | if (!(status & mask)) | ||
803 | return IRQ_NONE; | ||
745 | 804 | ||
746 | if (!drv_data->cur_msg) { | 805 | if (!drv_data->cur_msg) { |
747 | 806 | ||
748 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 807 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); |
749 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | 808 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); |
750 | if (drv_data->ssp_type != PXA25x_SSP) | 809 | if (!pxa25x_ssp_comp(drv_data)) |
751 | write_SSTO(0, reg); | 810 | write_SSTO(0, reg); |
752 | write_SSSR(drv_data->clear_sr, reg); | 811 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
753 | 812 | ||
754 | dev_err(&drv_data->pdev->dev, "bad message state " | 813 | dev_err(&drv_data->pdev->dev, "bad message state " |
755 | "in interrupt handler\n"); | 814 | "in interrupt handler\n"); |
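ssp_int() now reads its own status first and bails out with IRQ_NONE when none of the bits it cares about are set; this is what makes the IRQF_SHARED request added later in this file safe, since other handlers on the same line must get their turn. A minimal sketch of that contract, with hypothetical "foo" names and register offsets:

#include <linux/interrupt.h>
#include <linux/io.h>

struct foo_dev { void __iomem *base; };

#define FOO_STATUS	0x18	/* hypothetical status register offset */
#define FOO_IRQ_MASK	0x0f	/* hypothetical "our events" bits      */

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *fd = dev_id;
	u32 status = readl(fd->base + FOO_STATUS);

	if (!(status & FOO_IRQ_MASK))
		return IRQ_NONE;	/* not ours; let other handlers run */

	/* ... acknowledge and service the event ... */
	return IRQ_HANDLED;
}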
@@ -862,7 +921,7 @@ static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate) | |||
862 | { | 921 | { |
863 | unsigned long ssp_clk = clk_get_rate(ssp->clk); | 922 | unsigned long ssp_clk = clk_get_rate(ssp->clk); |
864 | 923 | ||
865 | if (ssp->type == PXA25x_SSP) | 924 | if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) |
866 | return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; | 925 | return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; |
867 | else | 926 | else |
868 | return ((ssp_clk / rate - 1) & 0xfff) << 8; | 927 | return ((ssp_clk / rate - 1) & 0xfff) << 8; |
@@ -1088,7 +1147,7 @@ static void pump_transfers(unsigned long data) | |||
1088 | 1147 | ||
1089 | /* Clear status */ | 1148 | /* Clear status */ |
1090 | cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; | 1149 | cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; |
1091 | write_SSSR(drv_data->clear_sr, reg); | 1150 | write_SSSR_CS(drv_data, drv_data->clear_sr); |
1092 | } | 1151 | } |
1093 | 1152 | ||
1094 | /* see if we need to reload the config registers */ | 1153 | /* see if we need to reload the config registers */ |
@@ -1098,7 +1157,7 @@ static void pump_transfers(unsigned long data) | |||
1098 | 1157 | ||
1099 | /* stop the SSP, and update the other bits */ | 1158 | /* stop the SSP, and update the other bits */ |
1100 | write_SSCR0(cr0 & ~SSCR0_SSE, reg); | 1159 | write_SSCR0(cr0 & ~SSCR0_SSE, reg); |
1101 | if (drv_data->ssp_type != PXA25x_SSP) | 1160 | if (!pxa25x_ssp_comp(drv_data)) |
1102 | write_SSTO(chip->timeout, reg); | 1161 | write_SSTO(chip->timeout, reg); |
1103 | /* first set CR1 without interrupt and service enables */ | 1162 | /* first set CR1 without interrupt and service enables */ |
1104 | write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); | 1163 | write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); |
@@ -1106,7 +1165,7 @@ static void pump_transfers(unsigned long data) | |||
1106 | write_SSCR0(cr0, reg); | 1165 | write_SSCR0(cr0, reg); |
1107 | 1166 | ||
1108 | } else { | 1167 | } else { |
1109 | if (drv_data->ssp_type != PXA25x_SSP) | 1168 | if (!pxa25x_ssp_comp(drv_data)) |
1110 | write_SSTO(chip->timeout, reg); | 1169 | write_SSTO(chip->timeout, reg); |
1111 | } | 1170 | } |
1112 | 1171 | ||
@@ -1233,14 +1292,13 @@ static int setup(struct spi_device *spi) | |||
1233 | uint tx_thres = TX_THRESH_DFLT; | 1292 | uint tx_thres = TX_THRESH_DFLT; |
1234 | uint rx_thres = RX_THRESH_DFLT; | 1293 | uint rx_thres = RX_THRESH_DFLT; |
1235 | 1294 | ||
1236 | if (drv_data->ssp_type != PXA25x_SSP | 1295 | if (!pxa25x_ssp_comp(drv_data) |
1237 | && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { | 1296 | && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { |
1238 | dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " | 1297 | dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " |
1239 | "b/w not 4-32 for type non-PXA25x_SSP\n", | 1298 | "b/w not 4-32 for type non-PXA25x_SSP\n", |
1240 | drv_data->ssp_type, spi->bits_per_word); | 1299 | drv_data->ssp_type, spi->bits_per_word); |
1241 | return -EINVAL; | 1300 | return -EINVAL; |
1242 | } | 1301 | } else if (pxa25x_ssp_comp(drv_data) |
1243 | else if (drv_data->ssp_type == PXA25x_SSP | ||
1244 | && (spi->bits_per_word < 4 | 1302 | && (spi->bits_per_word < 4 |
1245 | || spi->bits_per_word > 16)) { | 1303 | || spi->bits_per_word > 16)) { |
1246 | dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " | 1304 | dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " |
@@ -1259,7 +1317,17 @@ static int setup(struct spi_device *spi) | |||
1259 | return -ENOMEM; | 1317 | return -ENOMEM; |
1260 | } | 1318 | } |
1261 | 1319 | ||
1262 | chip->gpio_cs = -1; | 1320 | if (drv_data->ssp_type == CE4100_SSP) { |
1321 | if (spi->chip_select > 4) { | ||
1322 | dev_err(&spi->dev, "failed setup: " | ||
1323 | "cs number must not be > 4.\n"); | ||
1324 | kfree(chip); | ||
1325 | return -EINVAL; | ||
1326 | } | ||
1327 | |||
1328 | chip->frm = spi->chip_select; | ||
1329 | } else | ||
1330 | chip->gpio_cs = -1; | ||
1263 | chip->enable_dma = 0; | 1331 | chip->enable_dma = 0; |
1264 | chip->timeout = TIMOUT_DFLT; | 1332 | chip->timeout = TIMOUT_DFLT; |
1265 | chip->dma_burst_size = drv_data->master_info->enable_dma ? | 1333 | chip->dma_burst_size = drv_data->master_info->enable_dma ? |
@@ -1315,7 +1383,7 @@ static int setup(struct spi_device *spi) | |||
1315 | | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); | 1383 | | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); |
1316 | 1384 | ||
1317 | /* NOTE: PXA25x_SSP _could_ use external clocking ... */ | 1385 | /* NOTE: PXA25x_SSP _could_ use external clocking ... */ |
1318 | if (drv_data->ssp_type != PXA25x_SSP) | 1386 | if (!pxa25x_ssp_comp(drv_data)) |
1319 | dev_dbg(&spi->dev, "%ld Hz actual, %s\n", | 1387 | dev_dbg(&spi->dev, "%ld Hz actual, %s\n", |
1320 | clk_get_rate(ssp->clk) | 1388 | clk_get_rate(ssp->clk) |
1321 | / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), | 1389 | / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), |
@@ -1350,23 +1418,27 @@ static int setup(struct spi_device *spi) | |||
1350 | 1418 | ||
1351 | spi_set_ctldata(spi, chip); | 1419 | spi_set_ctldata(spi, chip); |
1352 | 1420 | ||
1421 | if (drv_data->ssp_type == CE4100_SSP) | ||
1422 | return 0; | ||
1423 | |||
1353 | return setup_cs(spi, chip, chip_info); | 1424 | return setup_cs(spi, chip, chip_info); |
1354 | } | 1425 | } |
1355 | 1426 | ||
1356 | static void cleanup(struct spi_device *spi) | 1427 | static void cleanup(struct spi_device *spi) |
1357 | { | 1428 | { |
1358 | struct chip_data *chip = spi_get_ctldata(spi); | 1429 | struct chip_data *chip = spi_get_ctldata(spi); |
1430 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
1359 | 1431 | ||
1360 | if (!chip) | 1432 | if (!chip) |
1361 | return; | 1433 | return; |
1362 | 1434 | ||
1363 | if (gpio_is_valid(chip->gpio_cs)) | 1435 | if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs)) |
1364 | gpio_free(chip->gpio_cs); | 1436 | gpio_free(chip->gpio_cs); |
1365 | 1437 | ||
1366 | kfree(chip); | 1438 | kfree(chip); |
1367 | } | 1439 | } |
1368 | 1440 | ||
1369 | static int __init init_queue(struct driver_data *drv_data) | 1441 | static int __devinit init_queue(struct driver_data *drv_data) |
1370 | { | 1442 | { |
1371 | INIT_LIST_HEAD(&drv_data->queue); | 1443 | INIT_LIST_HEAD(&drv_data->queue); |
1372 | spin_lock_init(&drv_data->lock); | 1444 | spin_lock_init(&drv_data->lock); |
@@ -1421,7 +1493,7 @@ static int stop_queue(struct driver_data *drv_data) | |||
1421 | * execution path (pump_messages) would be required to call wake_up or | 1493 | * execution path (pump_messages) would be required to call wake_up or |
1422 | * friends on every SPI message. Do this instead */ | 1494 | * friends on every SPI message. Do this instead */ |
1423 | drv_data->run = QUEUE_STOPPED; | 1495 | drv_data->run = QUEUE_STOPPED; |
1424 | while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { | 1496 | while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { |
1425 | spin_unlock_irqrestore(&drv_data->lock, flags); | 1497 | spin_unlock_irqrestore(&drv_data->lock, flags); |
1426 | msleep(10); | 1498 | msleep(10); |
1427 | spin_lock_irqsave(&drv_data->lock, flags); | 1499 | spin_lock_irqsave(&drv_data->lock, flags); |
@@ -1454,7 +1526,7 @@ static int destroy_queue(struct driver_data *drv_data) | |||
1454 | return 0; | 1526 | return 0; |
1455 | } | 1527 | } |
1456 | 1528 | ||
1457 | static int __init pxa2xx_spi_probe(struct platform_device *pdev) | 1529 | static int __devinit pxa2xx_spi_probe(struct platform_device *pdev) |
1458 | { | 1530 | { |
1459 | struct device *dev = &pdev->dev; | 1531 | struct device *dev = &pdev->dev; |
1460 | struct pxa2xx_spi_master *platform_info; | 1532 | struct pxa2xx_spi_master *platform_info; |
@@ -1484,6 +1556,8 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) | |||
1484 | drv_data->pdev = pdev; | 1556 | drv_data->pdev = pdev; |
1485 | drv_data->ssp = ssp; | 1557 | drv_data->ssp = ssp; |
1486 | 1558 | ||
1559 | master->dev.parent = &pdev->dev; | ||
1560 | master->dev.of_node = pdev->dev.of_node; | ||
1487 | /* the spi->mode bits understood by this driver: */ | 1561 | /* the spi->mode bits understood by this driver: */ |
1488 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1562 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1489 | 1563 | ||
@@ -1500,7 +1574,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) | |||
1500 | 1574 | ||
1501 | drv_data->ioaddr = ssp->mmio_base; | 1575 | drv_data->ioaddr = ssp->mmio_base; |
1502 | drv_data->ssdr_physical = ssp->phys_base + SSDR; | 1576 | drv_data->ssdr_physical = ssp->phys_base + SSDR; |
1503 | if (ssp->type == PXA25x_SSP) { | 1577 | if (pxa25x_ssp_comp(drv_data)) { |
1504 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; | 1578 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; |
1505 | drv_data->dma_cr1 = 0; | 1579 | drv_data->dma_cr1 = 0; |
1506 | drv_data->clear_sr = SSSR_ROR; | 1580 | drv_data->clear_sr = SSSR_ROR; |
@@ -1512,7 +1586,8 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) | |||
1512 | drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; | 1586 | drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; |
1513 | } | 1587 | } |
1514 | 1588 | ||
1515 | status = request_irq(ssp->irq, ssp_int, 0, dev_name(dev), drv_data); | 1589 | status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), |
1590 | drv_data); | ||
1516 | if (status < 0) { | 1591 | if (status < 0) { |
1517 | dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); | 1592 | dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); |
1518 | goto out_error_master_alloc; | 1593 | goto out_error_master_alloc; |
@@ -1561,7 +1636,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev) | |||
1561 | | SSCR0_Motorola | 1636 | | SSCR0_Motorola |
1562 | | SSCR0_DataSize(8), | 1637 | | SSCR0_DataSize(8), |
1563 | drv_data->ioaddr); | 1638 | drv_data->ioaddr); |
1564 | if (drv_data->ssp_type != PXA25x_SSP) | 1639 | if (!pxa25x_ssp_comp(drv_data)) |
1565 | write_SSTO(0, drv_data->ioaddr); | 1640 | write_SSTO(0, drv_data->ioaddr); |
1566 | write_SSPSP(0, drv_data->ioaddr); | 1641 | write_SSPSP(0, drv_data->ioaddr); |
1567 | 1642 | ||
@@ -1723,13 +1798,14 @@ static struct platform_driver driver = { | |||
1723 | .pm = &pxa2xx_spi_pm_ops, | 1798 | .pm = &pxa2xx_spi_pm_ops, |
1724 | #endif | 1799 | #endif |
1725 | }, | 1800 | }, |
1801 | .probe = pxa2xx_spi_probe, | ||
1726 | .remove = pxa2xx_spi_remove, | 1802 | .remove = pxa2xx_spi_remove, |
1727 | .shutdown = pxa2xx_spi_shutdown, | 1803 | .shutdown = pxa2xx_spi_shutdown, |
1728 | }; | 1804 | }; |
1729 | 1805 | ||
1730 | static int __init pxa2xx_spi_init(void) | 1806 | static int __init pxa2xx_spi_init(void) |
1731 | { | 1807 | { |
1732 | return platform_driver_probe(&driver, pxa2xx_spi_probe); | 1808 | return platform_driver_register(&driver); |
1733 | } | 1809 | } |
1734 | subsys_initcall(pxa2xx_spi_init); | 1810 | subsys_initcall(pxa2xx_spi_init); |
1735 | 1811 | ||
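The last hunk above replaces platform_driver_probe() with platform_driver_register() plus an explicit .probe hook, and marks probe/init __devinit, so platform devices created after the driver is loaded (such as the CE4100 glue device added below) can still bind. A minimal sketch of the pattern, using hypothetical names rather than code from this commit:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit example_spi_probe(struct platform_device *pdev)
{
	/* __devinit rather than __init: probe may now run long after
	 * boot, e.g. for a glue device added by a PCI driver */
	return 0;
}

static int __devexit example_spi_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.driver	= {
		.name	= "example-spi",
		.owner	= THIS_MODULE,
	},
	.probe	= example_spi_probe,	/* exposed via the struct ... */
	.remove	= __devexit_p(example_spi_remove),
};

static int __init example_spi_init(void)
{
	/* ... so platform_driver_register() can bind devices created
	 * later; platform_driver_probe() only scans devices that
	 * already exist at this point. */
	return platform_driver_register(&example_driver);
}
subsys_initcall(example_spi_init);
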
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c new file mode 100644 index 000000000000..378e504f89eb --- /dev/null +++ b/drivers/spi/pxa2xx_spi_pci.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * CE4100's SPI device is more or less the same one as found on PXA | ||
3 | * | ||
4 | */ | ||
5 | #include <linux/pci.h> | ||
6 | #include <linux/platform_device.h> | ||
7 | #include <linux/of_device.h> | ||
8 | #include <linux/spi/pxa2xx_spi.h> | ||
9 | |||
10 | struct ce4100_info { | ||
11 | struct ssp_device ssp; | ||
12 | struct platform_device *spi_pdev; | ||
13 | }; | ||
14 | |||
15 | static DEFINE_MUTEX(ssp_lock); | ||
16 | static LIST_HEAD(ssp_list); | ||
17 | |||
18 | struct ssp_device *pxa_ssp_request(int port, const char *label) | ||
19 | { | ||
20 | struct ssp_device *ssp = NULL; | ||
21 | |||
22 | mutex_lock(&ssp_lock); | ||
23 | |||
24 | list_for_each_entry(ssp, &ssp_list, node) { | ||
25 | if (ssp->port_id == port && ssp->use_count == 0) { | ||
26 | ssp->use_count++; | ||
27 | ssp->label = label; | ||
28 | break; | ||
29 | } | ||
30 | } | ||
31 | |||
32 | mutex_unlock(&ssp_lock); | ||
33 | |||
34 | if (&ssp->node == &ssp_list) | ||
35 | return NULL; | ||
36 | |||
37 | return ssp; | ||
38 | } | ||
39 | EXPORT_SYMBOL_GPL(pxa_ssp_request); | ||
40 | |||
41 | void pxa_ssp_free(struct ssp_device *ssp) | ||
42 | { | ||
43 | mutex_lock(&ssp_lock); | ||
44 | if (ssp->use_count) { | ||
45 | ssp->use_count--; | ||
46 | ssp->label = NULL; | ||
47 | } else | ||
48 | dev_err(&ssp->pdev->dev, "device already free\n"); | ||
49 | mutex_unlock(&ssp_lock); | ||
50 | } | ||
51 | EXPORT_SYMBOL_GPL(pxa_ssp_free); | ||
52 | |||
53 | static int __devinit ce4100_spi_probe(struct pci_dev *dev, | ||
54 | const struct pci_device_id *ent) | ||
55 | { | ||
56 | int ret; | ||
57 | resource_size_t phys_beg; | ||
58 | resource_size_t phys_len; | ||
59 | struct ce4100_info *spi_info; | ||
60 | struct platform_device *pdev; | ||
61 | struct pxa2xx_spi_master spi_pdata; | ||
62 | struct ssp_device *ssp; | ||
63 | |||
64 | ret = pci_enable_device(dev); | ||
65 | if (ret) | ||
66 | return ret; | ||
67 | |||
68 | phys_beg = pci_resource_start(dev, 0); | ||
69 | phys_len = pci_resource_len(dev, 0); | ||
70 | |||
71 | if (!request_mem_region(phys_beg, phys_len, | ||
72 | "CE4100 SPI")) { | ||
73 | dev_err(&dev->dev, "Can't request register space.\n"); | ||
74 | ret = -EBUSY; | ||
75 | return ret; | ||
76 | } | ||
77 | |||
78 | pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); | ||
79 | spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); | ||
80 | if (!pdev || !spi_info ) { | ||
81 | ret = -ENOMEM; | ||
82 | goto err_nomem; | ||
83 | } | ||
84 | memset(&spi_pdata, 0, sizeof(spi_pdata)); | ||
85 | spi_pdata.num_chipselect = dev->devfn; | ||
86 | |||
87 | ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); | ||
88 | if (ret) | ||
89 | goto err_nomem; | ||
90 | |||
91 | pdev->dev.parent = &dev->dev; | ||
92 | pdev->dev.of_node = dev->dev.of_node; | ||
93 | ssp = &spi_info->ssp; | ||
94 | ssp->phys_base = pci_resource_start(dev, 0); | ||
95 | ssp->mmio_base = ioremap(phys_beg, phys_len); | ||
96 | if (!ssp->mmio_base) { | ||
97 | dev_err(&pdev->dev, "failed to ioremap() registers\n"); | ||
98 | ret = -EIO; | ||
99 | goto err_nomem; | ||
100 | } | ||
101 | ssp->irq = dev->irq; | ||
102 | ssp->port_id = pdev->id; | ||
103 | ssp->type = PXA25x_SSP; | ||
104 | |||
105 | mutex_lock(&ssp_lock); | ||
106 | list_add(&ssp->node, &ssp_list); | ||
107 | mutex_unlock(&ssp_lock); | ||
108 | |||
109 | pci_set_drvdata(dev, spi_info); | ||
110 | |||
111 | ret = platform_device_add(pdev); | ||
112 | if (ret) | ||
113 | goto err_dev_add; | ||
114 | |||
115 | return ret; | ||
116 | |||
117 | err_dev_add: | ||
118 | pci_set_drvdata(dev, NULL); | ||
119 | mutex_lock(&ssp_lock); | ||
120 | list_del(&ssp->node); | ||
121 | mutex_unlock(&ssp_lock); | ||
122 | iounmap(ssp->mmio_base); | ||
123 | |||
124 | err_nomem: | ||
125 | release_mem_region(phys_beg, phys_len); | ||
126 | platform_device_put(pdev); | ||
127 | kfree(spi_info); | ||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | static void __devexit ce4100_spi_remove(struct pci_dev *dev) | ||
132 | { | ||
133 | struct ce4100_info *spi_info; | ||
134 | struct ssp_device *ssp; | ||
135 | |||
136 | spi_info = pci_get_drvdata(dev); | ||
137 | ssp = &spi_info->ssp; | ||
138 | platform_device_unregister(spi_info->spi_pdev); | ||
139 | |||
140 | iounmap(ssp->mmio_base); | ||
141 | release_mem_region(pci_resource_start(dev, 0), | ||
142 | pci_resource_len(dev, 0)); | ||
143 | |||
144 | mutex_lock(&ssp_lock); | ||
145 | list_del(&ssp->node); | ||
146 | mutex_unlock(&ssp_lock); | ||
147 | |||
148 | pci_set_drvdata(dev, NULL); | ||
149 | pci_disable_device(dev); | ||
150 | kfree(spi_info); | ||
151 | } | ||
152 | |||
153 | static struct pci_device_id ce4100_spi_devices[] __devinitdata = { | ||
154 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, | ||
155 | { }, | ||
156 | }; | ||
157 | MODULE_DEVICE_TABLE(pci, ce4100_spi_devices); | ||
158 | |||
159 | static struct pci_driver ce4100_spi_driver = { | ||
160 | .name = "ce4100_spi", | ||
161 | .id_table = ce4100_spi_devices, | ||
162 | .probe = ce4100_spi_probe, | ||
163 | .remove = __devexit_p(ce4100_spi_remove), | ||
164 | }; | ||
165 | |||
166 | static int __init ce4100_spi_init(void) | ||
167 | { | ||
168 | return pci_register_driver(&ce4100_spi_driver); | ||
169 | } | ||
170 | module_init(ce4100_spi_init); | ||
171 | |||
172 | static void __exit ce4100_spi_exit(void) | ||
173 | { | ||
174 | pci_unregister_driver(&ce4100_spi_driver); | ||
175 | } | ||
176 | module_exit(ce4100_spi_exit); | ||
177 | |||
178 | MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver"); | ||
179 | MODULE_LICENSE("GPL v2"); | ||
180 | MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>"); | ||
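The pxa_ssp_request()/pxa_ssp_free() pair above is how the core pxa2xx driver is expected to find the SSP port this glue code placed on ssp_list. A hedged usage sketch; the consumer function name is hypothetical:

#include <linux/spi/pxa2xx_spi.h>

static int example_claim_port(int port_id)
{
	struct ssp_device *ssp;

	/* marks the first unused port with this id as busy and labels it */
	ssp = pxa_ssp_request(port_id, "pxa2xx-spi");
	if (!ssp)
		return -ENODEV;

	/* the caller can now use ssp->mmio_base, ssp->irq, ssp->type */

	pxa_ssp_free(ssp);	/* release the port when done */
	return 0;
}
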
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index b5a78a1f4421..2e13a14bba3f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -28,12 +28,8 @@ | |||
28 | #include <linux/mod_devicetable.h> | 28 | #include <linux/mod_devicetable.h> |
29 | #include <linux/spi/spi.h> | 29 | #include <linux/spi/spi.h> |
30 | #include <linux/of_spi.h> | 30 | #include <linux/of_spi.h> |
31 | #include <linux/pm_runtime.h> | ||
31 | 32 | ||
32 | |||
33 | /* SPI bustype and spi_master class are registered after board init code | ||
34 | * provides the SPI device tables, ensuring that both are present by the | ||
35 | * time controller driver registration causes spi_devices to "enumerate". | ||
36 | */ | ||
37 | static void spidev_release(struct device *dev) | 33 | static void spidev_release(struct device *dev) |
38 | { | 34 | { |
39 | struct spi_device *spi = to_spi_device(dev); | 35 | struct spi_device *spi = to_spi_device(dev); |
@@ -105,9 +101,8 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
105 | return 0; | 101 | return 0; |
106 | } | 102 | } |
107 | 103 | ||
108 | #ifdef CONFIG_PM | 104 | #ifdef CONFIG_PM_SLEEP |
109 | 105 | static int spi_legacy_suspend(struct device *dev, pm_message_t message) | |
110 | static int spi_suspend(struct device *dev, pm_message_t message) | ||
111 | { | 106 | { |
112 | int value = 0; | 107 | int value = 0; |
113 | struct spi_driver *drv = to_spi_driver(dev->driver); | 108 | struct spi_driver *drv = to_spi_driver(dev->driver); |
@@ -122,7 +117,7 @@ static int spi_suspend(struct device *dev, pm_message_t message) | |||
122 | return value; | 117 | return value; |
123 | } | 118 | } |
124 | 119 | ||
125 | static int spi_resume(struct device *dev) | 120 | static int spi_legacy_resume(struct device *dev) |
126 | { | 121 | { |
127 | int value = 0; | 122 | int value = 0; |
128 | struct spi_driver *drv = to_spi_driver(dev->driver); | 123 | struct spi_driver *drv = to_spi_driver(dev->driver); |
@@ -137,18 +132,94 @@ static int spi_resume(struct device *dev) | |||
137 | return value; | 132 | return value; |
138 | } | 133 | } |
139 | 134 | ||
135 | static int spi_pm_suspend(struct device *dev) | ||
136 | { | ||
137 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
138 | |||
139 | if (pm) | ||
140 | return pm_generic_suspend(dev); | ||
141 | else | ||
142 | return spi_legacy_suspend(dev, PMSG_SUSPEND); | ||
143 | } | ||
144 | |||
145 | static int spi_pm_resume(struct device *dev) | ||
146 | { | ||
147 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
148 | |||
149 | if (pm) | ||
150 | return pm_generic_resume(dev); | ||
151 | else | ||
152 | return spi_legacy_resume(dev); | ||
153 | } | ||
154 | |||
155 | static int spi_pm_freeze(struct device *dev) | ||
156 | { | ||
157 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
158 | |||
159 | if (pm) | ||
160 | return pm_generic_freeze(dev); | ||
161 | else | ||
162 | return spi_legacy_suspend(dev, PMSG_FREEZE); | ||
163 | } | ||
164 | |||
165 | static int spi_pm_thaw(struct device *dev) | ||
166 | { | ||
167 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
168 | |||
169 | if (pm) | ||
170 | return pm_generic_thaw(dev); | ||
171 | else | ||
172 | return spi_legacy_resume(dev); | ||
173 | } | ||
174 | |||
175 | static int spi_pm_poweroff(struct device *dev) | ||
176 | { | ||
177 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
178 | |||
179 | if (pm) | ||
180 | return pm_generic_poweroff(dev); | ||
181 | else | ||
182 | return spi_legacy_suspend(dev, PMSG_HIBERNATE); | ||
183 | } | ||
184 | |||
185 | static int spi_pm_restore(struct device *dev) | ||
186 | { | ||
187 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
188 | |||
189 | if (pm) | ||
190 | return pm_generic_restore(dev); | ||
191 | else | ||
192 | return spi_legacy_resume(dev); | ||
193 | } | ||
140 | #else | 194 | #else |
141 | #define spi_suspend NULL | 195 | #define spi_pm_suspend NULL |
142 | #define spi_resume NULL | 196 | #define spi_pm_resume NULL |
197 | #define spi_pm_freeze NULL | ||
198 | #define spi_pm_thaw NULL | ||
199 | #define spi_pm_poweroff NULL | ||
200 | #define spi_pm_restore NULL | ||
143 | #endif | 201 | #endif |
144 | 202 | ||
203 | static const struct dev_pm_ops spi_pm = { | ||
204 | .suspend = spi_pm_suspend, | ||
205 | .resume = spi_pm_resume, | ||
206 | .freeze = spi_pm_freeze, | ||
207 | .thaw = spi_pm_thaw, | ||
208 | .poweroff = spi_pm_poweroff, | ||
209 | .restore = spi_pm_restore, | ||
210 | SET_RUNTIME_PM_OPS( | ||
211 | pm_generic_runtime_suspend, | ||
212 | pm_generic_runtime_resume, | ||
213 | pm_generic_runtime_idle | ||
214 | ) | ||
215 | }; | ||
216 | |||
145 | struct bus_type spi_bus_type = { | 217 | struct bus_type spi_bus_type = { |
146 | .name = "spi", | 218 | .name = "spi", |
147 | .dev_attrs = spi_dev_attrs, | 219 | .dev_attrs = spi_dev_attrs, |
148 | .match = spi_match_device, | 220 | .match = spi_match_device, |
149 | .uevent = spi_uevent, | 221 | .uevent = spi_uevent, |
150 | .suspend = spi_suspend, | 222 | .pm = &spi_pm, |
151 | .resume = spi_resume, | ||
152 | }; | 223 | }; |
153 | EXPORT_SYMBOL_GPL(spi_bus_type); | 224 | EXPORT_SYMBOL_GPL(spi_bus_type); |
154 | 225 | ||
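With the hunk above, the SPI bus routes system sleep through dev_pm_ops and falls back to the legacy spi_driver suspend/resume callbacks only when a driver supplies no pm ops. A sketch of a protocol driver using the new-style callbacks; driver and function names are hypothetical:

#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

static int example_dev_suspend(struct device *dev)
{
	/* quiesce the SPI slave; spi_pm_suspend() reaches this through
	 * pm_generic_suspend() because dev->driver->pm is non-NULL */
	return 0;
}

static int example_dev_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_dev_pm_ops = {
	.suspend	= example_dev_suspend,
	.resume		= example_dev_resume,
};

static int __devinit example_dev_probe(struct spi_device *spi)
{
	return 0;
}

static int __devexit example_dev_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_dev_driver = {
	.driver = {
		.name	= "example-dev",
		.owner	= THIS_MODULE,
		.pm	= &example_dev_pm_ops,	/* preferred over legacy hooks */
	},
	.probe	= example_dev_probe,
	.remove	= __devexit_p(example_dev_remove),
};
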
@@ -202,11 +273,16 @@ EXPORT_SYMBOL_GPL(spi_register_driver); | |||
202 | 273 | ||
203 | struct boardinfo { | 274 | struct boardinfo { |
204 | struct list_head list; | 275 | struct list_head list; |
205 | unsigned n_board_info; | 276 | struct spi_board_info board_info; |
206 | struct spi_board_info board_info[0]; | ||
207 | }; | 277 | }; |
208 | 278 | ||
209 | static LIST_HEAD(board_list); | 279 | static LIST_HEAD(board_list); |
280 | static LIST_HEAD(spi_master_list); | ||
281 | |||
282 | /* | ||
283 | * Used to protect add/del operations for board_info list and | ||
284 | * spi_master list, and their matching process | ||
285 | */ | ||
210 | static DEFINE_MUTEX(board_lock); | 286 | static DEFINE_MUTEX(board_lock); |
211 | 287 | ||
212 | /** | 288 | /** |
@@ -300,16 +376,16 @@ int spi_add_device(struct spi_device *spi) | |||
300 | */ | 376 | */ |
301 | status = spi_setup(spi); | 377 | status = spi_setup(spi); |
302 | if (status < 0) { | 378 | if (status < 0) { |
303 | dev_err(dev, "can't %s %s, status %d\n", | 379 | dev_err(dev, "can't setup %s, status %d\n", |
304 | "setup", dev_name(&spi->dev), status); | 380 | dev_name(&spi->dev), status); |
305 | goto done; | 381 | goto done; |
306 | } | 382 | } |
307 | 383 | ||
308 | /* Device may be bound to an active driver when this returns */ | 384 | /* Device may be bound to an active driver when this returns */ |
309 | status = device_add(&spi->dev); | 385 | status = device_add(&spi->dev); |
310 | if (status < 0) | 386 | if (status < 0) |
311 | dev_err(dev, "can't %s %s, status %d\n", | 387 | dev_err(dev, "can't add %s, status %d\n", |
312 | "add", dev_name(&spi->dev), status); | 388 | dev_name(&spi->dev), status); |
313 | else | 389 | else |
314 | dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); | 390 | dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); |
315 | 391 | ||
@@ -371,6 +447,20 @@ struct spi_device *spi_new_device(struct spi_master *master, | |||
371 | } | 447 | } |
372 | EXPORT_SYMBOL_GPL(spi_new_device); | 448 | EXPORT_SYMBOL_GPL(spi_new_device); |
373 | 449 | ||
450 | static void spi_match_master_to_boardinfo(struct spi_master *master, | ||
451 | struct spi_board_info *bi) | ||
452 | { | ||
453 | struct spi_device *dev; | ||
454 | |||
455 | if (master->bus_num != bi->bus_num) | ||
456 | return; | ||
457 | |||
458 | dev = spi_new_device(master, bi); | ||
459 | if (!dev) | ||
460 | dev_err(master->dev.parent, "can't create new device for %s\n", | ||
461 | bi->modalias); | ||
462 | } | ||
463 | |||
374 | /** | 464 | /** |
375 | * spi_register_board_info - register SPI devices for a given board | 465 | * spi_register_board_info - register SPI devices for a given board |
376 | * @info: array of chip descriptors | 466 | * @info: array of chip descriptors |
@@ -393,43 +483,25 @@ EXPORT_SYMBOL_GPL(spi_new_device); | |||
393 | int __init | 483 | int __init |
394 | spi_register_board_info(struct spi_board_info const *info, unsigned n) | 484 | spi_register_board_info(struct spi_board_info const *info, unsigned n) |
395 | { | 485 | { |
396 | struct boardinfo *bi; | 486 | struct boardinfo *bi; |
487 | int i; | ||
397 | 488 | ||
398 | bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL); | 489 | bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); |
399 | if (!bi) | 490 | if (!bi) |
400 | return -ENOMEM; | 491 | return -ENOMEM; |
401 | bi->n_board_info = n; | ||
402 | memcpy(bi->board_info, info, n * sizeof *info); | ||
403 | |||
404 | mutex_lock(&board_lock); | ||
405 | list_add_tail(&bi->list, &board_list); | ||
406 | mutex_unlock(&board_lock); | ||
407 | return 0; | ||
408 | } | ||
409 | 492 | ||
410 | /* FIXME someone should add support for a __setup("spi", ...) that | 493 | for (i = 0; i < n; i++, bi++, info++) { |
411 | * creates board info from kernel command lines | 494 | struct spi_master *master; |
412 | */ | ||
413 | |||
414 | static void scan_boardinfo(struct spi_master *master) | ||
415 | { | ||
416 | struct boardinfo *bi; | ||
417 | 495 | ||
418 | mutex_lock(&board_lock); | 496 | memcpy(&bi->board_info, info, sizeof(*info)); |
419 | list_for_each_entry(bi, &board_list, list) { | 497 | mutex_lock(&board_lock); |
420 | struct spi_board_info *chip = bi->board_info; | 498 | list_add_tail(&bi->list, &board_list); |
421 | unsigned n; | 499 | list_for_each_entry(master, &spi_master_list, list) |
422 | 500 | spi_match_master_to_boardinfo(master, &bi->board_info); | |
423 | for (n = bi->n_board_info; n > 0; n--, chip++) { | 501 | mutex_unlock(&board_lock); |
424 | if (chip->bus_num != master->bus_num) | ||
425 | continue; | ||
426 | /* NOTE: this relies on spi_new_device to | ||
427 | * issue diagnostics when given bogus inputs | ||
428 | */ | ||
429 | (void) spi_new_device(master, chip); | ||
430 | } | ||
431 | } | 502 | } |
432 | mutex_unlock(&board_lock); | 503 | |
504 | return 0; | ||
433 | } | 505 | } |
434 | 506 | ||
435 | /*-------------------------------------------------------------------------*/ | 507 | /*-------------------------------------------------------------------------*/ |
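The rewritten spi_register_board_info() above stores one boardinfo node per entry and immediately matches it against every master already on spi_master_list, while spi_register_master() (next hunk) re-walks board_list, so registration order no longer matters. Typical board code looks roughly like this; the slave entry is a made-up example, not from the commit:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "m25p80",	/* SPI flash, as an example */
		.max_speed_hz	= 25 * 1000 * 1000,
		.bus_num	= 1,		/* must match master->bus_num */
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_board_init(void)
{
	/* safe to call before or after the matching master registers */
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}
arch_initcall(example_board_init);
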
@@ -512,6 +584,7 @@ int spi_register_master(struct spi_master *master) | |||
512 | { | 584 | { |
513 | static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); | 585 | static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); |
514 | struct device *dev = master->dev.parent; | 586 | struct device *dev = master->dev.parent; |
587 | struct boardinfo *bi; | ||
515 | int status = -ENODEV; | 588 | int status = -ENODEV; |
516 | int dynamic = 0; | 589 | int dynamic = 0; |
517 | 590 | ||
@@ -547,8 +620,12 @@ int spi_register_master(struct spi_master *master) | |||
547 | dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), | 620 | dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), |
548 | dynamic ? " (dynamic)" : ""); | 621 | dynamic ? " (dynamic)" : ""); |
549 | 622 | ||
550 | /* populate children from any spi device tables */ | 623 | mutex_lock(&board_lock); |
551 | scan_boardinfo(master); | 624 | list_add_tail(&master->list, &spi_master_list); |
625 | list_for_each_entry(bi, &board_list, list) | ||
626 | spi_match_master_to_boardinfo(master, &bi->board_info); | ||
627 | mutex_unlock(&board_lock); | ||
628 | |||
552 | status = 0; | 629 | status = 0; |
553 | 630 | ||
554 | /* Register devices from the device tree */ | 631 | /* Register devices from the device tree */ |
@@ -579,6 +656,10 @@ void spi_unregister_master(struct spi_master *master) | |||
579 | { | 656 | { |
580 | int dummy; | 657 | int dummy; |
581 | 658 | ||
659 | mutex_lock(&board_lock); | ||
660 | list_del(&master->list); | ||
661 | mutex_unlock(&board_lock); | ||
662 | |||
582 | dummy = device_for_each_child(&master->dev, NULL, __unregister); | 663 | dummy = device_for_each_child(&master->dev, NULL, __unregister); |
583 | device_unregister(&master->dev); | 664 | device_unregister(&master->dev); |
584 | } | 665 | } |
@@ -652,7 +733,7 @@ int spi_setup(struct spi_device *spi) | |||
652 | */ | 733 | */ |
653 | bad_bits = spi->mode & ~spi->master->mode_bits; | 734 | bad_bits = spi->mode & ~spi->master->mode_bits; |
654 | if (bad_bits) { | 735 | if (bad_bits) { |
655 | dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", | 736 | dev_err(&spi->dev, "setup: unsupported mode bits %x\n", |
656 | bad_bits); | 737 | bad_bits); |
657 | return -EINVAL; | 738 | return -EINVAL; |
658 | } | 739 | } |
@@ -876,7 +957,7 @@ EXPORT_SYMBOL_GPL(spi_sync); | |||
876 | * drivers may DMA directly into and out of the message buffers. | 957 | * drivers may DMA directly into and out of the message buffers. |
877 | * | 958 | * |
878 | * This call should be used by drivers that require exclusive access to the | 959 | * This call should be used by drivers that require exclusive access to the |
879 | * SPI bus. It has to be preceeded by a spi_bus_lock call. The SPI bus must | 960 | * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must |
880 | * be released by a spi_bus_unlock call when the exclusive access is over. | 961 | * be released by a spi_bus_unlock call when the exclusive access is over. |
881 | * | 962 | * |
882 | * It returns zero on success, else a negative error code. | 963 | * It returns zero on success, else a negative error code. |
@@ -966,8 +1047,8 @@ static u8 *buf; | |||
966 | * spi_{async,sync}() calls with dma-safe buffers. | 1047 | * spi_{async,sync}() calls with dma-safe buffers. |
967 | */ | 1048 | */ |
968 | int spi_write_then_read(struct spi_device *spi, | 1049 | int spi_write_then_read(struct spi_device *spi, |
969 | const u8 *txbuf, unsigned n_tx, | 1050 | const void *txbuf, unsigned n_tx, |
970 | u8 *rxbuf, unsigned n_rx) | 1051 | void *rxbuf, unsigned n_rx) |
971 | { | 1052 | { |
972 | static DEFINE_MUTEX(lock); | 1053 | static DEFINE_MUTEX(lock); |
973 | 1054 | ||
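The final hunk relaxes spi_write_then_read() to void * buffers, so callers no longer need u8 casts for register-style transactions. A small usage sketch; the opcode and device are hypothetical:

#include <linux/spi/spi.h>

static int example_read_chip_id(struct spi_device *spi, u8 id[3])
{
	const u8 opcode = 0x9f;		/* made-up "read id" command */

	/* one dma-safe write of the opcode, then read three id bytes;
	 * both buffers now pass as plain void pointers */
	return spi_write_then_read(spi, &opcode, 1, id, 3);
}
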
diff --git a/drivers/spi/spi_altera.c b/drivers/spi/spi_altera.c new file mode 100644 index 000000000000..4813a63ce6fb --- /dev/null +++ b/drivers/spi/spi_altera.c | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Altera SPI driver | ||
3 | * | ||
4 | * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw> | ||
5 | * | ||
6 | * Based on spi_s3c24xx.c, which is: | ||
7 | * Copyright (c) 2006 Ben Dooks | ||
8 | * Copyright (c) 2006 Simtec Electronics | ||
9 | * Ben Dooks <ben@simtec.co.uk> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
14 | */ | ||
15 | |||
16 | #include <linux/init.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/spi/spi.h> | ||
21 | #include <linux/spi/spi_bitbang.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/of.h> | ||
24 | |||
25 | #define DRV_NAME "spi_altera" | ||
26 | |||
27 | #define ALTERA_SPI_RXDATA 0 | ||
28 | #define ALTERA_SPI_TXDATA 4 | ||
29 | #define ALTERA_SPI_STATUS 8 | ||
30 | #define ALTERA_SPI_CONTROL 12 | ||
31 | #define ALTERA_SPI_SLAVE_SEL 20 | ||
32 | |||
33 | #define ALTERA_SPI_STATUS_ROE_MSK 0x8 | ||
34 | #define ALTERA_SPI_STATUS_TOE_MSK 0x10 | ||
35 | #define ALTERA_SPI_STATUS_TMT_MSK 0x20 | ||
36 | #define ALTERA_SPI_STATUS_TRDY_MSK 0x40 | ||
37 | #define ALTERA_SPI_STATUS_RRDY_MSK 0x80 | ||
38 | #define ALTERA_SPI_STATUS_E_MSK 0x100 | ||
39 | |||
40 | #define ALTERA_SPI_CONTROL_IROE_MSK 0x8 | ||
41 | #define ALTERA_SPI_CONTROL_ITOE_MSK 0x10 | ||
42 | #define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40 | ||
43 | #define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80 | ||
44 | #define ALTERA_SPI_CONTROL_IE_MSK 0x100 | ||
45 | #define ALTERA_SPI_CONTROL_SSO_MSK 0x400 | ||
46 | |||
47 | struct altera_spi { | ||
48 | /* bitbang has to be first */ | ||
49 | struct spi_bitbang bitbang; | ||
50 | struct completion done; | ||
51 | |||
52 | void __iomem *base; | ||
53 | int irq; | ||
54 | int len; | ||
55 | int count; | ||
56 | int bytes_per_word; | ||
57 | unsigned long imr; | ||
58 | |||
59 | /* data buffers */ | ||
60 | const unsigned char *tx; | ||
61 | unsigned char *rx; | ||
62 | }; | ||
63 | |||
64 | static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev) | ||
65 | { | ||
66 | return spi_master_get_devdata(sdev->master); | ||
67 | } | ||
68 | |||
69 | static void altera_spi_chipsel(struct spi_device *spi, int value) | ||
70 | { | ||
71 | struct altera_spi *hw = altera_spi_to_hw(spi); | ||
72 | |||
73 | if (spi->mode & SPI_CS_HIGH) { | ||
74 | switch (value) { | ||
75 | case BITBANG_CS_INACTIVE: | ||
76 | writel(1 << spi->chip_select, | ||
77 | hw->base + ALTERA_SPI_SLAVE_SEL); | ||
78 | hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; | ||
79 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | ||
80 | break; | ||
81 | |||
82 | case BITBANG_CS_ACTIVE: | ||
83 | hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; | ||
84 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | ||
85 | writel(0, hw->base + ALTERA_SPI_SLAVE_SEL); | ||
86 | break; | ||
87 | } | ||
88 | } else { | ||
89 | switch (value) { | ||
90 | case BITBANG_CS_INACTIVE: | ||
91 | hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; | ||
92 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | ||
93 | break; | ||
94 | |||
95 | case BITBANG_CS_ACTIVE: | ||
96 | writel(1 << spi->chip_select, | ||
97 | hw->base + ALTERA_SPI_SLAVE_SEL); | ||
98 | hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; | ||
99 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | ||
100 | break; | ||
101 | } | ||
102 | } | ||
103 | } | ||
104 | |||
105 | static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) | ||
106 | { | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int altera_spi_setup(struct spi_device *spi) | ||
111 | { | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static inline unsigned int hw_txbyte(struct altera_spi *hw, int count) | ||
116 | { | ||
117 | if (hw->tx) { | ||
118 | switch (hw->bytes_per_word) { | ||
119 | case 1: | ||
120 | return hw->tx[count]; | ||
121 | case 2: | ||
122 | return (hw->tx[count * 2] | ||
123 | | (hw->tx[count * 2 + 1] << 8)); | ||
124 | } | ||
125 | } | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | ||
130 | { | ||
131 | struct altera_spi *hw = altera_spi_to_hw(spi); | ||
132 | |||
133 | hw->tx = t->tx_buf; | ||
134 | hw->rx = t->rx_buf; | ||
135 | hw->count = 0; | ||
136 | hw->bytes_per_word = (t->bits_per_word ? : spi->bits_per_word) / 8; | ||
137 | hw->len = t->len / hw->bytes_per_word; | ||
138 | |||
139 | if (hw->irq >= 0) { | ||
140 | /* enable receive interrupt */ | ||
141 | hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK; | ||
142 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | ||
143 | |||
144 | /* send the first byte */ | ||
145 | writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); | ||
146 | |||
147 | wait_for_completion(&hw->done); | ||
148 | /* disable receive interrupt */ | ||
149 | hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK; | ||
150 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | ||
151 | } else { | ||
152 | /* send the first byte */ | ||
153 | writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA); | ||
154 | |||
155 | while (1) { | ||
156 | unsigned int rxd; | ||
157 | |||
158 | while (!(readl(hw->base + ALTERA_SPI_STATUS) & | ||
159 | ALTERA_SPI_STATUS_RRDY_MSK)) | ||
160 | cpu_relax(); | ||
161 | |||
162 | rxd = readl(hw->base + ALTERA_SPI_RXDATA); | ||
163 | if (hw->rx) { | ||
164 | switch (hw->bytes_per_word) { | ||
165 | case 1: | ||
166 | hw->rx[hw->count] = rxd; | ||
167 | break; | ||
168 | case 2: | ||
169 | hw->rx[hw->count * 2] = rxd; | ||
170 | hw->rx[hw->count * 2 + 1] = rxd >> 8; | ||
171 | break; | ||
172 | } | ||
173 | } | ||
174 | |||
175 | hw->count++; | ||
176 | |||
177 | if (hw->count < hw->len) | ||
178 | writel(hw_txbyte(hw, hw->count), | ||
179 | hw->base + ALTERA_SPI_TXDATA); | ||
180 | else | ||
181 | break; | ||
182 | } | ||
183 | |||
184 | } | ||
185 | |||
186 | return hw->count * hw->bytes_per_word; | ||
187 | } | ||
188 | |||
189 | static irqreturn_t altera_spi_irq(int irq, void *dev) | ||
190 | { | ||
191 | struct altera_spi *hw = dev; | ||
192 | unsigned int rxd; | ||
193 | |||
194 | rxd = readl(hw->base + ALTERA_SPI_RXDATA); | ||
195 | if (hw->rx) { | ||
196 | switch (hw->bytes_per_word) { | ||
197 | case 1: | ||
198 | hw->rx[hw->count] = rxd; | ||
199 | break; | ||
200 | case 2: | ||
201 | hw->rx[hw->count * 2] = rxd; | ||
202 | hw->rx[hw->count * 2 + 1] = rxd >> 8; | ||
203 | break; | ||
204 | } | ||
205 | } | ||
206 | |||
207 | hw->count++; | ||
208 | |||
209 | if (hw->count < hw->len) | ||
210 | writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA); | ||
211 | else | ||
212 | complete(&hw->done); | ||
213 | |||
214 | return IRQ_HANDLED; | ||
215 | } | ||
216 | |||
217 | static int __devinit altera_spi_probe(struct platform_device *pdev) | ||
218 | { | ||
219 | struct altera_spi_platform_data *platp = pdev->dev.platform_data; | ||
220 | struct altera_spi *hw; | ||
221 | struct spi_master *master; | ||
222 | struct resource *res; | ||
223 | int err = -ENODEV; | ||
224 | |||
225 | master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi)); | ||
226 | if (!master) | ||
227 | return err; | ||
228 | |||
229 | /* setup the master state. */ | ||
230 | master->bus_num = pdev->id; | ||
231 | master->num_chipselect = 16; | ||
232 | master->mode_bits = SPI_CS_HIGH; | ||
233 | master->setup = altera_spi_setup; | ||
234 | |||
235 | hw = spi_master_get_devdata(master); | ||
236 | platform_set_drvdata(pdev, hw); | ||
237 | |||
238 | /* setup the state for the bitbang driver */ | ||
239 | hw->bitbang.master = spi_master_get(master); | ||
240 | if (!hw->bitbang.master) | ||
241 | return err; | ||
242 | hw->bitbang.setup_transfer = altera_spi_setupxfer; | ||
243 | hw->bitbang.chipselect = altera_spi_chipsel; | ||
244 | hw->bitbang.txrx_bufs = altera_spi_txrx; | ||
245 | |||
246 | /* find and map our resources */ | ||
247 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
248 | if (!res) | ||
249 | goto exit_busy; | ||
250 | if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), | ||
251 | pdev->name)) | ||
252 | goto exit_busy; | ||
253 | hw->base = devm_ioremap_nocache(&pdev->dev, res->start, | ||
254 | resource_size(res)); | ||
255 | if (!hw->base) | ||
256 | goto exit_busy; | ||
257 | /* program defaults into the registers */ | ||
258 | hw->imr = 0; /* disable spi interrupts */ | ||
259 | writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); | ||
260 | writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */ | ||
261 | if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK) | ||
262 | readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */ | ||
263 | /* irq is optional */ | ||
264 | hw->irq = platform_get_irq(pdev, 0); | ||
265 | if (hw->irq >= 0) { | ||
266 | init_completion(&hw->done); | ||
267 | err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0, | ||
268 | pdev->name, hw); | ||
269 | if (err) | ||
270 | goto exit; | ||
271 | } | ||
272 | /* find platform data */ | ||
273 | if (!platp) | ||
274 | hw->bitbang.master->dev.of_node = pdev->dev.of_node; | ||
275 | |||
276 | /* register our spi controller */ | ||
277 | err = spi_bitbang_start(&hw->bitbang); | ||
278 | if (err) | ||
279 | goto exit; | ||
280 | dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); | ||
281 | |||
282 | return 0; | ||
283 | |||
284 | exit_busy: | ||
285 | err = -EBUSY; | ||
286 | exit: | ||
287 | platform_set_drvdata(pdev, NULL); | ||
288 | spi_master_put(master); | ||
289 | return err; | ||
290 | } | ||
291 | |||
292 | static int __devexit altera_spi_remove(struct platform_device *dev) | ||
293 | { | ||
294 | struct altera_spi *hw = platform_get_drvdata(dev); | ||
295 | struct spi_master *master = hw->bitbang.master; | ||
296 | |||
297 | spi_bitbang_stop(&hw->bitbang); | ||
298 | platform_set_drvdata(dev, NULL); | ||
299 | spi_master_put(master); | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | #ifdef CONFIG_OF | ||
304 | static const struct of_device_id altera_spi_match[] = { | ||
305 | { .compatible = "ALTR,spi-1.0", }, | ||
306 | {}, | ||
307 | }; | ||
308 | MODULE_DEVICE_TABLE(of, altera_spi_match); | ||
309 | #else /* CONFIG_OF */ | ||
310 | #define altera_spi_match NULL | ||
311 | #endif /* CONFIG_OF */ | ||
312 | |||
313 | static struct platform_driver altera_spi_driver = { | ||
314 | .probe = altera_spi_probe, | ||
315 | .remove = __devexit_p(altera_spi_remove), | ||
316 | .driver = { | ||
317 | .name = DRV_NAME, | ||
318 | .owner = THIS_MODULE, | ||
319 | .pm = NULL, | ||
320 | .of_match_table = altera_spi_match, | ||
321 | }, | ||
322 | }; | ||
323 | |||
324 | static int __init altera_spi_init(void) | ||
325 | { | ||
326 | return platform_driver_register(&altera_spi_driver); | ||
327 | } | ||
328 | module_init(altera_spi_init); | ||
329 | |||
330 | static void __exit altera_spi_exit(void) | ||
331 | { | ||
332 | platform_driver_unregister(&altera_spi_driver); | ||
333 | } | ||
334 | module_exit(altera_spi_exit); | ||
335 | |||
336 | MODULE_DESCRIPTION("Altera SPI driver"); | ||
337 | MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); | ||
338 | MODULE_LICENSE("GPL"); | ||
339 | MODULE_ALIAS("platform:" DRV_NAME); | ||
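spi_altera.c matches either the platform device name DRV_NAME ("spi_altera") or, with CONFIG_OF, the "ALTR,spi-1.0" compatible. A non-DT board would wire it up roughly as below; the base address and IRQ are placeholders, not values from the commit:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_altera_spi_resources[] = {
	{
		.start	= 0x80000000,		/* hypothetical register base */
		.end	= 0x80000000 + 0x20 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 5,			/* hypothetical IRQ; optional,
						 * the driver falls back to
						 * polled PIO without it */
		.end	= 5,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_altera_spi_device = {
	.name		= "spi_altera",		/* must equal DRV_NAME above */
	.id		= 0,			/* becomes master->bus_num */
	.resource	= example_altera_spi_resources,
	.num_resources	= ARRAY_SIZE(example_altera_spi_resources),
};
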
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index 10a6dc3d37ac..cc880c95e7de 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Blackfin On-Chip SPI Driver | 2 | * Blackfin On-Chip SPI Driver |
3 | * | 3 | * |
4 | * Copyright 2004-2007 Analog Devices Inc. | 4 | * Copyright 2004-2010 Analog Devices Inc. |
5 | * | 5 | * |
6 | * Enter bugs at http://blackfin.uclinux.org/ | 6 | * Enter bugs at http://blackfin.uclinux.org/ |
7 | * | 7 | * |
@@ -41,13 +41,16 @@ MODULE_LICENSE("GPL"); | |||
41 | #define RUNNING_STATE ((void *)1) | 41 | #define RUNNING_STATE ((void *)1) |
42 | #define DONE_STATE ((void *)2) | 42 | #define DONE_STATE ((void *)2) |
43 | #define ERROR_STATE ((void *)-1) | 43 | #define ERROR_STATE ((void *)-1) |
44 | #define QUEUE_RUNNING 0 | ||
45 | #define QUEUE_STOPPED 1 | ||
46 | 44 | ||
47 | /* Value to send if no TX value is supplied */ | 45 | struct bfin_spi_master_data; |
48 | #define SPI_IDLE_TXVAL 0x0000 | ||
49 | 46 | ||
50 | struct driver_data { | 47 | struct bfin_spi_transfer_ops { |
48 | void (*write) (struct bfin_spi_master_data *); | ||
49 | void (*read) (struct bfin_spi_master_data *); | ||
50 | void (*duplex) (struct bfin_spi_master_data *); | ||
51 | }; | ||
52 | |||
53 | struct bfin_spi_master_data { | ||
51 | /* Driver model hookup */ | 54 | /* Driver model hookup */ |
52 | struct platform_device *pdev; | 55 | struct platform_device *pdev; |
53 | 56 | ||
@@ -69,7 +72,7 @@ struct driver_data { | |||
69 | spinlock_t lock; | 72 | spinlock_t lock; |
70 | struct list_head queue; | 73 | struct list_head queue; |
71 | int busy; | 74 | int busy; |
72 | int run; | 75 | bool running; |
73 | 76 | ||
74 | /* Message Transfer pump */ | 77 | /* Message Transfer pump */ |
75 | struct tasklet_struct pump_transfers; | 78 | struct tasklet_struct pump_transfers; |
@@ -77,7 +80,7 @@ struct driver_data { | |||
77 | /* Current message transfer state info */ | 80 | /* Current message transfer state info */ |
78 | struct spi_message *cur_msg; | 81 | struct spi_message *cur_msg; |
79 | struct spi_transfer *cur_transfer; | 82 | struct spi_transfer *cur_transfer; |
80 | struct chip_data *cur_chip; | 83 | struct bfin_spi_slave_data *cur_chip; |
81 | size_t len_in_bytes; | 84 | size_t len_in_bytes; |
82 | size_t len; | 85 | size_t len; |
83 | void *tx; | 86 | void *tx; |
@@ -92,38 +95,37 @@ struct driver_data { | |||
92 | dma_addr_t rx_dma; | 95 | dma_addr_t rx_dma; |
93 | dma_addr_t tx_dma; | 96 | dma_addr_t tx_dma; |
94 | 97 | ||
98 | int irq_requested; | ||
99 | int spi_irq; | ||
100 | |||
95 | size_t rx_map_len; | 101 | size_t rx_map_len; |
96 | size_t tx_map_len; | 102 | size_t tx_map_len; |
97 | u8 n_bytes; | 103 | u8 n_bytes; |
104 | u16 ctrl_reg; | ||
105 | u16 flag_reg; | ||
106 | |||
98 | int cs_change; | 107 | int cs_change; |
99 | void (*write) (struct driver_data *); | 108 | const struct bfin_spi_transfer_ops *ops; |
100 | void (*read) (struct driver_data *); | ||
101 | void (*duplex) (struct driver_data *); | ||
102 | }; | 109 | }; |
103 | 110 | ||
104 | struct chip_data { | 111 | struct bfin_spi_slave_data { |
105 | u16 ctl_reg; | 112 | u16 ctl_reg; |
106 | u16 baud; | 113 | u16 baud; |
107 | u16 flag; | 114 | u16 flag; |
108 | 115 | ||
109 | u8 chip_select_num; | 116 | u8 chip_select_num; |
110 | u8 n_bytes; | ||
111 | u8 width; /* 0 or 1 */ | ||
112 | u8 enable_dma; | 117 | u8 enable_dma; |
113 | u8 bits_per_word; /* 8 or 16 */ | ||
114 | u8 cs_change_per_word; | ||
115 | u16 cs_chg_udelay; /* Some devices require > 255usec delay */ | 118 | u16 cs_chg_udelay; /* Some devices require > 255usec delay */ |
116 | u32 cs_gpio; | 119 | u32 cs_gpio; |
117 | u16 idle_tx_val; | 120 | u16 idle_tx_val; |
118 | void (*write) (struct driver_data *); | 121 | u8 pio_interrupt; /* use spi data irq */ |
119 | void (*read) (struct driver_data *); | 122 | const struct bfin_spi_transfer_ops *ops; |
120 | void (*duplex) (struct driver_data *); | ||
121 | }; | 123 | }; |
122 | 124 | ||
123 | #define DEFINE_SPI_REG(reg, off) \ | 125 | #define DEFINE_SPI_REG(reg, off) \ |
124 | static inline u16 read_##reg(struct driver_data *drv_data) \ | 126 | static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ |
125 | { return bfin_read16(drv_data->regs_base + off); } \ | 127 | { return bfin_read16(drv_data->regs_base + off); } \ |
126 | static inline void write_##reg(struct driver_data *drv_data, u16 v) \ | 128 | static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ |
127 | { bfin_write16(drv_data->regs_base + off, v); } | 129 | { bfin_write16(drv_data->regs_base + off, v); } |
128 | 130 | ||
129 | DEFINE_SPI_REG(CTRL, 0x00) | 131 | DEFINE_SPI_REG(CTRL, 0x00) |
@@ -134,7 +136,7 @@ DEFINE_SPI_REG(RDBR, 0x10) | |||
134 | DEFINE_SPI_REG(BAUD, 0x14) | 136 | DEFINE_SPI_REG(BAUD, 0x14) |
135 | DEFINE_SPI_REG(SHAW, 0x18) | 137 | DEFINE_SPI_REG(SHAW, 0x18) |
136 | 138 | ||
137 | static void bfin_spi_enable(struct driver_data *drv_data) | 139 | static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) |
138 | { | 140 | { |
139 | u16 cr; | 141 | u16 cr; |
140 | 142 | ||
@@ -142,7 +144,7 @@ static void bfin_spi_enable(struct driver_data *drv_data) | |||
142 | write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); | 144 | write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); |
143 | } | 145 | } |
144 | 146 | ||
145 | static void bfin_spi_disable(struct driver_data *drv_data) | 147 | static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) |
146 | { | 148 | { |
147 | u16 cr; | 149 | u16 cr; |
148 | 150 | ||
@@ -165,7 +167,7 @@ static u16 hz_to_spi_baud(u32 speed_hz) | |||
165 | return spi_baud; | 167 | return spi_baud; |
166 | } | 168 | } |
167 | 169 | ||
168 | static int bfin_spi_flush(struct driver_data *drv_data) | 170 | static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) |
169 | { | 171 | { |
170 | unsigned long limit = loops_per_jiffy << 1; | 172 | unsigned long limit = loops_per_jiffy << 1; |
171 | 173 | ||
@@ -179,13 +181,12 @@ static int bfin_spi_flush(struct driver_data *drv_data) | |||
179 | } | 181 | } |
180 | 182 | ||
181 | /* Chip select operation functions for cs_change flag */ | 183 | /* Chip select operation functions for cs_change flag */ |
182 | static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip) | 184 | static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) |
183 | { | 185 | { |
184 | if (likely(chip->chip_select_num)) { | 186 | if (likely(chip->chip_select_num < MAX_CTRL_CS)) { |
185 | u16 flag = read_FLAG(drv_data); | 187 | u16 flag = read_FLAG(drv_data); |
186 | 188 | ||
187 | flag |= chip->flag; | 189 | flag &= ~chip->flag; |
188 | flag &= ~(chip->flag << 8); | ||
189 | 190 | ||
190 | write_FLAG(drv_data, flag); | 191 | write_FLAG(drv_data, flag); |
191 | } else { | 192 | } else { |
@@ -193,13 +194,13 @@ static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *c | |||
193 | } | 194 | } |
194 | } | 195 | } |
195 | 196 | ||
196 | static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip) | 197 | static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, |
198 | struct bfin_spi_slave_data *chip) | ||
197 | { | 199 | { |
198 | if (likely(chip->chip_select_num)) { | 200 | if (likely(chip->chip_select_num < MAX_CTRL_CS)) { |
199 | u16 flag = read_FLAG(drv_data); | 201 | u16 flag = read_FLAG(drv_data); |
200 | 202 | ||
201 | flag &= ~chip->flag; | 203 | flag |= chip->flag; |
202 | flag |= (chip->flag << 8); | ||
203 | 204 | ||
204 | write_FLAG(drv_data, flag); | 205 | write_FLAG(drv_data, flag); |
205 | } else { | 206 | } else { |
@@ -211,16 +212,43 @@ static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data | |||
211 | udelay(chip->cs_chg_udelay); | 212 | udelay(chip->cs_chg_udelay); |
212 | } | 213 | } |
213 | 214 | ||
215 | /* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ | ||
216 | static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, | ||
217 | struct bfin_spi_slave_data *chip) | ||
218 | { | ||
219 | if (chip->chip_select_num < MAX_CTRL_CS) { | ||
220 | u16 flag = read_FLAG(drv_data); | ||
221 | |||
222 | flag |= (chip->flag >> 8); | ||
223 | |||
224 | write_FLAG(drv_data, flag); | ||
225 | } | ||
226 | } | ||
227 | |||
228 | static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, | ||
229 | struct bfin_spi_slave_data *chip) | ||
230 | { | ||
231 | if (chip->chip_select_num < MAX_CTRL_CS) { | ||
232 | u16 flag = read_FLAG(drv_data); | ||
233 | |||
234 | flag &= ~(chip->flag >> 8); | ||
235 | |||
236 | write_FLAG(drv_data, flag); | ||
237 | } | ||
238 | } | ||
239 | |||
214 | /* stop controller and re-config current chip*/ | 240 | /* stop controller and re-config current chip*/ |
215 | static void bfin_spi_restore_state(struct driver_data *drv_data) | 241 | static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) |
216 | { | 242 | { |
217 | struct chip_data *chip = drv_data->cur_chip; | 243 | struct bfin_spi_slave_data *chip = drv_data->cur_chip; |
218 | 244 | ||
219 | /* Clear status and disable clock */ | 245 | /* Clear status and disable clock */ |
220 | write_STAT(drv_data, BIT_STAT_CLR); | 246 | write_STAT(drv_data, BIT_STAT_CLR); |
221 | bfin_spi_disable(drv_data); | 247 | bfin_spi_disable(drv_data); |
222 | dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); | 248 | dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); |
223 | 249 | ||
250 | SSYNC(); | ||
251 | |||
224 | /* Load the registers */ | 252 | /* Load the registers */ |
225 | write_CTRL(drv_data, chip->ctl_reg); | 253 | write_CTRL(drv_data, chip->ctl_reg); |
226 | write_BAUD(drv_data, chip->baud); | 254 | write_BAUD(drv_data, chip->baud); |
@@ -230,49 +258,12 @@ static void bfin_spi_restore_state(struct driver_data *drv_data) | |||
230 | } | 258 | } |
231 | 259 | ||
232 | /* used to kick off transfer in rx mode and read unwanted RX data */ | 260 | /* used to kick off transfer in rx mode and read unwanted RX data */ |
233 | static inline void bfin_spi_dummy_read(struct driver_data *drv_data) | 261 | static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) |
234 | { | 262 | { |
235 | (void) read_RDBR(drv_data); | 263 | (void) read_RDBR(drv_data); |
236 | } | 264 | } |
237 | 265 | ||
238 | static void bfin_spi_null_writer(struct driver_data *drv_data) | 266 | static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) |
239 | { | ||
240 | u8 n_bytes = drv_data->n_bytes; | ||
241 | u16 tx_val = drv_data->cur_chip->idle_tx_val; | ||
242 | |||
243 | /* clear RXS (we check for RXS inside the loop) */ | ||
244 | bfin_spi_dummy_read(drv_data); | ||
245 | |||
246 | while (drv_data->tx < drv_data->tx_end) { | ||
247 | write_TDBR(drv_data, tx_val); | ||
248 | drv_data->tx += n_bytes; | ||
249 | /* wait until transfer finished. | ||
250 | checking SPIF or TXS may not guarantee transfer completion */ | ||
251 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
252 | cpu_relax(); | ||
253 | /* discard RX data and clear RXS */ | ||
254 | bfin_spi_dummy_read(drv_data); | ||
255 | } | ||
256 | } | ||
257 | |||
258 | static void bfin_spi_null_reader(struct driver_data *drv_data) | ||
259 | { | ||
260 | u8 n_bytes = drv_data->n_bytes; | ||
261 | u16 tx_val = drv_data->cur_chip->idle_tx_val; | ||
262 | |||
263 | /* discard old RX data and clear RXS */ | ||
264 | bfin_spi_dummy_read(drv_data); | ||
265 | |||
266 | while (drv_data->rx < drv_data->rx_end) { | ||
267 | write_TDBR(drv_data, tx_val); | ||
268 | drv_data->rx += n_bytes; | ||
269 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
270 | cpu_relax(); | ||
271 | bfin_spi_dummy_read(drv_data); | ||
272 | } | ||
273 | } | ||
274 | |||
275 | static void bfin_spi_u8_writer(struct driver_data *drv_data) | ||
276 | { | 267 | { |
277 | /* clear RXS (we check for RXS inside the loop) */ | 268 | /* clear RXS (we check for RXS inside the loop) */ |
278 | bfin_spi_dummy_read(drv_data); | 269 | bfin_spi_dummy_read(drv_data); |
@@ -288,25 +279,7 @@ static void bfin_spi_u8_writer(struct driver_data *drv_data) | |||
288 | } | 279 | } |
289 | } | 280 | } |
290 | 281 | ||
291 | static void bfin_spi_u8_cs_chg_writer(struct driver_data *drv_data) | 282 | static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) |
292 | { | ||
293 | struct chip_data *chip = drv_data->cur_chip; | ||
294 | |||
295 | /* clear RXS (we check for RXS inside the loop) */ | ||
296 | bfin_spi_dummy_read(drv_data); | ||
297 | |||
298 | while (drv_data->tx < drv_data->tx_end) { | ||
299 | bfin_spi_cs_active(drv_data, chip); | ||
300 | write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); | ||
301 | /* make sure transfer finished before deactiving CS */ | ||
302 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
303 | cpu_relax(); | ||
304 | bfin_spi_dummy_read(drv_data); | ||
305 | bfin_spi_cs_deactive(drv_data, chip); | ||
306 | } | ||
307 | } | ||
308 | |||
309 | static void bfin_spi_u8_reader(struct driver_data *drv_data) | ||
310 | { | 283 | { |
311 | u16 tx_val = drv_data->cur_chip->idle_tx_val; | 284 | u16 tx_val = drv_data->cur_chip->idle_tx_val; |
312 | 285 | ||
@@ -321,25 +294,7 @@ static void bfin_spi_u8_reader(struct driver_data *drv_data) | |||
321 | } | 294 | } |
322 | } | 295 | } |
323 | 296 | ||
324 | static void bfin_spi_u8_cs_chg_reader(struct driver_data *drv_data) | 297 | static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) |
325 | { | ||
326 | struct chip_data *chip = drv_data->cur_chip; | ||
327 | u16 tx_val = chip->idle_tx_val; | ||
328 | |||
329 | /* discard old RX data and clear RXS */ | ||
330 | bfin_spi_dummy_read(drv_data); | ||
331 | |||
332 | while (drv_data->rx < drv_data->rx_end) { | ||
333 | bfin_spi_cs_active(drv_data, chip); | ||
334 | write_TDBR(drv_data, tx_val); | ||
335 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
336 | cpu_relax(); | ||
337 | *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); | ||
338 | bfin_spi_cs_deactive(drv_data, chip); | ||
339 | } | ||
340 | } | ||
341 | |||
342 | static void bfin_spi_u8_duplex(struct driver_data *drv_data) | ||
343 | { | 298 | { |
344 | /* discard old RX data and clear RXS */ | 299 | /* discard old RX data and clear RXS */ |
345 | bfin_spi_dummy_read(drv_data); | 300 | bfin_spi_dummy_read(drv_data); |
@@ -352,24 +307,13 @@ static void bfin_spi_u8_duplex(struct driver_data *drv_data) | |||
352 | } | 307 | } |
353 | } | 308 | } |
354 | 309 | ||
355 | static void bfin_spi_u8_cs_chg_duplex(struct driver_data *drv_data) | 310 | static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { |
356 | { | 311 | .write = bfin_spi_u8_writer, |
357 | struct chip_data *chip = drv_data->cur_chip; | 312 | .read = bfin_spi_u8_reader, |
358 | 313 | .duplex = bfin_spi_u8_duplex, | |
359 | /* discard old RX data and clear RXS */ | 314 | }; |
360 | bfin_spi_dummy_read(drv_data); | ||
361 | |||
362 | while (drv_data->rx < drv_data->rx_end) { | ||
363 | bfin_spi_cs_active(drv_data, chip); | ||
364 | write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); | ||
365 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
366 | cpu_relax(); | ||
367 | *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); | ||
368 | bfin_spi_cs_deactive(drv_data, chip); | ||
369 | } | ||
370 | } | ||
371 | 315 | ||
372 | static void bfin_spi_u16_writer(struct driver_data *drv_data) | 316 | static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) |
373 | { | 317 | { |
374 | /* clear RXS (we check for RXS inside the loop) */ | 318 | /* clear RXS (we check for RXS inside the loop) */ |
375 | bfin_spi_dummy_read(drv_data); | 319 | bfin_spi_dummy_read(drv_data); |
@@ -386,26 +330,7 @@ static void bfin_spi_u16_writer(struct driver_data *drv_data) | |||
386 | } | 330 | } |
387 | } | 331 | } |
388 | 332 | ||
389 | static void bfin_spi_u16_cs_chg_writer(struct driver_data *drv_data) | 333 | static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) |
390 | { | ||
391 | struct chip_data *chip = drv_data->cur_chip; | ||
392 | |||
393 | /* clear RXS (we check for RXS inside the loop) */ | ||
394 | bfin_spi_dummy_read(drv_data); | ||
395 | |||
396 | while (drv_data->tx < drv_data->tx_end) { | ||
397 | bfin_spi_cs_active(drv_data, chip); | ||
398 | write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); | ||
399 | drv_data->tx += 2; | ||
400 | /* make sure transfer finished before deactiving CS */ | ||
401 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
402 | cpu_relax(); | ||
403 | bfin_spi_dummy_read(drv_data); | ||
404 | bfin_spi_cs_deactive(drv_data, chip); | ||
405 | } | ||
406 | } | ||
407 | |||
408 | static void bfin_spi_u16_reader(struct driver_data *drv_data) | ||
409 | { | 334 | { |
410 | u16 tx_val = drv_data->cur_chip->idle_tx_val; | 335 | u16 tx_val = drv_data->cur_chip->idle_tx_val; |
411 | 336 | ||
@@ -421,26 +346,7 @@ static void bfin_spi_u16_reader(struct driver_data *drv_data) | |||
421 | } | 346 | } |
422 | } | 347 | } |
423 | 348 | ||
424 | static void bfin_spi_u16_cs_chg_reader(struct driver_data *drv_data) | 349 | static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) |
425 | { | ||
426 | struct chip_data *chip = drv_data->cur_chip; | ||
427 | u16 tx_val = chip->idle_tx_val; | ||
428 | |||
429 | /* discard old RX data and clear RXS */ | ||
430 | bfin_spi_dummy_read(drv_data); | ||
431 | |||
432 | while (drv_data->rx < drv_data->rx_end) { | ||
433 | bfin_spi_cs_active(drv_data, chip); | ||
434 | write_TDBR(drv_data, tx_val); | ||
435 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
436 | cpu_relax(); | ||
437 | *(u16 *) (drv_data->rx) = read_RDBR(drv_data); | ||
438 | drv_data->rx += 2; | ||
439 | bfin_spi_cs_deactive(drv_data, chip); | ||
440 | } | ||
441 | } | ||
442 | |||
443 | static void bfin_spi_u16_duplex(struct driver_data *drv_data) | ||
444 | { | 350 | { |
445 | /* discard old RX data and clear RXS */ | 351 | /* discard old RX data and clear RXS */ |
446 | bfin_spi_dummy_read(drv_data); | 352 | bfin_spi_dummy_read(drv_data); |
@@ -455,27 +361,14 @@ static void bfin_spi_u16_duplex(struct driver_data *drv_data) | |||
455 | } | 361 | } |
456 | } | 362 | } |
457 | 363 | ||
458 | static void bfin_spi_u16_cs_chg_duplex(struct driver_data *drv_data) | 364 | static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { |
459 | { | 365 | .write = bfin_spi_u16_writer, |
460 | struct chip_data *chip = drv_data->cur_chip; | 366 | .read = bfin_spi_u16_reader, |
461 | 367 | .duplex = bfin_spi_u16_duplex, | |
462 | /* discard old RX data and clear RXS */ | 368 | }; |
463 | bfin_spi_dummy_read(drv_data); | ||
464 | |||
465 | while (drv_data->rx < drv_data->rx_end) { | ||
466 | bfin_spi_cs_active(drv_data, chip); | ||
467 | write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); | ||
468 | drv_data->tx += 2; | ||
469 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
470 | cpu_relax(); | ||
471 | *(u16 *) (drv_data->rx) = read_RDBR(drv_data); | ||
472 | drv_data->rx += 2; | ||
473 | bfin_spi_cs_deactive(drv_data, chip); | ||
474 | } | ||
475 | } | ||
476 | 369 | ||
477 | /* test if ther is more transfer to be done */ | 370 | /* test if there is more transfer to be done */ |
478 | static void *bfin_spi_next_transfer(struct driver_data *drv_data) | 371 | static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) |
479 | { | 372 | { |
480 | struct spi_message *msg = drv_data->cur_msg; | 373 | struct spi_message *msg = drv_data->cur_msg; |
481 | struct spi_transfer *trans = drv_data->cur_transfer; | 374 | struct spi_transfer *trans = drv_data->cur_transfer; |
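The two const bfin_spi_transfer_ops tables above replace the write/read/duplex function pointers that were previously duplicated in both driver_data and chip_data; the rest of the driver now selects one table per transfer and dispatches through it. A sketch of that dispatch (the helper name is hypothetical; the real selection happens in the driver's transfer pump path):

static void example_run_pio_transfer(struct bfin_spi_master_data *drv_data)
{
	/* pick the ops table matching the transfer word size */
	drv_data->ops = (drv_data->n_bytes == 2) ?
			&bfin_bfin_spi_transfer_ops_u16 :
			&bfin_bfin_spi_transfer_ops_u8;

	if (drv_data->tx && drv_data->rx)
		drv_data->ops->duplex(drv_data);	/* full duplex */
	else if (drv_data->rx)
		drv_data->ops->read(drv_data);
	else if (drv_data->tx)
		drv_data->ops->write(drv_data);
}
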
@@ -494,9 +387,9 @@ static void *bfin_spi_next_transfer(struct driver_data *drv_data) | |||
494 | * caller already set message->status; | 387 | * caller already set message->status; |
495 | * dma and pio irqs are blocked give finished message back | 388 | * dma and pio irqs are blocked give finished message back |
496 | */ | 389 | */ |
497 | static void bfin_spi_giveback(struct driver_data *drv_data) | 390 | static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) |
498 | { | 391 | { |
499 | struct chip_data *chip = drv_data->cur_chip; | 392 | struct bfin_spi_slave_data *chip = drv_data->cur_chip; |
500 | struct spi_transfer *last_transfer; | 393 | struct spi_transfer *last_transfer; |
501 | unsigned long flags; | 394 | unsigned long flags; |
502 | struct spi_message *msg; | 395 | struct spi_message *msg; |
@@ -525,10 +418,113 @@ static void bfin_spi_giveback(struct driver_data *drv_data) | |||
525 | msg->complete(msg->context); | 418 | msg->complete(msg->context); |
526 | } | 419 | } |
527 | 420 | ||
421 | /* spi data irq handler */ | ||
422 | static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) | ||
423 | { | ||
424 | struct bfin_spi_master_data *drv_data = dev_id; | ||
425 | struct bfin_spi_slave_data *chip = drv_data->cur_chip; | ||
426 | struct spi_message *msg = drv_data->cur_msg; | ||
427 | int n_bytes = drv_data->n_bytes; | ||
428 | int loop = 0; | ||
429 | |||
430 | /* wait until transfer finished. */ | ||
431 | while (!(read_STAT(drv_data) & BIT_STAT_RXS)) | ||
432 | cpu_relax(); | ||
433 | |||
434 | if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || | ||
435 | (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { | ||
436 | /* last read */ | ||
437 | if (drv_data->rx) { | ||
438 | dev_dbg(&drv_data->pdev->dev, "last read\n"); | ||
439 | if (n_bytes % 2) { | ||
440 | u16 *buf = (u16 *)drv_data->rx; | ||
441 | for (loop = 0; loop < n_bytes / 2; loop++) | ||
442 | *buf++ = read_RDBR(drv_data); | ||
443 | } else { | ||
444 | u8 *buf = (u8 *)drv_data->rx; | ||
445 | for (loop = 0; loop < n_bytes; loop++) | ||
446 | *buf++ = read_RDBR(drv_data); | ||
447 | } | ||
448 | drv_data->rx += n_bytes; | ||
449 | } | ||
450 | |||
451 | msg->actual_length += drv_data->len_in_bytes; | ||
452 | if (drv_data->cs_change) | ||
453 | bfin_spi_cs_deactive(drv_data, chip); | ||
454 | /* Move to next transfer */ | ||
455 | msg->state = bfin_spi_next_transfer(drv_data); | ||
456 | |||
457 | disable_irq_nosync(drv_data->spi_irq); | ||
458 | |||
459 | /* Schedule transfer tasklet */ | ||
460 | tasklet_schedule(&drv_data->pump_transfers); | ||
461 | return IRQ_HANDLED; | ||
462 | } | ||
463 | |||
464 | if (drv_data->rx && drv_data->tx) { | ||
465 | /* duplex */ | ||
466 | dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); | ||
467 | if (n_bytes % 2) { | ||
468 | u16 *buf = (u16 *)drv_data->rx; | ||
469 | u16 *buf2 = (u16 *)drv_data->tx; | ||
470 | for (loop = 0; loop < n_bytes / 2; loop++) { | ||
471 | *buf++ = read_RDBR(drv_data); | ||
472 | write_TDBR(drv_data, *buf2++); | ||
473 | } | ||
474 | } else { | ||
475 | u8 *buf = (u8 *)drv_data->rx; | ||
476 | u8 *buf2 = (u8 *)drv_data->tx; | ||
477 | for (loop = 0; loop < n_bytes; loop++) { | ||
478 | *buf++ = read_RDBR(drv_data); | ||
479 | write_TDBR(drv_data, *buf2++); | ||
480 | } | ||
481 | } | ||
482 | } else if (drv_data->rx) { | ||
483 | /* read */ | ||
484 | dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); | ||
485 | if (n_bytes % 2) { | ||
486 | u16 *buf = (u16 *)drv_data->rx; | ||
487 | for (loop = 0; loop < n_bytes / 2; loop++) { | ||
488 | *buf++ = read_RDBR(drv_data); | ||
489 | write_TDBR(drv_data, chip->idle_tx_val); | ||
490 | } | ||
491 | } else { | ||
492 | u8 *buf = (u8 *)drv_data->rx; | ||
493 | for (loop = 0; loop < n_bytes; loop++) { | ||
494 | *buf++ = read_RDBR(drv_data); | ||
495 | write_TDBR(drv_data, chip->idle_tx_val); | ||
496 | } | ||
497 | } | ||
498 | } else if (drv_data->tx) { | ||
499 | /* write */ | ||
500 | dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); | ||
501 | if (n_bytes % 2) { | ||
502 | u16 *buf = (u16 *)drv_data->tx; | ||
503 | for (loop = 0; loop < n_bytes / 2; loop++) { | ||
504 | read_RDBR(drv_data); | ||
505 | write_TDBR(drv_data, *buf++); | ||
506 | } | ||
507 | } else { | ||
508 | u8 *buf = (u8 *)drv_data->tx; | ||
509 | for (loop = 0; loop < n_bytes; loop++) { | ||
510 | read_RDBR(drv_data); | ||
511 | write_TDBR(drv_data, *buf++); | ||
512 | } | ||
513 | } | ||
514 | } | ||
515 | |||
516 | if (drv_data->tx) | ||
517 | drv_data->tx += n_bytes; | ||
518 | if (drv_data->rx) | ||
519 | drv_data->rx += n_bytes; | ||
520 | |||
521 | return IRQ_HANDLED; | ||
522 | } | ||
523 | |||
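Editorial note: the bfin_spi_pio_irq_handler() added above only runs for slaves that opt into interrupt-driven PIO through the new pio_interrupt field of struct bfin5xx_spi_chip (wired up in the setup() changes later in this patch, where it is also rejected in combination with enable_dma). A minimal board-file sketch of that opt-in follows; the modalias, bus number, speed and chip select are illustrative values, not taken from this patch.

	/* Illustrative board-file snippet; device names and numbers are hypothetical. */
	#include <linux/spi/spi.h>
	#include <asm/bfin5xx_spi.h>

	static struct bfin5xx_spi_chip example_chip_info = {
		.enable_dma    = 0,	/* mutually exclusive with pio_interrupt */
		.pio_interrupt = 1,	/* use the IRQ-driven PIO path added above */
	};

	static struct spi_board_info example_board_info[] __initdata = {
		{
			.modalias        = "spidev",	/* hypothetical slave */
			.max_speed_hz    = 5000000,
			.bus_num         = 0,
			.chip_select     = 1,
			.controller_data = &example_chip_info,
		},
	};

	/* Registered from board init code with:
	 * spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info));
	 */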
528 | static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) | 524 | static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) |
529 | { | 525 | { |
530 | struct driver_data *drv_data = dev_id; | 526 | struct bfin_spi_master_data *drv_data = dev_id; |
531 | struct chip_data *chip = drv_data->cur_chip; | 527 | struct bfin_spi_slave_data *chip = drv_data->cur_chip; |
532 | struct spi_message *msg = drv_data->cur_msg; | 528 | struct spi_message *msg = drv_data->cur_msg; |
533 | unsigned long timeout; | 529 | unsigned long timeout; |
534 | unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); | 530 | unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); |
@@ -538,11 +534,16 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) | |||
538 | "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", | 534 | "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", |
539 | dmastat, spistat); | 535 | dmastat, spistat); |
540 | 536 | ||
541 | clear_dma_irqstat(drv_data->dma_channel); | 537 | if (drv_data->rx != NULL) { |
538 | u16 cr = read_CTRL(drv_data); | ||
539 | /* discard old RX data and clear RXS */ | ||
540 | bfin_spi_dummy_read(drv_data); | ||
541 | write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ | ||
542 | write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ | ||
543 | write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ | ||
544 | } | ||
542 | 545 | ||
543 | /* Wait for DMA to complete */ | 546 | clear_dma_irqstat(drv_data->dma_channel); |
544 | while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN) | ||
545 | cpu_relax(); | ||
546 | 547 | ||
547 | /* | 548 | /* |
548 | * wait for the last transaction shifted out. HRM states: | 549 | * wait for the last transaction shifted out. HRM states: |
@@ -551,8 +552,8 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) | |||
551 | * register until it goes low for 2 successive reads | 552 | * register until it goes low for 2 successive reads |
552 | */ | 553 | */ |
553 | if (drv_data->tx != NULL) { | 554 | if (drv_data->tx != NULL) { |
554 | while ((read_STAT(drv_data) & TXS) || | 555 | while ((read_STAT(drv_data) & BIT_STAT_TXS) || |
555 | (read_STAT(drv_data) & TXS)) | 556 | (read_STAT(drv_data) & BIT_STAT_TXS)) |
556 | cpu_relax(); | 557 | cpu_relax(); |
557 | } | 558 | } |
558 | 559 | ||
@@ -561,14 +562,14 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) | |||
561 | dmastat, read_STAT(drv_data)); | 562 | dmastat, read_STAT(drv_data)); |
562 | 563 | ||
563 | timeout = jiffies + HZ; | 564 | timeout = jiffies + HZ; |
564 | while (!(read_STAT(drv_data) & SPIF)) | 565 | while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) |
565 | if (!time_before(jiffies, timeout)) { | 566 | if (!time_before(jiffies, timeout)) { |
566 | dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); | 567 | dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); |
567 | break; | 568 | break; |
568 | } else | 569 | } else |
569 | cpu_relax(); | 570 | cpu_relax(); |
570 | 571 | ||
571 | if ((dmastat & DMA_ERR) && (spistat & RBSY)) { | 572 | if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { |
572 | msg->state = ERROR_STATE; | 573 | msg->state = ERROR_STATE; |
573 | dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); | 574 | dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); |
574 | } else { | 575 | } else { |
@@ -588,20 +589,20 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) | |||
588 | dev_dbg(&drv_data->pdev->dev, | 589 | dev_dbg(&drv_data->pdev->dev, |
589 | "disable dma channel irq%d\n", | 590 | "disable dma channel irq%d\n", |
590 | drv_data->dma_channel); | 591 | drv_data->dma_channel); |
591 | dma_disable_irq(drv_data->dma_channel); | 592 | dma_disable_irq_nosync(drv_data->dma_channel); |
592 | 593 | ||
593 | return IRQ_HANDLED; | 594 | return IRQ_HANDLED; |
594 | } | 595 | } |
595 | 596 | ||
596 | static void bfin_spi_pump_transfers(unsigned long data) | 597 | static void bfin_spi_pump_transfers(unsigned long data) |
597 | { | 598 | { |
598 | struct driver_data *drv_data = (struct driver_data *)data; | 599 | struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; |
599 | struct spi_message *message = NULL; | 600 | struct spi_message *message = NULL; |
600 | struct spi_transfer *transfer = NULL; | 601 | struct spi_transfer *transfer = NULL; |
601 | struct spi_transfer *previous = NULL; | 602 | struct spi_transfer *previous = NULL; |
602 | struct chip_data *chip = NULL; | 603 | struct bfin_spi_slave_data *chip = NULL; |
603 | u8 width; | 604 | unsigned int bits_per_word; |
604 | u16 cr, dma_width, dma_config; | 605 | u16 cr, cr_width, dma_width, dma_config; |
605 | u32 tranf_success = 1; | 606 | u32 tranf_success = 1; |
606 | u8 full_duplex = 0; | 607 | u8 full_duplex = 0; |
607 | 608 | ||
@@ -639,7 +640,7 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
639 | udelay(previous->delay_usecs); | 640 | udelay(previous->delay_usecs); |
640 | } | 641 | } |
641 | 642 | ||
642 | /* Setup the transfer state based on the type of transfer */ | 643 | /* Flush any existing transfers that may be sitting in the hardware */ |
643 | if (bfin_spi_flush(drv_data) == 0) { | 644 | if (bfin_spi_flush(drv_data) == 0) { |
644 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); | 645 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); |
645 | message->status = -EIO; | 646 | message->status = -EIO; |
@@ -652,6 +653,7 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
652 | message->state = bfin_spi_next_transfer(drv_data); | 653 | message->state = bfin_spi_next_transfer(drv_data); |
653 | /* Schedule next transfer tasklet */ | 654 | /* Schedule next transfer tasklet */ |
654 | tasklet_schedule(&drv_data->pump_transfers); | 655 | tasklet_schedule(&drv_data->pump_transfers); |
656 | return; | ||
655 | } | 657 | } |
656 | 658 | ||
657 | if (transfer->tx_buf != NULL) { | 659 | if (transfer->tx_buf != NULL) { |
@@ -679,52 +681,32 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
679 | drv_data->cs_change = transfer->cs_change; | 681 | drv_data->cs_change = transfer->cs_change; |
680 | 682 | ||
681 | /* Bits per word setup */ | 683 | /* Bits per word setup */ |
682 | switch (transfer->bits_per_word) { | 684 | bits_per_word = transfer->bits_per_word ? : |
683 | case 8: | 685 | message->spi->bits_per_word ? : 8; |
684 | drv_data->n_bytes = 1; | 686 | if (bits_per_word % 16 == 0) { |
685 | width = CFG_SPI_WORDSIZE8; | 687 | drv_data->n_bytes = bits_per_word/8; |
686 | drv_data->read = chip->cs_change_per_word ? | ||
687 | bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; | ||
688 | drv_data->write = chip->cs_change_per_word ? | ||
689 | bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; | ||
690 | drv_data->duplex = chip->cs_change_per_word ? | ||
691 | bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; | ||
692 | break; | ||
693 | |||
694 | case 16: | ||
695 | drv_data->n_bytes = 2; | ||
696 | width = CFG_SPI_WORDSIZE16; | ||
697 | drv_data->read = chip->cs_change_per_word ? | ||
698 | bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; | ||
699 | drv_data->write = chip->cs_change_per_word ? | ||
700 | bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; | ||
701 | drv_data->duplex = chip->cs_change_per_word ? | ||
702 | bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; | ||
703 | break; | ||
704 | |||
705 | default: | ||
706 | /* No change, the same as default setting */ | ||
707 | drv_data->n_bytes = chip->n_bytes; | ||
708 | width = chip->width; | ||
709 | drv_data->write = drv_data->tx ? chip->write : bfin_spi_null_writer; | ||
710 | drv_data->read = drv_data->rx ? chip->read : bfin_spi_null_reader; | ||
711 | drv_data->duplex = chip->duplex ? chip->duplex : bfin_spi_null_writer; | ||
712 | break; | ||
713 | } | ||
714 | cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); | ||
715 | cr |= (width << 8); | ||
716 | write_CTRL(drv_data, cr); | ||
717 | |||
718 | if (width == CFG_SPI_WORDSIZE16) { | ||
719 | drv_data->len = (transfer->len) >> 1; | 688 | drv_data->len = (transfer->len) >> 1; |
720 | } else { | 689 | cr_width = BIT_CTL_WORDSIZE; |
690 | drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; | ||
691 | } else if (bits_per_word % 8 == 0) { | ||
692 | drv_data->n_bytes = bits_per_word/8; | ||
721 | drv_data->len = transfer->len; | 693 | drv_data->len = transfer->len; |
694 | cr_width = 0; | ||
695 | drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; | ||
696 | } else { | ||
697 | dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); | ||
698 | message->status = -EINVAL; | ||
699 | bfin_spi_giveback(drv_data); | ||
700 | return; | ||
722 | } | 701 | } |
702 | cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); | ||
703 | cr |= cr_width; | ||
704 | write_CTRL(drv_data, cr); | ||
705 | |||
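Editorial note: the reworked bits-per-word handling above drops the old 8/16-only switch in favour of a modulo test, so any multiple of 8 bits is accepted and multiples of 16 use the hardware 16-bit word size. The same decision can be written as a tiny standalone helper; this sketch only restates the logic of the hunk above (BIT_CTL_WORDSIZE is the driver's control-register word-size bit).

	/* Sketch of the word-size decision made above (illustrative restatement only). */
	static int example_pick_word_size(unsigned int bits_per_word,
					  int *n_bytes, u16 *cr_width)
	{
		if (bits_per_word % 16 == 0) {
			*n_bytes = bits_per_word / 8;	/* 16-bit words, len is halved */
			*cr_width = BIT_CTL_WORDSIZE;
		} else if (bits_per_word % 8 == 0) {
			*n_bytes = bits_per_word / 8;	/* 8-bit words */
			*cr_width = 0;
		} else {
			return -EINVAL;			/* anything else is rejected */
		}
		return 0;
	}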
723 | dev_dbg(&drv_data->pdev->dev, | 706 | dev_dbg(&drv_data->pdev->dev, |
724 | "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", | 707 | "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", |
725 | drv_data->write, chip->write, bfin_spi_null_writer); | 708 | drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); |
726 | 709 | ||
727 | /* speed and width has been set on per message */ | ||
728 | message->state = RUNNING_STATE; | 710 | message->state = RUNNING_STATE; |
729 | dma_config = 0; | 711 | dma_config = 0; |
730 | 712 | ||
@@ -735,13 +717,11 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
735 | write_BAUD(drv_data, chip->baud); | 717 | write_BAUD(drv_data, chip->baud); |
736 | 718 | ||
737 | write_STAT(drv_data, BIT_STAT_CLR); | 719 | write_STAT(drv_data, BIT_STAT_CLR); |
738 | cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); | 720 | bfin_spi_cs_active(drv_data, chip); |
739 | if (drv_data->cs_change) | ||
740 | bfin_spi_cs_active(drv_data, chip); | ||
741 | 721 | ||
742 | dev_dbg(&drv_data->pdev->dev, | 722 | dev_dbg(&drv_data->pdev->dev, |
743 | "now pumping a transfer: width is %d, len is %d\n", | 723 | "now pumping a transfer: width is %d, len is %d\n", |
744 | width, transfer->len); | 724 | cr_width, transfer->len); |
745 | 725 | ||
746 | /* | 726 | /* |
747 | * Try to map dma buffer and do a dma transfer. If successful use, | 727 | * Try to map dma buffer and do a dma transfer. If successful use, |
@@ -760,7 +740,7 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
760 | /* config dma channel */ | 740 | /* config dma channel */ |
761 | dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); | 741 | dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); |
762 | set_dma_x_count(drv_data->dma_channel, drv_data->len); | 742 | set_dma_x_count(drv_data->dma_channel, drv_data->len); |
763 | if (width == CFG_SPI_WORDSIZE16) { | 743 | if (cr_width == BIT_CTL_WORDSIZE) { |
764 | set_dma_x_modify(drv_data->dma_channel, 2); | 744 | set_dma_x_modify(drv_data->dma_channel, 2); |
765 | dma_width = WDSIZE_16; | 745 | dma_width = WDSIZE_16; |
766 | } else { | 746 | } else { |
@@ -846,73 +826,109 @@ static void bfin_spi_pump_transfers(unsigned long data) | |||
846 | dma_enable_irq(drv_data->dma_channel); | 826 | dma_enable_irq(drv_data->dma_channel); |
847 | local_irq_restore(flags); | 827 | local_irq_restore(flags); |
848 | 828 | ||
849 | } else { | 829 | return; |
850 | /* IO mode write then read */ | 830 | } |
851 | dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); | ||
852 | |||
853 | /* we always use SPI_WRITE mode. SPI_READ mode | ||
854 | seems to have problems with setting up the | ||
855 | output value in TDBR prior to the transfer. */ | ||
856 | write_CTRL(drv_data, (cr | CFG_SPI_WRITE)); | ||
857 | |||
858 | if (full_duplex) { | ||
859 | /* full duplex mode */ | ||
860 | BUG_ON((drv_data->tx_end - drv_data->tx) != | ||
861 | (drv_data->rx_end - drv_data->rx)); | ||
862 | dev_dbg(&drv_data->pdev->dev, | ||
863 | "IO duplex: cr is 0x%x\n", cr); | ||
864 | |||
865 | drv_data->duplex(drv_data); | ||
866 | 831 | ||
867 | if (drv_data->tx != drv_data->tx_end) | 832 | /* |
868 | tranf_success = 0; | 833 | * We always use SPI_WRITE mode (transfer starts with TDBR write). |
869 | } else if (drv_data->tx != NULL) { | 834 | * SPI_READ mode (transfer starts with RDBR read) seems to have |
870 | /* write only half duplex */ | 835 | * problems with setting up the output value in TDBR prior to the |
871 | dev_dbg(&drv_data->pdev->dev, | 836 | * start of the transfer. |
872 | "IO write: cr is 0x%x\n", cr); | 837 | */ |
838 | write_CTRL(drv_data, cr | BIT_CTL_TXMOD); | ||
873 | 839 | ||
874 | drv_data->write(drv_data); | 840 | if (chip->pio_interrupt) { |
841 | /* SPI irq should have been disabled by now */ | ||
875 | 842 | ||
876 | if (drv_data->tx != drv_data->tx_end) | 843 | /* discard old RX data and clear RXS */ |
877 | tranf_success = 0; | 844 | bfin_spi_dummy_read(drv_data); |
878 | } else if (drv_data->rx != NULL) { | ||
879 | /* read only half duplex */ | ||
880 | dev_dbg(&drv_data->pdev->dev, | ||
881 | "IO read: cr is 0x%x\n", cr); | ||
882 | 845 | ||
883 | drv_data->read(drv_data); | 846 | /* start transfer */ |
884 | if (drv_data->rx != drv_data->rx_end) | 847 | if (drv_data->tx == NULL) |
885 | tranf_success = 0; | 848 | write_TDBR(drv_data, chip->idle_tx_val); |
849 | else { | ||
850 | int loop; | ||
851 | if (bits_per_word % 16 == 0) { | ||
852 | u16 *buf = (u16 *)drv_data->tx; | ||
853 | for (loop = 0; loop < bits_per_word / 16; | ||
854 | loop++) { | ||
855 | write_TDBR(drv_data, *buf++); | ||
856 | } | ||
857 | } else if (bits_per_word % 8 == 0) { | ||
858 | u8 *buf = (u8 *)drv_data->tx; | ||
859 | for (loop = 0; loop < bits_per_word / 8; loop++) | ||
860 | write_TDBR(drv_data, *buf++); | ||
861 | } | ||
862 | |||
863 | drv_data->tx += drv_data->n_bytes; | ||
886 | } | 864 | } |
887 | 865 | ||
888 | if (!tranf_success) { | 866 | /* once TDBR is empty, interrupt is triggered */ |
889 | dev_dbg(&drv_data->pdev->dev, | 867 | enable_irq(drv_data->spi_irq); |
890 | "IO write error!\n"); | 868 | return; |
891 | message->state = ERROR_STATE; | ||
892 | } else { | ||
893 | /* Update total byte transfered */ | ||
894 | message->actual_length += drv_data->len_in_bytes; | ||
895 | /* Move to next transfer of this msg */ | ||
896 | message->state = bfin_spi_next_transfer(drv_data); | ||
897 | if (drv_data->cs_change) | ||
898 | bfin_spi_cs_deactive(drv_data, chip); | ||
899 | } | ||
900 | /* Schedule next transfer tasklet */ | ||
901 | tasklet_schedule(&drv_data->pump_transfers); | ||
902 | } | 869 | } |
870 | |||
871 | /* IO mode */ | ||
872 | dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); | ||
873 | |||
874 | if (full_duplex) { | ||
875 | /* full duplex mode */ | ||
876 | BUG_ON((drv_data->tx_end - drv_data->tx) != | ||
877 | (drv_data->rx_end - drv_data->rx)); | ||
878 | dev_dbg(&drv_data->pdev->dev, | ||
879 | "IO duplex: cr is 0x%x\n", cr); | ||
880 | |||
881 | drv_data->ops->duplex(drv_data); | ||
882 | |||
883 | if (drv_data->tx != drv_data->tx_end) | ||
884 | tranf_success = 0; | ||
885 | } else if (drv_data->tx != NULL) { | ||
886 | /* write only half duplex */ | ||
887 | dev_dbg(&drv_data->pdev->dev, | ||
888 | "IO write: cr is 0x%x\n", cr); | ||
889 | |||
890 | drv_data->ops->write(drv_data); | ||
891 | |||
892 | if (drv_data->tx != drv_data->tx_end) | ||
893 | tranf_success = 0; | ||
894 | } else if (drv_data->rx != NULL) { | ||
895 | /* read only half duplex */ | ||
896 | dev_dbg(&drv_data->pdev->dev, | ||
897 | "IO read: cr is 0x%x\n", cr); | ||
898 | |||
899 | drv_data->ops->read(drv_data); | ||
900 | if (drv_data->rx != drv_data->rx_end) | ||
901 | tranf_success = 0; | ||
902 | } | ||
903 | |||
904 | if (!tranf_success) { | ||
905 | dev_dbg(&drv_data->pdev->dev, | ||
906 | "IO write error!\n"); | ||
907 | message->state = ERROR_STATE; | ||
908 | } else { | ||
909 | /* Update total byte transferred */ | ||
910 | message->actual_length += drv_data->len_in_bytes; | ||
911 | /* Move to next transfer of this msg */ | ||
912 | message->state = bfin_spi_next_transfer(drv_data); | ||
913 | if (drv_data->cs_change) | ||
914 | bfin_spi_cs_deactive(drv_data, chip); | ||
915 | } | ||
916 | |||
917 | /* Schedule next transfer tasklet */ | ||
918 | tasklet_schedule(&drv_data->pump_transfers); | ||
903 | } | 919 | } |
904 | 920 | ||
905 | /* pop a msg from queue and kick off real transfer */ | 921 | /* pop a msg from queue and kick off real transfer */ |
906 | static void bfin_spi_pump_messages(struct work_struct *work) | 922 | static void bfin_spi_pump_messages(struct work_struct *work) |
907 | { | 923 | { |
908 | struct driver_data *drv_data; | 924 | struct bfin_spi_master_data *drv_data; |
909 | unsigned long flags; | 925 | unsigned long flags; |
910 | 926 | ||
911 | drv_data = container_of(work, struct driver_data, pump_messages); | 927 | drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); |
912 | 928 | ||
913 | /* Lock queue and check for queue work */ | 929 | /* Lock queue and check for queue work */ |
914 | spin_lock_irqsave(&drv_data->lock, flags); | 930 | spin_lock_irqsave(&drv_data->lock, flags); |
915 | if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { | 931 | if (list_empty(&drv_data->queue) || !drv_data->running) { |
916 | /* pumper kicked off but no work to do */ | 932 | /* pumper kicked off but no work to do */ |
917 | drv_data->busy = 0; | 933 | drv_data->busy = 0; |
918 | spin_unlock_irqrestore(&drv_data->lock, flags); | 934 | spin_unlock_irqrestore(&drv_data->lock, flags); |
@@ -962,12 +978,12 @@ static void bfin_spi_pump_messages(struct work_struct *work) | |||
962 | */ | 978 | */ |
963 | static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) | 979 | static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) |
964 | { | 980 | { |
965 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | 981 | struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); |
966 | unsigned long flags; | 982 | unsigned long flags; |
967 | 983 | ||
968 | spin_lock_irqsave(&drv_data->lock, flags); | 984 | spin_lock_irqsave(&drv_data->lock, flags); |
969 | 985 | ||
970 | if (drv_data->run == QUEUE_STOPPED) { | 986 | if (!drv_data->running) { |
971 | spin_unlock_irqrestore(&drv_data->lock, flags); | 987 | spin_unlock_irqrestore(&drv_data->lock, flags); |
972 | return -ESHUTDOWN; | 988 | return -ESHUTDOWN; |
973 | } | 989 | } |
@@ -979,7 +995,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
979 | dev_dbg(&spi->dev, "adding an msg in transfer() \n"); | 995 | dev_dbg(&spi->dev, "adding an msg in transfer() \n"); |
980 | list_add_tail(&msg->queue, &drv_data->queue); | 996 | list_add_tail(&msg->queue, &drv_data->queue); |
981 | 997 | ||
982 | if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) | 998 | if (drv_data->running && !drv_data->busy) |
983 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | 999 | queue_work(drv_data->workqueue, &drv_data->pump_messages); |
984 | 1000 | ||
985 | spin_unlock_irqrestore(&drv_data->lock, flags); | 1001 | spin_unlock_irqrestore(&drv_data->lock, flags); |
@@ -1003,147 +1019,187 @@ static u16 ssel[][MAX_SPI_SSEL] = { | |||
1003 | P_SPI2_SSEL6, P_SPI2_SSEL7}, | 1019 | P_SPI2_SSEL6, P_SPI2_SSEL7}, |
1004 | }; | 1020 | }; |
1005 | 1021 | ||
1006 | /* first setup for new devices */ | 1022 | /* setup for devices (may be called multiple times -- not just first setup) */ |
1007 | static int bfin_spi_setup(struct spi_device *spi) | 1023 | static int bfin_spi_setup(struct spi_device *spi) |
1008 | { | 1024 | { |
1009 | struct bfin5xx_spi_chip *chip_info = NULL; | 1025 | struct bfin5xx_spi_chip *chip_info; |
1010 | struct chip_data *chip; | 1026 | struct bfin_spi_slave_data *chip = NULL; |
1011 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | 1027 | struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); |
1012 | int ret; | 1028 | u16 bfin_ctl_reg; |
1013 | 1029 | int ret = -EINVAL; | |
1014 | if (spi->bits_per_word != 8 && spi->bits_per_word != 16) | ||
1015 | return -EINVAL; | ||
1016 | 1030 | ||
1017 | /* Only alloc (or use chip_info) on first setup */ | 1031 | /* Only alloc (or use chip_info) on first setup */ |
1032 | chip_info = NULL; | ||
1018 | chip = spi_get_ctldata(spi); | 1033 | chip = spi_get_ctldata(spi); |
1019 | if (chip == NULL) { | 1034 | if (chip == NULL) { |
1020 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); | 1035 | chip = kzalloc(sizeof(*chip), GFP_KERNEL); |
1021 | if (!chip) | 1036 | if (!chip) { |
1022 | return -ENOMEM; | 1037 | dev_err(&spi->dev, "cannot allocate chip data\n"); |
1038 | ret = -ENOMEM; | ||
1039 | goto error; | ||
1040 | } | ||
1023 | 1041 | ||
1024 | chip->enable_dma = 0; | 1042 | chip->enable_dma = 0; |
1025 | chip_info = spi->controller_data; | 1043 | chip_info = spi->controller_data; |
1026 | } | 1044 | } |
1027 | 1045 | ||
1046 | /* Let people set non-standard bits directly */ | ||
1047 | bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | | ||
1048 | BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; | ||
1049 | |||
1028 | /* chip_info isn't always needed */ | 1050 | /* chip_info isn't always needed */ |
1029 | if (chip_info) { | 1051 | if (chip_info) { |
1030 | /* Make sure people stop trying to set fields via ctl_reg | 1052 | /* Make sure people stop trying to set fields via ctl_reg |
1031 | * when they should actually be using common SPI framework. | 1053 | * when they should actually be using common SPI framework. |
1032 | * Currently we let through: WOM EMISO PSSE GM SZ TIMOD. | 1054 | * Currently we let through: WOM EMISO PSSE GM SZ. |
1033 | * Not sure if a user actually needs/uses any of these, | 1055 | * Not sure if a user actually needs/uses any of these, |
1034 | * but let's assume (for now) they do. | 1056 | * but let's assume (for now) they do. |
1035 | */ | 1057 | */ |
1036 | if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) { | 1058 | if (chip_info->ctl_reg & ~bfin_ctl_reg) { |
1037 | dev_err(&spi->dev, "do not set bits in ctl_reg " | 1059 | dev_err(&spi->dev, "do not set bits in ctl_reg " |
1038 | "that the SPI framework manages\n"); | 1060 | "that the SPI framework manages\n"); |
1039 | return -EINVAL; | 1061 | goto error; |
1040 | } | 1062 | } |
1041 | |||
1042 | chip->enable_dma = chip_info->enable_dma != 0 | 1063 | chip->enable_dma = chip_info->enable_dma != 0 |
1043 | && drv_data->master_info->enable_dma; | 1064 | && drv_data->master_info->enable_dma; |
1044 | chip->ctl_reg = chip_info->ctl_reg; | 1065 | chip->ctl_reg = chip_info->ctl_reg; |
1045 | chip->bits_per_word = chip_info->bits_per_word; | ||
1046 | chip->cs_change_per_word = chip_info->cs_change_per_word; | ||
1047 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; | 1066 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; |
1048 | chip->cs_gpio = chip_info->cs_gpio; | ||
1049 | chip->idle_tx_val = chip_info->idle_tx_val; | 1067 | chip->idle_tx_val = chip_info->idle_tx_val; |
1068 | chip->pio_interrupt = chip_info->pio_interrupt; | ||
1069 | spi->bits_per_word = chip_info->bits_per_word; | ||
1070 | } else { | ||
1071 | /* force a default base state */ | ||
1072 | chip->ctl_reg &= bfin_ctl_reg; | ||
1073 | } | ||
1074 | |||
1075 | if (spi->bits_per_word % 8) { | ||
1076 | dev_err(&spi->dev, "%d bits_per_word is not supported\n", | ||
1077 | spi->bits_per_word); | ||
1078 | goto error; | ||
1050 | } | 1079 | } |
1051 | 1080 | ||
1052 | /* translate common spi framework into our register */ | 1081 | /* translate common spi framework into our register */ |
1082 | if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { | ||
1083 | dev_err(&spi->dev, "unsupported spi modes detected\n"); | ||
1084 | goto error; | ||
1085 | } | ||
1053 | if (spi->mode & SPI_CPOL) | 1086 | if (spi->mode & SPI_CPOL) |
1054 | chip->ctl_reg |= CPOL; | 1087 | chip->ctl_reg |= BIT_CTL_CPOL; |
1055 | if (spi->mode & SPI_CPHA) | 1088 | if (spi->mode & SPI_CPHA) |
1056 | chip->ctl_reg |= CPHA; | 1089 | chip->ctl_reg |= BIT_CTL_CPHA; |
1057 | if (spi->mode & SPI_LSB_FIRST) | 1090 | if (spi->mode & SPI_LSB_FIRST) |
1058 | chip->ctl_reg |= LSBF; | 1091 | chip->ctl_reg |= BIT_CTL_LSBF; |
1059 | /* we dont support running in slave mode (yet?) */ | 1092 | /* we dont support running in slave mode (yet?) */ |
1060 | chip->ctl_reg |= MSTR; | 1093 | chip->ctl_reg |= BIT_CTL_MASTER; |
1061 | 1094 | ||
1062 | /* | 1095 | /* |
1096 | * Notice: for blackfin, the speed_hz is the value of register | ||
1097 | * SPI_BAUD, not the real baudrate | ||
1098 | */ | ||
1099 | chip->baud = hz_to_spi_baud(spi->max_speed_hz); | ||
1100 | chip->chip_select_num = spi->chip_select; | ||
1101 | if (chip->chip_select_num < MAX_CTRL_CS) { | ||
1102 | if (!(spi->mode & SPI_CPHA)) | ||
1103 | dev_warn(&spi->dev, "Warning: SPI CPHA not set:" | ||
1104 | " Slave Select not under software control!\n" | ||
1105 | " See Documentation/blackfin/bfin-spi-notes.txt"); | ||
1106 | |||
1107 | chip->flag = (1 << spi->chip_select) << 8; | ||
1108 | } else | ||
1109 | chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; | ||
1110 | |||
1111 | if (chip->enable_dma && chip->pio_interrupt) { | ||
1112 | dev_err(&spi->dev, "enable_dma is set, " | ||
1113 | "do not set pio_interrupt\n"); | ||
1114 | goto error; | ||
1115 | } | ||
1116 | /* | ||
1063 | * if any one SPI chip is registered and wants DMA, request the | 1117 | * if any one SPI chip is registered and wants DMA, request the |
1064 | * DMA channel for it | 1118 | * DMA channel for it |
1065 | */ | 1119 | */ |
1066 | if (chip->enable_dma && !drv_data->dma_requested) { | 1120 | if (chip->enable_dma && !drv_data->dma_requested) { |
1067 | /* register dma irq handler */ | 1121 | /* register dma irq handler */ |
1068 | if (request_dma(drv_data->dma_channel, "BFIN_SPI_DMA") < 0) { | 1122 | ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); |
1069 | dev_dbg(&spi->dev, | 1123 | if (ret) { |
1124 | dev_err(&spi->dev, | ||
1070 | "Unable to request BlackFin SPI DMA channel\n"); | 1125 | "Unable to request BlackFin SPI DMA channel\n"); |
1071 | return -ENODEV; | 1126 | goto error; |
1072 | } | 1127 | } |
1073 | if (set_dma_callback(drv_data->dma_channel, | 1128 | drv_data->dma_requested = 1; |
1074 | bfin_spi_dma_irq_handler, drv_data) < 0) { | 1129 | |
1075 | dev_dbg(&spi->dev, "Unable to set dma callback\n"); | 1130 | ret = set_dma_callback(drv_data->dma_channel, |
1076 | return -EPERM; | 1131 | bfin_spi_dma_irq_handler, drv_data); |
1132 | if (ret) { | ||
1133 | dev_err(&spi->dev, "Unable to set dma callback\n"); | ||
1134 | goto error; | ||
1077 | } | 1135 | } |
1078 | dma_disable_irq(drv_data->dma_channel); | 1136 | dma_disable_irq(drv_data->dma_channel); |
1079 | drv_data->dma_requested = 1; | ||
1080 | } | 1137 | } |
1081 | 1138 | ||
1082 | /* | 1139 | if (chip->pio_interrupt && !drv_data->irq_requested) { |
1083 | * Notice: for blackfin, the speed_hz is the value of register | 1140 | ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, |
1084 | * SPI_BAUD, not the real baudrate | 1141 | IRQF_DISABLED, "BFIN_SPI", drv_data); |
1085 | */ | ||
1086 | chip->baud = hz_to_spi_baud(spi->max_speed_hz); | ||
1087 | chip->flag = 1 << (spi->chip_select); | ||
1088 | chip->chip_select_num = spi->chip_select; | ||
1089 | |||
1090 | if (chip->chip_select_num == 0) { | ||
1091 | ret = gpio_request(chip->cs_gpio, spi->modalias); | ||
1092 | if (ret) { | 1142 | if (ret) { |
1093 | if (drv_data->dma_requested) | 1143 | dev_err(&spi->dev, "Unable to register spi IRQ\n"); |
1094 | free_dma(drv_data->dma_channel); | 1144 | goto error; |
1095 | return ret; | ||
1096 | } | 1145 | } |
1097 | gpio_direction_output(chip->cs_gpio, 1); | 1146 | drv_data->irq_requested = 1; |
1147 | /* we use write mode, spi irq has to be disabled here */ | ||
1148 | disable_irq(drv_data->spi_irq); | ||
1098 | } | 1149 | } |
1099 | 1150 | ||
1100 | switch (chip->bits_per_word) { | 1151 | if (chip->chip_select_num >= MAX_CTRL_CS) { |
1101 | case 8: | 1152 | /* Only request on first setup */ |
1102 | chip->n_bytes = 1; | 1153 | if (spi_get_ctldata(spi) == NULL) { |
1103 | chip->width = CFG_SPI_WORDSIZE8; | 1154 | ret = gpio_request(chip->cs_gpio, spi->modalias); |
1104 | chip->read = chip->cs_change_per_word ? | 1155 | if (ret) { |
1105 | bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; | 1156 | dev_err(&spi->dev, "gpio_request() error\n"); |
1106 | chip->write = chip->cs_change_per_word ? | 1157 | goto pin_error; |
1107 | bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; | 1158 | } |
1108 | chip->duplex = chip->cs_change_per_word ? | 1159 | gpio_direction_output(chip->cs_gpio, 1); |
1109 | bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; | 1160 | } |
1110 | break; | ||
1111 | |||
1112 | case 16: | ||
1113 | chip->n_bytes = 2; | ||
1114 | chip->width = CFG_SPI_WORDSIZE16; | ||
1115 | chip->read = chip->cs_change_per_word ? | ||
1116 | bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; | ||
1117 | chip->write = chip->cs_change_per_word ? | ||
1118 | bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; | ||
1119 | chip->duplex = chip->cs_change_per_word ? | ||
1120 | bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; | ||
1121 | break; | ||
1122 | |||
1123 | default: | ||
1124 | dev_err(&spi->dev, "%d bits_per_word is not supported\n", | ||
1125 | chip->bits_per_word); | ||
1126 | if (chip_info) | ||
1127 | kfree(chip); | ||
1128 | return -ENODEV; | ||
1129 | } | 1161 | } |
1130 | 1162 | ||
1131 | dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", | 1163 | dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", |
1132 | spi->modalias, chip->width, chip->enable_dma); | 1164 | spi->modalias, spi->bits_per_word, chip->enable_dma); |
1133 | dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", | 1165 | dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", |
1134 | chip->ctl_reg, chip->flag); | 1166 | chip->ctl_reg, chip->flag); |
1135 | 1167 | ||
1136 | spi_set_ctldata(spi, chip); | 1168 | spi_set_ctldata(spi, chip); |
1137 | 1169 | ||
1138 | dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); | 1170 | dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); |
1139 | if ((chip->chip_select_num > 0) | 1171 | if (chip->chip_select_num < MAX_CTRL_CS) { |
1140 | && (chip->chip_select_num <= spi->master->num_chipselect)) | 1172 | ret = peripheral_request(ssel[spi->master->bus_num] |
1141 | peripheral_request(ssel[spi->master->bus_num] | 1173 | [chip->chip_select_num-1], spi->modalias); |
1142 | [chip->chip_select_num-1], spi->modalias); | 1174 | if (ret) { |
1175 | dev_err(&spi->dev, "peripheral_request() error\n"); | ||
1176 | goto pin_error; | ||
1177 | } | ||
1178 | } | ||
1143 | 1179 | ||
1180 | bfin_spi_cs_enable(drv_data, chip); | ||
1144 | bfin_spi_cs_deactive(drv_data, chip); | 1181 | bfin_spi_cs_deactive(drv_data, chip); |
1145 | 1182 | ||
1146 | return 0; | 1183 | return 0; |
1184 | |||
1185 | pin_error: | ||
1186 | if (chip->chip_select_num >= MAX_CTRL_CS) | ||
1187 | gpio_free(chip->cs_gpio); | ||
1188 | else | ||
1189 | peripheral_free(ssel[spi->master->bus_num] | ||
1190 | [chip->chip_select_num - 1]); | ||
1191 | error: | ||
1192 | if (chip) { | ||
1193 | if (drv_data->dma_requested) | ||
1194 | free_dma(drv_data->dma_channel); | ||
1195 | drv_data->dma_requested = 0; | ||
1196 | |||
1197 | kfree(chip); | ||
1198 | /* prevent free 'chip' twice */ | ||
1199 | spi_set_ctldata(spi, NULL); | ||
1200 | } | ||
1201 | |||
1202 | return ret; | ||
1147 | } | 1203 | } |
1148 | 1204 | ||
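Editorial note: both the rewritten setup() and its new error path hang the per-slave state off the spi_device with spi_set_ctldata(), and the later NULLing of that pointer is what the "prevent free 'chip' twice" comments refer to. A compressed sketch of the pattern (error handling and hardware programming elided, and relying on the driver's existing includes) may make the pairing easier to see.

	/* Editorial sketch of the ctldata pattern used by setup()/cleanup() above. */
	static int example_setup(struct spi_device *spi)
	{
		struct bfin_spi_slave_data *chip = spi_get_ctldata(spi);

		if (!chip) {			/* first setup for this device */
			chip = kzalloc(sizeof(*chip), GFP_KERNEL);
			if (!chip)
				return -ENOMEM;
		}
		/* ... (re)apply mode, speed and chip-select configuration ... */
		spi_set_ctldata(spi, chip);
		return 0;
	}

	static void example_cleanup(struct spi_device *spi)
	{
		kfree(spi_get_ctldata(spi));
		spi_set_ctldata(spi, NULL);	/* guard against freeing twice */
	}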
1149 | /* | 1205 | /* |
@@ -1152,28 +1208,30 @@ static int bfin_spi_setup(struct spi_device *spi) | |||
1152 | */ | 1208 | */ |
1153 | static void bfin_spi_cleanup(struct spi_device *spi) | 1209 | static void bfin_spi_cleanup(struct spi_device *spi) |
1154 | { | 1210 | { |
1155 | struct chip_data *chip = spi_get_ctldata(spi); | 1211 | struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); |
1212 | struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); | ||
1156 | 1213 | ||
1157 | if (!chip) | 1214 | if (!chip) |
1158 | return; | 1215 | return; |
1159 | 1216 | ||
1160 | if ((chip->chip_select_num > 0) | 1217 | if (chip->chip_select_num < MAX_CTRL_CS) { |
1161 | && (chip->chip_select_num <= spi->master->num_chipselect)) | ||
1162 | peripheral_free(ssel[spi->master->bus_num] | 1218 | peripheral_free(ssel[spi->master->bus_num] |
1163 | [chip->chip_select_num-1]); | 1219 | [chip->chip_select_num-1]); |
1164 | 1220 | bfin_spi_cs_disable(drv_data, chip); | |
1165 | if (chip->chip_select_num == 0) | 1221 | } else |
1166 | gpio_free(chip->cs_gpio); | 1222 | gpio_free(chip->cs_gpio); |
1167 | 1223 | ||
1168 | kfree(chip); | 1224 | kfree(chip); |
1225 | /* prevent free 'chip' twice */ | ||
1226 | spi_set_ctldata(spi, NULL); | ||
1169 | } | 1227 | } |
1170 | 1228 | ||
1171 | static inline int bfin_spi_init_queue(struct driver_data *drv_data) | 1229 | static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) |
1172 | { | 1230 | { |
1173 | INIT_LIST_HEAD(&drv_data->queue); | 1231 | INIT_LIST_HEAD(&drv_data->queue); |
1174 | spin_lock_init(&drv_data->lock); | 1232 | spin_lock_init(&drv_data->lock); |
1175 | 1233 | ||
1176 | drv_data->run = QUEUE_STOPPED; | 1234 | drv_data->running = false; |
1177 | drv_data->busy = 0; | 1235 | drv_data->busy = 0; |
1178 | 1236 | ||
1179 | /* init transfer tasklet */ | 1237 | /* init transfer tasklet */ |
@@ -1190,18 +1248,18 @@ static inline int bfin_spi_init_queue(struct driver_data *drv_data) | |||
1190 | return 0; | 1248 | return 0; |
1191 | } | 1249 | } |
1192 | 1250 | ||
1193 | static inline int bfin_spi_start_queue(struct driver_data *drv_data) | 1251 | static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) |
1194 | { | 1252 | { |
1195 | unsigned long flags; | 1253 | unsigned long flags; |
1196 | 1254 | ||
1197 | spin_lock_irqsave(&drv_data->lock, flags); | 1255 | spin_lock_irqsave(&drv_data->lock, flags); |
1198 | 1256 | ||
1199 | if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { | 1257 | if (drv_data->running || drv_data->busy) { |
1200 | spin_unlock_irqrestore(&drv_data->lock, flags); | 1258 | spin_unlock_irqrestore(&drv_data->lock, flags); |
1201 | return -EBUSY; | 1259 | return -EBUSY; |
1202 | } | 1260 | } |
1203 | 1261 | ||
1204 | drv_data->run = QUEUE_RUNNING; | 1262 | drv_data->running = true; |
1205 | drv_data->cur_msg = NULL; | 1263 | drv_data->cur_msg = NULL; |
1206 | drv_data->cur_transfer = NULL; | 1264 | drv_data->cur_transfer = NULL; |
1207 | drv_data->cur_chip = NULL; | 1265 | drv_data->cur_chip = NULL; |
@@ -1212,7 +1270,7 @@ static inline int bfin_spi_start_queue(struct driver_data *drv_data) | |||
1212 | return 0; | 1270 | return 0; |
1213 | } | 1271 | } |
1214 | 1272 | ||
1215 | static inline int bfin_spi_stop_queue(struct driver_data *drv_data) | 1273 | static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) |
1216 | { | 1274 | { |
1217 | unsigned long flags; | 1275 | unsigned long flags; |
1218 | unsigned limit = 500; | 1276 | unsigned limit = 500; |
@@ -1226,8 +1284,8 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) | |||
1226 | * execution path (pump_messages) would be required to call wake_up or | 1284 | * execution path (pump_messages) would be required to call wake_up or |
1227 | * friends on every SPI message. Do this instead | 1285 | * friends on every SPI message. Do this instead |
1228 | */ | 1286 | */ |
1229 | drv_data->run = QUEUE_STOPPED; | 1287 | drv_data->running = false; |
1230 | while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { | 1288 | while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { |
1231 | spin_unlock_irqrestore(&drv_data->lock, flags); | 1289 | spin_unlock_irqrestore(&drv_data->lock, flags); |
1232 | msleep(10); | 1290 | msleep(10); |
1233 | spin_lock_irqsave(&drv_data->lock, flags); | 1291 | spin_lock_irqsave(&drv_data->lock, flags); |
@@ -1241,7 +1299,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) | |||
1241 | return status; | 1299 | return status; |
1242 | } | 1300 | } |
1243 | 1301 | ||
1244 | static inline int bfin_spi_destroy_queue(struct driver_data *drv_data) | 1302 | static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) |
1245 | { | 1303 | { |
1246 | int status; | 1304 | int status; |
1247 | 1305 | ||
@@ -1259,14 +1317,14 @@ static int __init bfin_spi_probe(struct platform_device *pdev) | |||
1259 | struct device *dev = &pdev->dev; | 1317 | struct device *dev = &pdev->dev; |
1260 | struct bfin5xx_spi_master *platform_info; | 1318 | struct bfin5xx_spi_master *platform_info; |
1261 | struct spi_master *master; | 1319 | struct spi_master *master; |
1262 | struct driver_data *drv_data = 0; | 1320 | struct bfin_spi_master_data *drv_data; |
1263 | struct resource *res; | 1321 | struct resource *res; |
1264 | int status = 0; | 1322 | int status = 0; |
1265 | 1323 | ||
1266 | platform_info = dev->platform_data; | 1324 | platform_info = dev->platform_data; |
1267 | 1325 | ||
1268 | /* Allocate master with space for drv_data */ | 1326 | /* Allocate master with space for drv_data */ |
1269 | master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); | 1327 | master = spi_alloc_master(dev, sizeof(*drv_data)); |
1270 | if (!master) { | 1328 | if (!master) { |
1271 | dev_err(&pdev->dev, "can not alloc spi_master\n"); | 1329 | dev_err(&pdev->dev, "can not alloc spi_master\n"); |
1272 | return -ENOMEM; | 1330 | return -ENOMEM; |
@@ -1302,11 +1360,19 @@ static int __init bfin_spi_probe(struct platform_device *pdev) | |||
1302 | goto out_error_ioremap; | 1360 | goto out_error_ioremap; |
1303 | } | 1361 | } |
1304 | 1362 | ||
1305 | drv_data->dma_channel = platform_get_irq(pdev, 0); | 1363 | res = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
1306 | if (drv_data->dma_channel < 0) { | 1364 | if (res == NULL) { |
1307 | dev_err(dev, "No DMA channel specified\n"); | 1365 | dev_err(dev, "No DMA channel specified\n"); |
1308 | status = -ENOENT; | 1366 | status = -ENOENT; |
1309 | goto out_error_no_dma_ch; | 1367 | goto out_error_free_io; |
1368 | } | ||
1369 | drv_data->dma_channel = res->start; | ||
1370 | |||
1371 | drv_data->spi_irq = platform_get_irq(pdev, 0); | ||
1372 | if (drv_data->spi_irq < 0) { | ||
1373 | dev_err(dev, "No spi pio irq specified\n"); | ||
1374 | status = -ENOENT; | ||
1375 | goto out_error_free_io; | ||
1310 | } | 1376 | } |
1311 | 1377 | ||
1312 | /* Initial and start queue */ | 1378 | /* Initial and start queue */ |
@@ -1328,6 +1394,12 @@ static int __init bfin_spi_probe(struct platform_device *pdev) | |||
1328 | goto out_error_queue_alloc; | 1394 | goto out_error_queue_alloc; |
1329 | } | 1395 | } |
1330 | 1396 | ||
1397 | /* Reset SPI registers. If these registers were used by the boot loader, | ||
1398 | * the sky may fall on your head if you enable the dma controller. | ||
1399 | */ | ||
1400 | write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); | ||
1401 | write_FLAG(drv_data, 0xFF00); | ||
1402 | |||
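Editorial note: the reset value written to SPI_FLG just above deserves a brief gloss. On the Blackfin SPI block the upper byte of SPI_FLG holds the FLGx chip-select output levels and the lower byte the FLSx enables (this layout is assumed from the usual Blackfin hardware documentation, not stated by the patch), so 0xFF00 parks every hardware chip-select line high, i.e. deasserted, with none of them enabled; that also matches the chip->flag = (1 << chip_select) << 8 computation in setup().

	/* Editorial note on the assumed SPI_FLG layout (not part of this patch):
	 *   upper byte: FLGx output levels (1 = line deasserted/high)
	 *   lower byte: FLSx slave-select enables
	 * so write_FLAG(drv_data, 0xFF00) means "all CS lines high, none enabled".
	 */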
1331 | /* Register with the SPI framework */ | 1403 | /* Register with the SPI framework */ |
1332 | platform_set_drvdata(pdev, drv_data); | 1404 | platform_set_drvdata(pdev, drv_data); |
1333 | status = spi_register_master(master); | 1405 | status = spi_register_master(master); |
@@ -1343,7 +1415,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev) | |||
1343 | 1415 | ||
1344 | out_error_queue_alloc: | 1416 | out_error_queue_alloc: |
1345 | bfin_spi_destroy_queue(drv_data); | 1417 | bfin_spi_destroy_queue(drv_data); |
1346 | out_error_no_dma_ch: | 1418 | out_error_free_io: |
1347 | iounmap((void *) drv_data->regs_base); | 1419 | iounmap((void *) drv_data->regs_base); |
1348 | out_error_ioremap: | 1420 | out_error_ioremap: |
1349 | out_error_get_res: | 1421 | out_error_get_res: |
@@ -1355,7 +1427,7 @@ out_error_get_res: | |||
1355 | /* stop hardware and remove the driver */ | 1427 | /* stop hardware and remove the driver */ |
1356 | static int __devexit bfin_spi_remove(struct platform_device *pdev) | 1428 | static int __devexit bfin_spi_remove(struct platform_device *pdev) |
1357 | { | 1429 | { |
1358 | struct driver_data *drv_data = platform_get_drvdata(pdev); | 1430 | struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); |
1359 | int status = 0; | 1431 | int status = 0; |
1360 | 1432 | ||
1361 | if (!drv_data) | 1433 | if (!drv_data) |
@@ -1375,6 +1447,11 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) | |||
1375 | free_dma(drv_data->dma_channel); | 1447 | free_dma(drv_data->dma_channel); |
1376 | } | 1448 | } |
1377 | 1449 | ||
1450 | if (drv_data->irq_requested) { | ||
1451 | free_irq(drv_data->spi_irq, drv_data); | ||
1452 | drv_data->irq_requested = 0; | ||
1453 | } | ||
1454 | |||
1378 | /* Disconnect from the SPI framework */ | 1455 | /* Disconnect from the SPI framework */ |
1379 | spi_unregister_master(drv_data->master); | 1456 | spi_unregister_master(drv_data->master); |
1380 | 1457 | ||
@@ -1389,26 +1466,32 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) | |||
1389 | #ifdef CONFIG_PM | 1466 | #ifdef CONFIG_PM |
1390 | static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) | 1467 | static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) |
1391 | { | 1468 | { |
1392 | struct driver_data *drv_data = platform_get_drvdata(pdev); | 1469 | struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); |
1393 | int status = 0; | 1470 | int status = 0; |
1394 | 1471 | ||
1395 | status = bfin_spi_stop_queue(drv_data); | 1472 | status = bfin_spi_stop_queue(drv_data); |
1396 | if (status != 0) | 1473 | if (status != 0) |
1397 | return status; | 1474 | return status; |
1398 | 1475 | ||
1399 | /* stop hardware */ | 1476 | drv_data->ctrl_reg = read_CTRL(drv_data); |
1400 | bfin_spi_disable(drv_data); | 1477 | drv_data->flag_reg = read_FLAG(drv_data); |
1478 | |||
1479 | /* | ||
1480 | * reset SPI_CTL and SPI_FLG registers | ||
1481 | */ | ||
1482 | write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); | ||
1483 | write_FLAG(drv_data, 0xFF00); | ||
1401 | 1484 | ||
1402 | return 0; | 1485 | return 0; |
1403 | } | 1486 | } |
1404 | 1487 | ||
1405 | static int bfin_spi_resume(struct platform_device *pdev) | 1488 | static int bfin_spi_resume(struct platform_device *pdev) |
1406 | { | 1489 | { |
1407 | struct driver_data *drv_data = platform_get_drvdata(pdev); | 1490 | struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); |
1408 | int status = 0; | 1491 | int status = 0; |
1409 | 1492 | ||
1410 | /* Enable the SPI interface */ | 1493 | write_CTRL(drv_data, drv_data->ctrl_reg); |
1411 | bfin_spi_enable(drv_data); | 1494 | write_FLAG(drv_data, drv_data->flag_reg); |
1412 | 1495 | ||
1413 | /* Start the queue running */ | 1496 | /* Start the queue running */ |
1414 | status = bfin_spi_start_queue(drv_data); | 1497 | status = bfin_spi_start_queue(drv_data); |
@@ -1439,7 +1522,7 @@ static int __init bfin_spi_init(void) | |||
1439 | { | 1522 | { |
1440 | return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); | 1523 | return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); |
1441 | } | 1524 | } |
1442 | module_init(bfin_spi_init); | 1525 | subsys_initcall(bfin_spi_init); |
1443 | 1526 | ||
1444 | static void __exit bfin_spi_exit(void) | 1527 | static void __exit bfin_spi_exit(void) |
1445 | { | 1528 | { |
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi_bfin_sport.c new file mode 100644 index 000000000000..e557ff617b11 --- /dev/null +++ b/drivers/spi/spi_bfin_sport.c | |||
@@ -0,0 +1,952 @@ | |||
1 | /* | ||
2 | * SPI bus via the Blackfin SPORT peripheral | ||
3 | * | ||
4 | * Enter bugs at http://blackfin.uclinux.org/ | ||
5 | * | ||
6 | * Copyright 2009-2011 Analog Devices Inc. | ||
7 | * | ||
8 | * Licensed under the GPL-2 or later. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/device.h> | ||
15 | #include <linux/gpio.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/errno.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/spi/spi.h> | ||
23 | #include <linux/workqueue.h> | ||
24 | |||
25 | #include <asm/portmux.h> | ||
26 | #include <asm/bfin5xx_spi.h> | ||
27 | #include <asm/blackfin.h> | ||
28 | #include <asm/bfin_sport.h> | ||
29 | #include <asm/cacheflush.h> | ||
30 | |||
31 | #define DRV_NAME "bfin-sport-spi" | ||
32 | #define DRV_DESC "SPI bus via the Blackfin SPORT" | ||
33 | |||
34 | MODULE_AUTHOR("Cliff Cai"); | ||
35 | MODULE_DESCRIPTION(DRV_DESC); | ||
36 | MODULE_LICENSE("GPL"); | ||
37 | MODULE_ALIAS("platform:bfin-sport-spi"); | ||
38 | |||
39 | enum bfin_sport_spi_state { | ||
40 | START_STATE, | ||
41 | RUNNING_STATE, | ||
42 | DONE_STATE, | ||
43 | ERROR_STATE, | ||
44 | }; | ||
45 | |||
46 | struct bfin_sport_spi_master_data; | ||
47 | |||
48 | struct bfin_sport_transfer_ops { | ||
49 | void (*write) (struct bfin_sport_spi_master_data *); | ||
50 | void (*read) (struct bfin_sport_spi_master_data *); | ||
51 | void (*duplex) (struct bfin_sport_spi_master_data *); | ||
52 | }; | ||
53 | |||
54 | struct bfin_sport_spi_master_data { | ||
55 | /* Driver model hookup */ | ||
56 | struct device *dev; | ||
57 | |||
58 | /* SPI framework hookup */ | ||
59 | struct spi_master *master; | ||
60 | |||
61 | /* Regs base of SPI controller */ | ||
62 | struct sport_register __iomem *regs; | ||
63 | int err_irq; | ||
64 | |||
65 | /* Pin request list */ | ||
66 | u16 *pin_req; | ||
67 | |||
68 | /* Driver message queue */ | ||
69 | struct workqueue_struct *workqueue; | ||
70 | struct work_struct pump_messages; | ||
71 | spinlock_t lock; | ||
72 | struct list_head queue; | ||
73 | int busy; | ||
74 | bool run; | ||
75 | |||
76 | /* Message Transfer pump */ | ||
77 | struct tasklet_struct pump_transfers; | ||
78 | |||
79 | /* Current message transfer state info */ | ||
80 | enum bfin_sport_spi_state state; | ||
81 | struct spi_message *cur_msg; | ||
82 | struct spi_transfer *cur_transfer; | ||
83 | struct bfin_sport_spi_slave_data *cur_chip; | ||
84 | union { | ||
85 | void *tx; | ||
86 | u8 *tx8; | ||
87 | u16 *tx16; | ||
88 | }; | ||
89 | void *tx_end; | ||
90 | union { | ||
91 | void *rx; | ||
92 | u8 *rx8; | ||
93 | u16 *rx16; | ||
94 | }; | ||
95 | void *rx_end; | ||
96 | |||
97 | int cs_change; | ||
98 | struct bfin_sport_transfer_ops *ops; | ||
99 | }; | ||
100 | |||
101 | struct bfin_sport_spi_slave_data { | ||
102 | u16 ctl_reg; | ||
103 | u16 baud; | ||
104 | u16 cs_chg_udelay; /* Some devices require > 255usec delay */ | ||
105 | u32 cs_gpio; | ||
106 | u16 idle_tx_val; | ||
107 | struct bfin_sport_transfer_ops *ops; | ||
108 | }; | ||
109 | |||
110 | static void | ||
111 | bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data) | ||
112 | { | ||
113 | bfin_write_or(&drv_data->regs->tcr1, TSPEN); | ||
114 | bfin_write_or(&drv_data->regs->rcr1, TSPEN); | ||
115 | SSYNC(); | ||
116 | } | ||
117 | |||
118 | static void | ||
119 | bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data) | ||
120 | { | ||
121 | bfin_write_and(&drv_data->regs->tcr1, ~TSPEN); | ||
122 | bfin_write_and(&drv_data->regs->rcr1, ~TSPEN); | ||
123 | SSYNC(); | ||
124 | } | ||
125 | |||
126 | /* Calculate the SPI_BAUD register value based on input HZ */ | ||
127 | static u16 | ||
128 | bfin_sport_hz_to_spi_baud(u32 speed_hz) | ||
129 | { | ||
130 | u_long clk, sclk = get_sclk(); | ||
131 | int div = (sclk / (2 * speed_hz)) - 1; | ||
132 | |||
133 | if (div < 0) | ||
134 | div = 0; | ||
135 | |||
136 | clk = sclk / (2 * (div + 1)); | ||
137 | |||
138 | if (clk > speed_hz) | ||
139 | div++; | ||
140 | |||
141 | return div; | ||
142 | } | ||
143 | |||
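Editorial note: bfin_sport_hz_to_spi_baud() rounds the divisor so that the generated clock never exceeds the requested rate; if the first guess overshoots, div is bumped by one. A quick worked example (the system clock value is assumed purely for illustration) makes the rounding visible.

	/* Worked example, assuming get_sclk() returns 133 MHz (illustrative value):
	 *   request 10 MHz  ->  div = 133000000 / (2 * 10000000) - 1 = 5
	 *   check: 133000000 / (2 * (5 + 1)) = 11.08 MHz > 10 MHz, so div becomes 6
	 *   result: SPORT clock = 133000000 / (2 * (6 + 1)) = 9.5 MHz, just under the request
	 */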
144 | /* Chip select operation functions for cs_change flag */ | ||
145 | static void | ||
146 | bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip) | ||
147 | { | ||
148 | gpio_direction_output(chip->cs_gpio, 0); | ||
149 | } | ||
150 | |||
151 | static void | ||
152 | bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip) | ||
153 | { | ||
154 | gpio_direction_output(chip->cs_gpio, 1); | ||
155 | /* Move delay here for consistency */ | ||
156 | if (chip->cs_chg_udelay) | ||
157 | udelay(chip->cs_chg_udelay); | ||
158 | } | ||
159 | |||
160 | static void | ||
161 | bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data) | ||
162 | { | ||
163 | unsigned long timeout = jiffies + HZ; | ||
164 | while (!(bfin_read(&drv_data->regs->stat) & RXNE)) { | ||
165 | if (!time_before(jiffies, timeout)) | ||
166 | break; | ||
167 | } | ||
168 | } | ||
169 | |||
170 | static void | ||
171 | bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data) | ||
172 | { | ||
173 | u16 dummy; | ||
174 | |||
175 | while (drv_data->tx < drv_data->tx_end) { | ||
176 | bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); | ||
177 | bfin_sport_spi_stat_poll_complete(drv_data); | ||
178 | dummy = bfin_read(&drv_data->regs->rx16); | ||
179 | } | ||
180 | } | ||
181 | |||
182 | static void | ||
183 | bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data) | ||
184 | { | ||
185 | u16 tx_val = drv_data->cur_chip->idle_tx_val; | ||
186 | |||
187 | while (drv_data->rx < drv_data->rx_end) { | ||
188 | bfin_write(&drv_data->regs->tx16, tx_val); | ||
189 | bfin_sport_spi_stat_poll_complete(drv_data); | ||
190 | *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); | ||
191 | } | ||
192 | } | ||
193 | |||
194 | static void | ||
195 | bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data) | ||
196 | { | ||
197 | while (drv_data->rx < drv_data->rx_end) { | ||
198 | bfin_write(&drv_data->regs->tx16, *drv_data->tx8++); | ||
199 | bfin_sport_spi_stat_poll_complete(drv_data); | ||
200 | *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16); | ||
201 | } | ||
202 | } | ||
203 | |||
204 | static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = { | ||
205 | .write = bfin_sport_spi_u8_writer, | ||
206 | .read = bfin_sport_spi_u8_reader, | ||
207 | .duplex = bfin_sport_spi_u8_duplex, | ||
208 | }; | ||
209 | |||
210 | static void | ||
211 | bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data) | ||
212 | { | ||
213 | u16 dummy; | ||
214 | |||
215 | while (drv_data->tx < drv_data->tx_end) { | ||
216 | bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); | ||
217 | bfin_sport_spi_stat_poll_complete(drv_data); | ||
218 | dummy = bfin_read(&drv_data->regs->rx16); | ||
219 | } | ||
220 | } | ||
221 | |||
222 | static void | ||
223 | bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data) | ||
224 | { | ||
225 | u16 tx_val = drv_data->cur_chip->idle_tx_val; | ||
226 | |||
227 | while (drv_data->rx < drv_data->rx_end) { | ||
228 | bfin_write(&drv_data->regs->tx16, tx_val); | ||
229 | bfin_sport_spi_stat_poll_complete(drv_data); | ||
230 | *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); | ||
231 | } | ||
232 | } | ||
233 | |||
234 | static void | ||
235 | bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data) | ||
236 | { | ||
237 | while (drv_data->rx < drv_data->rx_end) { | ||
238 | bfin_write(&drv_data->regs->tx16, *drv_data->tx16++); | ||
239 | bfin_sport_spi_stat_poll_complete(drv_data); | ||
240 | *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16); | ||
241 | } | ||
242 | } | ||
243 | |||
244 | static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = { | ||
245 | .write = bfin_sport_spi_u16_writer, | ||
246 | .read = bfin_sport_spi_u16_reader, | ||
247 | .duplex = bfin_sport_spi_u16_duplex, | ||
248 | }; | ||
249 | |||
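Editorial note: all six transfer helpers above are specialisations of one primitive: write a word to the SPORT transmit register, poll the status register until RXNE reports a received word, then read it back, substituting idle_tx_val or a dummy read when one direction has no buffer. Factored out purely for reading purposes, that primitive looks like the sketch below (not code from the patch).

	/* Editorial sketch of the common one-word exchange used by the ops above. */
	static inline u16 example_sport_xfer_word(struct bfin_sport_spi_master_data *drv_data,
						  u16 tx_word)
	{
		bfin_write(&drv_data->regs->tx16, tx_word);	/* clock one word out */
		bfin_sport_spi_stat_poll_complete(drv_data);	/* wait for RXNE (bounded by a 1s timeout) */
		return bfin_read(&drv_data->regs->rx16);	/* the word clocked in meanwhile */
	}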
250 | /* stop controller and re-config current chip */ | ||
251 | static void | ||
252 | bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) | ||
253 | { | ||
254 | struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; | ||
255 | unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15); | ||
256 | |||
257 | bfin_sport_spi_disable(drv_data); | ||
258 | dev_dbg(drv_data->dev, "restoring spi ctl state\n"); | ||
259 | |||
260 | bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); | ||
261 | bfin_write(&drv_data->regs->tcr2, bits); | ||
262 | bfin_write(&drv_data->regs->tclkdiv, chip->baud); | ||
263 | bfin_write(&drv_data->regs->tfsdiv, bits); | ||
264 | SSYNC(); | ||
265 | |||
266 | bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); | ||
267 | bfin_write(&drv_data->regs->rcr2, bits); | ||
268 | SSYNC(); | ||
269 | |||
270 | bfin_sport_spi_cs_active(chip); | ||
271 | } | ||
272 | |||
273 | /* test if there is more transfer to be done */ | ||
274 | static enum bfin_sport_spi_state | ||
275 | bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data) | ||
276 | { | ||
277 | struct spi_message *msg = drv_data->cur_msg; | ||
278 | struct spi_transfer *trans = drv_data->cur_transfer; | ||
279 | |||
280 | /* Move to next transfer */ | ||
281 | if (trans->transfer_list.next != &msg->transfers) { | ||
282 | drv_data->cur_transfer = | ||
283 | list_entry(trans->transfer_list.next, | ||
284 | struct spi_transfer, transfer_list); | ||
285 | return RUNNING_STATE; | ||
286 | } | ||
287 | |||
288 | return DONE_STATE; | ||
289 | } | ||
290 | |||
291 | /* | ||
292 | * caller already set message->status; | ||
293 | * dma and pio irqs are blocked give finished message back | ||
294 | */ | ||
295 | static void | ||
296 | bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data) | ||
297 | { | ||
298 | struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip; | ||
299 | unsigned long flags; | ||
300 | struct spi_message *msg; | ||
301 | |||
302 | spin_lock_irqsave(&drv_data->lock, flags); | ||
303 | msg = drv_data->cur_msg; | ||
304 | drv_data->state = START_STATE; | ||
305 | drv_data->cur_msg = NULL; | ||
306 | drv_data->cur_transfer = NULL; | ||
307 | drv_data->cur_chip = NULL; | ||
308 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
309 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
310 | |||
311 | if (!drv_data->cs_change) | ||
312 | bfin_sport_spi_cs_deactive(chip); | ||
313 | |||
314 | if (msg->complete) | ||
315 | msg->complete(msg->context); | ||
316 | } | ||
317 | |||
318 | static irqreturn_t | ||
319 | sport_err_handler(int irq, void *dev_id) | ||
320 | { | ||
321 | struct bfin_sport_spi_master_data *drv_data = dev_id; | ||
322 | u16 status; | ||
323 | |||
324 | dev_dbg(drv_data->dev, "%s enter\n", __func__); | ||
325 | status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF); | ||
326 | |||
327 | if (status) { | ||
328 | bfin_write(&drv_data->regs->stat, status); | ||
329 | SSYNC(); | ||
330 | |||
331 | bfin_sport_spi_disable(drv_data); | ||
332 | dev_err(drv_data->dev, "status error:%s%s%s%s\n", | ||
333 | status & TOVF ? " TOVF" : "", | ||
334 | status & TUVF ? " TUVF" : "", | ||
335 | status & ROVF ? " ROVF" : "", | ||
336 | status & RUVF ? " RUVF" : ""); | ||
337 | } | ||
338 | |||
339 | return IRQ_HANDLED; | ||
340 | } | ||
341 | |||
342 | static void | ||
343 | bfin_sport_spi_pump_transfers(unsigned long data) | ||
344 | { | ||
345 | struct bfin_sport_spi_master_data *drv_data = (void *)data; | ||
346 | struct spi_message *message = NULL; | ||
347 | struct spi_transfer *transfer = NULL; | ||
348 | struct spi_transfer *previous = NULL; | ||
349 | struct bfin_sport_spi_slave_data *chip = NULL; | ||
350 | unsigned int bits_per_word; | ||
351 | u32 tranf_success = 1; | ||
352 | u32 transfer_speed; | ||
353 | u8 full_duplex = 0; | ||
354 | |||
355 | /* Get current state information */ | ||
356 | message = drv_data->cur_msg; | ||
357 | transfer = drv_data->cur_transfer; | ||
358 | chip = drv_data->cur_chip; | ||
359 | |||
360 | if (transfer->speed_hz) | ||
361 | transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz); | ||
362 | else | ||
363 | transfer_speed = chip->baud; | ||
364 | bfin_write(&drv_data->regs->tclkdiv, transfer_speed); | ||
365 | SSYNC(); | ||
366 | |||
367 | /* | ||
368 | * if msg is error or done, report it back using complete() callback | ||
369 | */ | ||
370 | |||
371 | /* Handle abort */ | ||
372 | if (drv_data->state == ERROR_STATE) { | ||
373 | dev_dbg(drv_data->dev, "transfer: we've hit an error\n"); | ||
374 | message->status = -EIO; | ||
375 | bfin_sport_spi_giveback(drv_data); | ||
376 | return; | ||
377 | } | ||
378 | |||
379 | /* Handle end of message */ | ||
380 | if (drv_data->state == DONE_STATE) { | ||
381 | dev_dbg(drv_data->dev, "transfer: all done!\n"); | ||
382 | message->status = 0; | ||
383 | bfin_sport_spi_giveback(drv_data); | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | /* Delay if requested at end of transfer */ | ||
388 | if (drv_data->state == RUNNING_STATE) { | ||
389 | dev_dbg(drv_data->dev, "transfer: still running ...\n"); | ||
390 | previous = list_entry(transfer->transfer_list.prev, | ||
391 | struct spi_transfer, transfer_list); | ||
392 | if (previous->delay_usecs) | ||
393 | udelay(previous->delay_usecs); | ||
394 | } | ||
395 | |||
396 | if (transfer->len == 0) { | ||
397 | /* Move to next transfer of this msg */ | ||
398 | drv_data->state = bfin_sport_spi_next_transfer(drv_data); | ||
399 | /* Schedule next transfer tasklet */ | ||
400 | tasklet_schedule(&drv_data->pump_transfers); | ||
401 | } | ||
402 | |||
403 | if (transfer->tx_buf != NULL) { | ||
404 | drv_data->tx = (void *)transfer->tx_buf; | ||
405 | drv_data->tx_end = drv_data->tx + transfer->len; | ||
406 | dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n", | ||
407 | transfer->tx_buf, drv_data->tx_end); | ||
408 | } else | ||
409 | drv_data->tx = NULL; | ||
410 | |||
411 | if (transfer->rx_buf != NULL) { | ||
412 | full_duplex = transfer->tx_buf != NULL; | ||
413 | drv_data->rx = transfer->rx_buf; | ||
414 | drv_data->rx_end = drv_data->rx + transfer->len; | ||
415 | dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n", | ||
416 | transfer->rx_buf, drv_data->rx_end); | ||
417 | } else | ||
418 | drv_data->rx = NULL; | ||
419 | |||
420 | drv_data->cs_change = transfer->cs_change; | ||
421 | |||
422 | /* Bits per word setup */ | ||
423 | bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; | ||
424 | if (bits_per_word == 8) | ||
425 | drv_data->ops = &bfin_sport_transfer_ops_u8; | ||
426 | else | ||
427 | drv_data->ops = &bfin_sport_transfer_ops_u16; | ||
428 | |||
429 | drv_data->state = RUNNING_STATE; | ||
430 | |||
431 | if (drv_data->cs_change) | ||
432 | bfin_sport_spi_cs_active(chip); | ||
433 | |||
434 | dev_dbg(drv_data->dev, | ||
435 | "now pumping a transfer: width is %d, len is %d\n", | ||
436 | bits_per_word, transfer->len); | ||
437 | |||
438 | /* PIO mode write then read */ | ||
439 | dev_dbg(drv_data->dev, "doing IO transfer\n"); | ||
440 | |||
441 | bfin_sport_spi_enable(drv_data); | ||
442 | if (full_duplex) { | ||
443 | /* full duplex mode */ | ||
444 | BUG_ON((drv_data->tx_end - drv_data->tx) != | ||
445 | (drv_data->rx_end - drv_data->rx)); | ||
446 | drv_data->ops->duplex(drv_data); | ||
447 | |||
448 | if (drv_data->tx != drv_data->tx_end) | ||
449 | tranf_success = 0; | ||
450 | } else if (drv_data->tx != NULL) { | ||
451 | /* write only half duplex */ | ||
452 | |||
453 | drv_data->ops->write(drv_data); | ||
454 | |||
455 | if (drv_data->tx != drv_data->tx_end) | ||
456 | tranf_success = 0; | ||
457 | } else if (drv_data->rx != NULL) { | ||
458 | /* read only half duplex */ | ||
459 | |||
460 | drv_data->ops->read(drv_data); | ||
461 | if (drv_data->rx != drv_data->rx_end) | ||
462 | tranf_success = 0; | ||
463 | } | ||
464 | bfin_sport_spi_disable(drv_data); | ||
465 | |||
466 | if (!tranf_success) { | ||
467 | dev_dbg(drv_data->dev, "IO write error!\n"); | ||
468 | drv_data->state = ERROR_STATE; | ||
469 | } else { | ||
470 | /* Update total bytes transferred */ | ||
471 | message->actual_length += transfer->len; | ||
472 | /* Move to next transfer of this msg */ | ||
473 | drv_data->state = bfin_sport_spi_next_transfer(drv_data); | ||
474 | if (drv_data->cs_change) | ||
475 | bfin_sport_spi_cs_deactive(chip); | ||
476 | } | ||
477 | |||
478 | /* Schedule next transfer tasklet */ | ||
479 | tasklet_schedule(&drv_data->pump_transfers); | ||
480 | } | ||
481 | |||
482 | /* pop a msg from queue and kick off real transfer */ | ||
483 | static void | ||
484 | bfin_sport_spi_pump_messages(struct work_struct *work) | ||
485 | { | ||
486 | struct bfin_sport_spi_master_data *drv_data; | ||
487 | unsigned long flags; | ||
488 | struct spi_message *next_msg; | ||
489 | |||
490 | drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages); | ||
491 | |||
492 | /* Lock queue and check for queue work */ | ||
493 | spin_lock_irqsave(&drv_data->lock, flags); | ||
494 | if (list_empty(&drv_data->queue) || !drv_data->run) { | ||
495 | /* pumper kicked off but no work to do */ | ||
496 | drv_data->busy = 0; | ||
497 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
498 | return; | ||
499 | } | ||
500 | |||
501 | /* Make sure we are not already running a message */ | ||
502 | if (drv_data->cur_msg) { | ||
503 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
504 | return; | ||
505 | } | ||
506 | |||
507 | /* Extract head of queue */ | ||
508 | next_msg = list_entry(drv_data->queue.next, | ||
509 | struct spi_message, queue); | ||
510 | |||
511 | drv_data->cur_msg = next_msg; | ||
512 | |||
513 | /* Set up the SPORT using the per-chip configuration */ | ||
514 | drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); | ||
515 | |||
516 | list_del_init(&drv_data->cur_msg->queue); | ||
517 | |||
518 | /* Initialize message state */ | ||
519 | drv_data->cur_msg->state = START_STATE; | ||
520 | drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, | ||
521 | struct spi_transfer, transfer_list); | ||
522 | bfin_sport_spi_restore_state(drv_data); | ||
523 | dev_dbg(drv_data->dev, "got a message to pump, " | ||
524 | "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n", | ||
525 | drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio, | ||
526 | drv_data->cur_chip->ctl_reg); | ||
527 | |||
528 | dev_dbg(drv_data->dev, | ||
529 | "the first transfer len is %d\n", | ||
530 | drv_data->cur_transfer->len); | ||
531 | |||
532 | /* Mark as busy and launch transfers */ | ||
533 | tasklet_schedule(&drv_data->pump_transfers); | ||
534 | |||
535 | drv_data->busy = 1; | ||
536 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
537 | } | ||
538 | |||
539 | /* | ||
540 | * got a msg to transfer: queue it in drv_data->queue | ||
541 | * and kick off the message pumper | ||
542 | */ | ||
543 | static int | ||
544 | bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg) | ||
545 | { | ||
546 | struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master); | ||
547 | unsigned long flags; | ||
548 | |||
549 | spin_lock_irqsave(&drv_data->lock, flags); | ||
550 | |||
551 | if (!drv_data->run) { | ||
552 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
553 | return -ESHUTDOWN; | ||
554 | } | ||
555 | |||
556 | msg->actual_length = 0; | ||
557 | msg->status = -EINPROGRESS; | ||
558 | msg->state = START_STATE; | ||
559 | |||
560 | dev_dbg(&spi->dev, "adding a msg in transfer()\n"); | ||
561 | list_add_tail(&msg->queue, &drv_data->queue); | ||
562 | |||
563 | if (drv_data->run && !drv_data->busy) | ||
564 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
565 | |||
566 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
567 | |||
568 | return 0; | ||
569 | } | ||
570 | |||
571 | /* Called every time common spi devices change state */ | ||
572 | static int | ||
573 | bfin_sport_spi_setup(struct spi_device *spi) | ||
574 | { | ||
575 | struct bfin_sport_spi_slave_data *chip, *first = NULL; | ||
576 | int ret; | ||
577 | |||
578 | /* Only alloc (or use chip_info) on first setup */ | ||
579 | chip = spi_get_ctldata(spi); | ||
580 | if (chip == NULL) { | ||
581 | struct bfin5xx_spi_chip *chip_info; | ||
582 | |||
583 | chip = first = kzalloc(sizeof(*chip), GFP_KERNEL); | ||
584 | if (!chip) | ||
585 | return -ENOMEM; | ||
586 | |||
587 | /* platform chip_info isn't required */ | ||
588 | chip_info = spi->controller_data; | ||
589 | if (chip_info) { | ||
590 | /* | ||
591 | * DITFS and TDTYPE are the only things we don't set, but | ||
592 | * they probably shouldn't be changed by people. | ||
593 | */ | ||
594 | if (chip_info->ctl_reg || chip_info->enable_dma) { | ||
595 | ret = -EINVAL; | ||
596 | dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n"); | ||
597 | goto error; | ||
598 | } | ||
599 | chip->cs_chg_udelay = chip_info->cs_chg_udelay; | ||
600 | chip->idle_tx_val = chip_info->idle_tx_val; | ||
601 | spi->bits_per_word = chip_info->bits_per_word; | ||
602 | } | ||
603 | } | ||
604 | |||
605 | if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { | ||
606 | ret = -EINVAL; | ||
607 | goto error; | ||
608 | } | ||
609 | |||
610 | /* translate the common SPI framework settings into our registers; | ||
611 | * the following configuration is the same for tx and rx. | ||
612 | */ | ||
613 | |||
614 | if (spi->mode & SPI_CPHA) | ||
615 | chip->ctl_reg &= ~TCKFE; | ||
616 | else | ||
617 | chip->ctl_reg |= TCKFE; | ||
618 | |||
619 | if (spi->mode & SPI_LSB_FIRST) | ||
620 | chip->ctl_reg |= TLSBIT; | ||
621 | else | ||
622 | chip->ctl_reg &= ~TLSBIT; | ||
623 | |||
624 | /* SPORT in master mode */ | ||
625 | chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS; | ||
626 | |||
627 | chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz); | ||
628 | |||
629 | chip->cs_gpio = spi->chip_select; | ||
630 | ret = gpio_request(chip->cs_gpio, spi->modalias); | ||
631 | if (ret) | ||
632 | goto error; | ||
633 | |||
634 | dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n", | ||
635 | spi->modalias, spi->bits_per_word); | ||
636 | dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n", | ||
637 | chip->ctl_reg, spi->chip_select); | ||
638 | |||
639 | spi_set_ctldata(spi, chip); | ||
640 | |||
641 | bfin_sport_spi_cs_deactive(chip); | ||
642 | |||
643 | return ret; | ||
644 | |||
645 | error: | ||
646 | kfree(first); | ||
647 | return ret; | ||
648 | } | ||
649 | |||
650 | /* | ||
651 | * callback for spi framework. | ||
652 | * clean up driver-specific data | ||
653 | */ | ||
654 | static void | ||
655 | bfin_sport_spi_cleanup(struct spi_device *spi) | ||
656 | { | ||
657 | struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi); | ||
658 | |||
659 | if (!chip) | ||
660 | return; | ||
661 | |||
662 | gpio_free(chip->cs_gpio); | ||
663 | |||
664 | kfree(chip); | ||
665 | } | ||
666 | |||
667 | static int | ||
668 | bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data) | ||
669 | { | ||
670 | INIT_LIST_HEAD(&drv_data->queue); | ||
671 | spin_lock_init(&drv_data->lock); | ||
672 | |||
673 | drv_data->run = false; | ||
674 | drv_data->busy = 0; | ||
675 | |||
676 | /* init transfer tasklet */ | ||
677 | tasklet_init(&drv_data->pump_transfers, | ||
678 | bfin_sport_spi_pump_transfers, (unsigned long)drv_data); | ||
679 | |||
680 | /* init messages workqueue */ | ||
681 | INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages); | ||
682 | drv_data->workqueue = | ||
683 | create_singlethread_workqueue(dev_name(drv_data->master->dev.parent)); | ||
684 | if (drv_data->workqueue == NULL) | ||
685 | return -EBUSY; | ||
686 | |||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | static int | ||
691 | bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data) | ||
692 | { | ||
693 | unsigned long flags; | ||
694 | |||
695 | spin_lock_irqsave(&drv_data->lock, flags); | ||
696 | |||
697 | if (drv_data->run || drv_data->busy) { | ||
698 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
699 | return -EBUSY; | ||
700 | } | ||
701 | |||
702 | drv_data->run = true; | ||
703 | drv_data->cur_msg = NULL; | ||
704 | drv_data->cur_transfer = NULL; | ||
705 | drv_data->cur_chip = NULL; | ||
706 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
707 | |||
708 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
709 | |||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | static inline int | ||
714 | bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data) | ||
715 | { | ||
716 | unsigned long flags; | ||
717 | unsigned limit = 500; | ||
718 | int status = 0; | ||
719 | |||
720 | spin_lock_irqsave(&drv_data->lock, flags); | ||
721 | |||
722 | /* | ||
723 | * This is a bit lame, but is optimized for the common execution path. | ||
724 | * A wait_queue on drv_data->busy could be used, but then the common | ||
725 | * execution path (pump_messages) would be required to call wake_up or | ||
726 | * friends on every SPI message. Do this instead. | ||
727 | */ | ||
728 | drv_data->run = false; | ||
729 | while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { | ||
730 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
731 | msleep(10); | ||
732 | spin_lock_irqsave(&drv_data->lock, flags); | ||
733 | } | ||
734 | |||
735 | if (!list_empty(&drv_data->queue) || drv_data->busy) | ||
736 | status = -EBUSY; | ||
737 | |||
738 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
739 | |||
740 | return status; | ||
741 | } | ||
742 | |||
743 | static inline int | ||
744 | bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data) | ||
745 | { | ||
746 | int status; | ||
747 | |||
748 | status = bfin_sport_spi_stop_queue(drv_data); | ||
749 | if (status) | ||
750 | return status; | ||
751 | |||
752 | destroy_workqueue(drv_data->workqueue); | ||
753 | |||
754 | return 0; | ||
755 | } | ||
756 | |||
757 | static int __devinit | ||
758 | bfin_sport_spi_probe(struct platform_device *pdev) | ||
759 | { | ||
760 | struct device *dev = &pdev->dev; | ||
761 | struct bfin5xx_spi_master *platform_info; | ||
762 | struct spi_master *master; | ||
763 | struct resource *res, *ires; | ||
764 | struct bfin_sport_spi_master_data *drv_data; | ||
765 | int status; | ||
766 | |||
767 | platform_info = dev->platform_data; | ||
768 | |||
769 | /* Allocate master with space for drv_data */ | ||
770 | master = spi_alloc_master(dev, sizeof(*master) + 16); | ||
771 | if (!master) { | ||
772 | dev_err(dev, "cannot alloc spi_master\n"); | ||
773 | return -ENOMEM; | ||
774 | } | ||
775 | |||
776 | drv_data = spi_master_get_devdata(master); | ||
777 | drv_data->master = master; | ||
778 | drv_data->dev = dev; | ||
779 | drv_data->pin_req = platform_info->pin_req; | ||
780 | |||
781 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; | ||
782 | master->bus_num = pdev->id; | ||
783 | master->num_chipselect = platform_info->num_chipselect; | ||
784 | master->cleanup = bfin_sport_spi_cleanup; | ||
785 | master->setup = bfin_sport_spi_setup; | ||
786 | master->transfer = bfin_sport_spi_transfer; | ||
787 | |||
788 | /* Find and map our resources */ | ||
789 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
790 | if (res == NULL) { | ||
791 | dev_err(dev, "cannot get IORESOURCE_MEM\n"); | ||
792 | status = -ENOENT; | ||
793 | goto out_error_get_res; | ||
794 | } | ||
795 | |||
796 | drv_data->regs = ioremap(res->start, resource_size(res)); | ||
797 | if (drv_data->regs == NULL) { | ||
798 | dev_err(dev, "cannot map registers\n"); | ||
799 | status = -ENXIO; | ||
800 | goto out_error_ioremap; | ||
801 | } | ||
802 | |||
803 | ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
804 | if (!ires) { | ||
805 | dev_err(dev, "cannot get IORESOURCE_IRQ\n"); | ||
806 | status = -ENODEV; | ||
807 | goto out_error_get_ires; | ||
808 | } | ||
809 | drv_data->err_irq = ires->start; | ||
810 | |||
811 | /* Initialize and start the queue */ | ||
812 | status = bfin_sport_spi_init_queue(drv_data); | ||
813 | if (status) { | ||
814 | dev_err(dev, "problem initializing queue\n"); | ||
815 | goto out_error_queue_alloc; | ||
816 | } | ||
817 | |||
818 | status = bfin_sport_spi_start_queue(drv_data); | ||
819 | if (status) { | ||
820 | dev_err(dev, "problem starting queue\n"); | ||
821 | goto out_error_queue_alloc; | ||
822 | } | ||
823 | |||
824 | status = request_irq(drv_data->err_irq, sport_err_handler, | ||
825 | 0, "sport_spi_err", drv_data); | ||
826 | if (status) { | ||
827 | dev_err(dev, "unable to request sport err irq\n"); | ||
828 | goto out_error_irq; | ||
829 | } | ||
830 | |||
831 | status = peripheral_request_list(drv_data->pin_req, DRV_NAME); | ||
832 | if (status) { | ||
833 | dev_err(dev, "requesting peripherals failed\n"); | ||
834 | goto out_error_peripheral; | ||
835 | } | ||
836 | |||
837 | /* Register with the SPI framework */ | ||
838 | platform_set_drvdata(pdev, drv_data); | ||
839 | status = spi_register_master(master); | ||
840 | if (status) { | ||
841 | dev_err(dev, "problem registering spi master\n"); | ||
842 | goto out_error_master; | ||
843 | } | ||
844 | |||
845 | dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs); | ||
846 | return 0; | ||
847 | |||
848 | out_error_master: | ||
849 | peripheral_free_list(drv_data->pin_req); | ||
850 | out_error_peripheral: | ||
851 | free_irq(drv_data->err_irq, drv_data); | ||
852 | out_error_irq: | ||
853 | out_error_queue_alloc: | ||
854 | bfin_sport_spi_destroy_queue(drv_data); | ||
855 | out_error_get_ires: | ||
856 | iounmap(drv_data->regs); | ||
857 | out_error_ioremap: | ||
858 | out_error_get_res: | ||
859 | spi_master_put(master); | ||
860 | |||
861 | return status; | ||
862 | } | ||
863 | |||
864 | /* stop hardware and remove the driver */ | ||
865 | static int __devexit | ||
866 | bfin_sport_spi_remove(struct platform_device *pdev) | ||
867 | { | ||
868 | struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); | ||
869 | int status = 0; | ||
870 | |||
871 | if (!drv_data) | ||
872 | return 0; | ||
873 | |||
874 | /* Remove the queue */ | ||
875 | status = bfin_sport_spi_destroy_queue(drv_data); | ||
876 | if (status) | ||
877 | return status; | ||
878 | |||
879 | /* Disable the SPORT at the peripheral and SoC level */ | ||
880 | bfin_sport_spi_disable(drv_data); | ||
881 | |||
882 | /* Disconnect from the SPI framework */ | ||
883 | spi_unregister_master(drv_data->master); | ||
884 | |||
885 | peripheral_free_list(drv_data->pin_req); | ||
886 | |||
887 | /* Prevent double remove */ | ||
888 | platform_set_drvdata(pdev, NULL); | ||
889 | |||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | #ifdef CONFIG_PM | ||
894 | static int | ||
895 | bfin_sport_spi_suspend(struct platform_device *pdev, pm_message_t state) | ||
896 | { | ||
897 | struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); | ||
898 | int status; | ||
899 | |||
900 | status = bfin_sport_spi_stop_queue(drv_data); | ||
901 | if (status) | ||
902 | return status; | ||
903 | |||
904 | /* stop hardware */ | ||
905 | bfin_sport_spi_disable(drv_data); | ||
906 | |||
907 | return status; | ||
908 | } | ||
909 | |||
910 | static int | ||
911 | bfin_sport_spi_resume(struct platform_device *pdev) | ||
912 | { | ||
913 | struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev); | ||
914 | int status; | ||
915 | |||
916 | /* Enable the SPI interface */ | ||
917 | bfin_sport_spi_enable(drv_data); | ||
918 | |||
919 | /* Start the queue running */ | ||
920 | status = bfin_sport_spi_start_queue(drv_data); | ||
921 | if (status) | ||
922 | dev_err(drv_data->dev, "problem resuming queue\n"); | ||
923 | |||
924 | return status; | ||
925 | } | ||
926 | #else | ||
927 | # define bfin_sport_spi_suspend NULL | ||
928 | # define bfin_sport_spi_resume NULL | ||
929 | #endif | ||
930 | |||
931 | static struct platform_driver bfin_sport_spi_driver = { | ||
932 | .driver = { | ||
933 | .name = DRV_NAME, | ||
934 | .owner = THIS_MODULE, | ||
935 | }, | ||
936 | .probe = bfin_sport_spi_probe, | ||
937 | .remove = __devexit_p(bfin_sport_spi_remove), | ||
938 | .suspend = bfin_sport_spi_suspend, | ||
939 | .resume = bfin_sport_spi_resume, | ||
940 | }; | ||
941 | |||
942 | static int __init bfin_sport_spi_init(void) | ||
943 | { | ||
944 | return platform_driver_register(&bfin_sport_spi_driver); | ||
945 | } | ||
946 | module_init(bfin_sport_spi_init); | ||
947 | |||
948 | static void __exit bfin_sport_spi_exit(void) | ||
949 | { | ||
950 | platform_driver_unregister(&bfin_sport_spi_driver); | ||
951 | } | ||
952 | module_exit(bfin_sport_spi_exit); | ||
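
The SPORT-SPI master above registers with the SPI core like any other platform driver, so a board wires it up with a platform_device carrying a struct bfin5xx_spi_master plus spi_board_info entries for the attached slaves. The sketch below is illustrative only: the MMIO window, IRQ, pin-mux list, GPIO number and slave device are placeholders for one hypothetical board, while the num_chipselect/pin_req fields and the use of .chip_select as a GPIO number follow from the probe() and setup() paths shown in the diff.

    /* Hypothetical board-file wiring for the SPORT-SPI master above.
     * Addresses, IRQ, pins and the GPIO chip select are made-up examples;
     * only the structure fields mirror what the driver consumes. */
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>
    #include <asm/portmux.h>
    #include <asm/bfin5xx_spi.h>		/* struct bfin5xx_spi_master */

    static struct bfin5xx_spi_master bfin_sport_spi_info = {
    	.num_chipselect	= 1,
    	/* SPORT pin-mux list handed to peripheral_request_list() */
    	.pin_req	= {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_DRPRI,
    			   P_SPORT0_RSCLK, P_SPORT0_TFS, 0},
    };

    static struct resource bfin_sport_spi_resources[] = {
    	/* SPORT0 register block and error IRQ -- board specific */
    	{ .start = 0xFFC00800, .end = 0xFFC008FF, .flags = IORESOURCE_MEM },
    	{ .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR,
    	  .flags = IORESOURCE_IRQ },
    };

    static struct platform_device bfin_sport_spi_device = {
    	.name		= "bfin-sport-spi",	/* must match the driver's DRV_NAME */
    	.id		= 0,
    	.num_resources	= ARRAY_SIZE(bfin_sport_spi_resources),
    	.resource	= bfin_sport_spi_resources,
    	.dev		= { .platform_data = &bfin_sport_spi_info },
    };

    static struct spi_board_info bfin_sport_spi_board_info[] = {
    	{
    		.modalias	= "spidev",
    		.max_speed_hz	= 5000000,
    		.bus_num	= 0,	/* matches bfin_sport_spi_device.id */
    		.chip_select	= 10,	/* setup() requests this as a GPIO */
    	},
    };

Machine init code would then call spi_register_board_info(bfin_sport_spi_board_info, ARRAY_SIZE(bfin_sport_spi_board_info)) and platform_device_register(&bfin_sport_spi_device); whether those particular SPORT0 pins and the spidev slave make sense is entirely board dependent.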
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index 8b55724d5f39..14a63f6010d1 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c | |||
@@ -259,10 +259,6 @@ static void bitbang_work(struct work_struct *work) | |||
259 | struct spi_bitbang *bitbang = | 259 | struct spi_bitbang *bitbang = |
260 | container_of(work, struct spi_bitbang, work); | 260 | container_of(work, struct spi_bitbang, work); |
261 | unsigned long flags; | 261 | unsigned long flags; |
262 | int (*setup_transfer)(struct spi_device *, | ||
263 | struct spi_transfer *); | ||
264 | |||
265 | setup_transfer = bitbang->setup_transfer; | ||
266 | 262 | ||
267 | spin_lock_irqsave(&bitbang->lock, flags); | 263 | spin_lock_irqsave(&bitbang->lock, flags); |
268 | bitbang->busy = 1; | 264 | bitbang->busy = 1; |
@@ -300,11 +296,7 @@ static void bitbang_work(struct work_struct *work) | |||
300 | 296 | ||
301 | /* init (-1) or override (1) transfer params */ | 297 | /* init (-1) or override (1) transfer params */ |
302 | if (do_setup != 0) { | 298 | if (do_setup != 0) { |
303 | if (!setup_transfer) { | 299 | status = bitbang->setup_transfer(spi, t); |
304 | status = -ENOPROTOOPT; | ||
305 | break; | ||
306 | } | ||
307 | status = setup_transfer(spi, t); | ||
308 | if (status < 0) | 300 | if (status < 0) |
309 | break; | 301 | break; |
310 | if (do_setup == -1) | 302 | if (do_setup == -1) |
@@ -465,6 +457,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) | |||
465 | } | 457 | } |
466 | } else if (!bitbang->master->setup) | 458 | } else if (!bitbang->master->setup) |
467 | return -EINVAL; | 459 | return -EINVAL; |
460 | if (bitbang->master->transfer == spi_bitbang_transfer && | ||
461 | !bitbang->setup_transfer) | ||
462 | return -EINVAL; | ||
468 | 463 | ||
469 | /* this task is the only thing to touch the SPI bits */ | 464 | /* this task is the only thing to touch the SPI bits */ |
470 | bitbang->busy = 0; | 465 | bitbang->busy = 0; |
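
The spi_bitbang change above trades the per-message NULL test for a one-time check: spi_bitbang_start() now returns -EINVAL for any driver that keeps the default spi_bitbang_transfer but never fills in bitbang->setup_transfer, and bitbang_work() then calls the hook unconditionally. A minimal, hypothetical probe() illustrating the pattern is sketched below; the example_* names are placeholders and not part of this patch, and a real driver would do actual pin toggling in its helpers.

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>
    #include <linux/spi/spi_bitbang.h>

    /* Placeholder helpers -- a real driver would drive its pins here. */
    static void example_chipselect(struct spi_device *spi, int is_on)
    {
    }

    static u32 example_txrx_word_mode0(struct spi_device *spi, unsigned nsecs,
    				   u32 word, u8 bits)
    {
    	return 0;
    }

    static int example_spi_probe(struct platform_device *pdev)
    {
    	struct spi_master *master;
    	struct spi_bitbang *bitbang;

    	master = spi_alloc_master(&pdev->dev, sizeof(*bitbang));
    	if (!master)
    		return -ENOMEM;

    	bitbang = spi_master_get_devdata(master);
    	bitbang->master = master;

    	/* Mandatory now that bitbang_work() calls it without a NULL check:
    	 * either the library helper or a driver-specific hook. */
    	bitbang->setup_transfer = spi_bitbang_setup_transfer;
    	bitbang->chipselect = example_chipselect;
    	bitbang->txrx_word[SPI_MODE_0] = example_txrx_word_mode0;

    	master->bus_num = pdev->id;
    	master->num_chipselect = 1;

    	/* Fails with -EINVAL if setup_transfer was left NULL. */
    	return spi_bitbang_start(bitbang);
    }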
diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi_fsl_espi.c new file mode 100644 index 000000000000..496f895a0024 --- /dev/null +++ b/drivers/spi/spi_fsl_espi.c | |||
@@ -0,0 +1,762 @@ | |||
1 | /* | ||
2 | * Freescale eSPI controller driver. | ||
3 | * | ||
4 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/spi/spi.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/fsl_devices.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/of_platform.h> | ||
20 | #include <linux/of_spi.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <sysdev/fsl_soc.h> | ||
24 | |||
25 | #include "spi_fsl_lib.h" | ||
26 | |||
27 | /* eSPI Controller registers */ | ||
28 | struct fsl_espi_reg { | ||
29 | __be32 mode; /* 0x000 - eSPI mode register */ | ||
30 | __be32 event; /* 0x004 - eSPI event register */ | ||
31 | __be32 mask; /* 0x008 - eSPI mask register */ | ||
32 | __be32 command; /* 0x00c - eSPI command register */ | ||
33 | __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ | ||
34 | __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ | ||
35 | u8 res[8]; /* 0x018 - 0x01c reserved */ | ||
36 | __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ | ||
37 | }; | ||
38 | |||
39 | struct fsl_espi_transfer { | ||
40 | const void *tx_buf; | ||
41 | void *rx_buf; | ||
42 | unsigned len; | ||
43 | unsigned n_tx; | ||
44 | unsigned n_rx; | ||
45 | unsigned actual_length; | ||
46 | int status; | ||
47 | }; | ||
48 | |||
49 | /* eSPI Controller mode register definitions */ | ||
50 | #define SPMODE_ENABLE (1 << 31) | ||
51 | #define SPMODE_LOOP (1 << 30) | ||
52 | #define SPMODE_TXTHR(x) ((x) << 8) | ||
53 | #define SPMODE_RXTHR(x) ((x) << 0) | ||
54 | |||
55 | /* eSPI Controller CS mode register definitions */ | ||
56 | #define CSMODE_CI_INACTIVEHIGH (1 << 31) | ||
57 | #define CSMODE_CP_BEGIN_EDGECLK (1 << 30) | ||
58 | #define CSMODE_REV (1 << 29) | ||
59 | #define CSMODE_DIV16 (1 << 28) | ||
60 | #define CSMODE_PM(x) ((x) << 24) | ||
61 | #define CSMODE_POL_1 (1 << 20) | ||
62 | #define CSMODE_LEN(x) ((x) << 16) | ||
63 | #define CSMODE_BEF(x) ((x) << 12) | ||
64 | #define CSMODE_AFT(x) ((x) << 8) | ||
65 | #define CSMODE_CG(x) ((x) << 3) | ||
66 | |||
67 | /* Default mode/csmode for eSPI controller */ | ||
68 | #define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) | ||
69 | #define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ | ||
70 | | CSMODE_AFT(0) | CSMODE_CG(1)) | ||
71 | |||
72 | /* SPIE register values */ | ||
73 | #define SPIE_NE 0x00000200 /* Not empty */ | ||
74 | #define SPIE_NF 0x00000100 /* Not full */ | ||
75 | |||
76 | /* SPIM register values */ | ||
77 | #define SPIM_NE 0x00000200 /* Not empty */ | ||
78 | #define SPIM_NF 0x00000100 /* Not full */ | ||
79 | #define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) | ||
80 | #define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) | ||
81 | |||
82 | /* SPCOM register values */ | ||
83 | #define SPCOM_CS(x) ((x) << 30) | ||
84 | #define SPCOM_TRANLEN(x) ((x) << 0) | ||
85 | #define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ | ||
86 | |||
87 | static void fsl_espi_change_mode(struct spi_device *spi) | ||
88 | { | ||
89 | struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); | ||
90 | struct spi_mpc8xxx_cs *cs = spi->controller_state; | ||
91 | struct fsl_espi_reg *reg_base = mspi->reg_base; | ||
92 | __be32 __iomem *mode = ®_base->csmode[spi->chip_select]; | ||
93 | __be32 __iomem *espi_mode = ®_base->mode; | ||
94 | u32 tmp; | ||
95 | unsigned long flags; | ||
96 | |||
97 | /* Turn off IRQs locally to minimize time that SPI is disabled. */ | ||
98 | local_irq_save(flags); | ||
99 | |||
100 | /* Turn off the SPI unit prior to changing the mode */ | ||
101 | tmp = mpc8xxx_spi_read_reg(espi_mode); | ||
102 | mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); | ||
103 | mpc8xxx_spi_write_reg(mode, cs->hw_mode); | ||
104 | mpc8xxx_spi_write_reg(espi_mode, tmp); | ||
105 | |||
106 | local_irq_restore(flags); | ||
107 | } | ||
108 | |||
109 | static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) | ||
110 | { | ||
111 | u32 data; | ||
112 | u16 data_h; | ||
113 | u16 data_l; | ||
114 | const u32 *tx = mpc8xxx_spi->tx; | ||
115 | |||
116 | if (!tx) | ||
117 | return 0; | ||
118 | |||
119 | data = *tx++ << mpc8xxx_spi->tx_shift; | ||
120 | data_l = data & 0xffff; | ||
121 | data_h = (data >> 16) & 0xffff; | ||
122 | swab16s(&data_l); | ||
123 | swab16s(&data_h); | ||
124 | data = data_h | data_l; | ||
125 | |||
126 | mpc8xxx_spi->tx = tx; | ||
127 | return data; | ||
128 | } | ||
129 | |||
130 | static int fsl_espi_setup_transfer(struct spi_device *spi, | ||
131 | struct spi_transfer *t) | ||
132 | { | ||
133 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); | ||
134 | int bits_per_word = 0; | ||
135 | u8 pm; | ||
136 | u32 hz = 0; | ||
137 | struct spi_mpc8xxx_cs *cs = spi->controller_state; | ||
138 | |||
139 | if (t) { | ||
140 | bits_per_word = t->bits_per_word; | ||
141 | hz = t->speed_hz; | ||
142 | } | ||
143 | |||
144 | /* spi_transfer level calls that work per-word */ | ||
145 | if (!bits_per_word) | ||
146 | bits_per_word = spi->bits_per_word; | ||
147 | |||
148 | /* Make sure it's a bit width we support [4..16] */ | ||
149 | if ((bits_per_word < 4) || (bits_per_word > 16)) | ||
150 | return -EINVAL; | ||
151 | |||
152 | if (!hz) | ||
153 | hz = spi->max_speed_hz; | ||
154 | |||
155 | cs->rx_shift = 0; | ||
156 | cs->tx_shift = 0; | ||
157 | cs->get_rx = mpc8xxx_spi_rx_buf_u32; | ||
158 | cs->get_tx = mpc8xxx_spi_tx_buf_u32; | ||
159 | if (bits_per_word <= 8) { | ||
160 | cs->rx_shift = 8 - bits_per_word; | ||
161 | } else if (bits_per_word <= 16) { | ||
162 | cs->rx_shift = 16 - bits_per_word; | ||
163 | if (spi->mode & SPI_LSB_FIRST) | ||
164 | cs->get_tx = fsl_espi_tx_buf_lsb; | ||
165 | } else { | ||
166 | return -EINVAL; | ||
167 | } | ||
168 | |||
169 | mpc8xxx_spi->rx_shift = cs->rx_shift; | ||
170 | mpc8xxx_spi->tx_shift = cs->tx_shift; | ||
171 | mpc8xxx_spi->get_rx = cs->get_rx; | ||
172 | mpc8xxx_spi->get_tx = cs->get_tx; | ||
173 | |||
174 | bits_per_word = bits_per_word - 1; | ||
175 | |||
176 | /* mask out bits we are going to set */ | ||
177 | cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); | ||
178 | |||
179 | cs->hw_mode |= CSMODE_LEN(bits_per_word); | ||
180 | |||
181 | if ((mpc8xxx_spi->spibrg / hz) > 64) { | ||
182 | cs->hw_mode |= CSMODE_DIV16; | ||
183 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; | ||
184 | |||
185 | WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " | ||
186 | "Will use %d Hz instead.\n", dev_name(&spi->dev), | ||
187 | hz, mpc8xxx_spi->spibrg / 1024); | ||
188 | if (pm > 16) | ||
189 | pm = 16; | ||
190 | } else { | ||
191 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; | ||
192 | } | ||
193 | if (pm) | ||
194 | pm--; | ||
195 | |||
196 | cs->hw_mode |= CSMODE_PM(pm); | ||
197 | |||
198 | fsl_espi_change_mode(spi); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, | ||
203 | unsigned int len) | ||
204 | { | ||
205 | u32 word; | ||
206 | struct fsl_espi_reg *reg_base = mspi->reg_base; | ||
207 | |||
208 | mspi->count = len; | ||
209 | |||
210 | /* enable rx ints */ | ||
211 | mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); | ||
212 | |||
213 | /* transmit word */ | ||
214 | word = mspi->get_tx(mspi); | ||
215 | mpc8xxx_spi_write_reg(®_base->transmit, word); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) | ||
221 | { | ||
222 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); | ||
223 | struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; | ||
224 | unsigned int len = t->len; | ||
225 | u8 bits_per_word; | ||
226 | int ret; | ||
227 | |||
228 | bits_per_word = spi->bits_per_word; | ||
229 | if (t->bits_per_word) | ||
230 | bits_per_word = t->bits_per_word; | ||
231 | |||
232 | mpc8xxx_spi->len = t->len; | ||
233 | len = roundup(len, 4) / 4; | ||
234 | |||
235 | mpc8xxx_spi->tx = t->tx_buf; | ||
236 | mpc8xxx_spi->rx = t->rx_buf; | ||
237 | |||
238 | INIT_COMPLETION(mpc8xxx_spi->done); | ||
239 | |||
240 | /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ | ||
241 | if ((t->len - 1) > SPCOM_TRANLEN_MAX) { | ||
242 | dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" | ||
243 | " beyond the SPCOM[TRANLEN] field\n", t->len); | ||
244 | return -EINVAL; | ||
245 | } | ||
246 | mpc8xxx_spi_write_reg(®_base->command, | ||
247 | (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); | ||
248 | |||
249 | ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); | ||
250 | if (ret) | ||
251 | return ret; | ||
252 | |||
253 | wait_for_completion(&mpc8xxx_spi->done); | ||
254 | |||
255 | /* disable rx ints */ | ||
256 | mpc8xxx_spi_write_reg(®_base->mask, 0); | ||
257 | |||
258 | return mpc8xxx_spi->count; | ||
259 | } | ||
260 | |||
261 | static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) | ||
262 | { | ||
263 | if (cmd) { | ||
264 | cmd[1] = (u8)(addr >> 16); | ||
265 | cmd[2] = (u8)(addr >> 8); | ||
266 | cmd[3] = (u8)(addr >> 0); | ||
267 | } | ||
268 | } | ||
269 | |||
270 | static inline unsigned int fsl_espi_cmd2addr(u8 *cmd) | ||
271 | { | ||
272 | if (cmd) | ||
273 | return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static void fsl_espi_do_trans(struct spi_message *m, | ||
279 | struct fsl_espi_transfer *tr) | ||
280 | { | ||
281 | struct spi_device *spi = m->spi; | ||
282 | struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); | ||
283 | struct fsl_espi_transfer *espi_trans = tr; | ||
284 | struct spi_message message; | ||
285 | struct spi_transfer *t, *first, trans; | ||
286 | int status = 0; | ||
287 | |||
288 | spi_message_init(&message); | ||
289 | memset(&trans, 0, sizeof(trans)); | ||
290 | |||
291 | first = list_first_entry(&m->transfers, struct spi_transfer, | ||
292 | transfer_list); | ||
293 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
294 | if ((first->bits_per_word != t->bits_per_word) || | ||
295 | (first->speed_hz != t->speed_hz)) { | ||
296 | espi_trans->status = -EINVAL; | ||
297 | dev_err(mspi->dev, "bits_per_word/speed_hz should be" | ||
298 | " same for the same SPI transfer\n"); | ||
299 | return; | ||
300 | } | ||
301 | |||
302 | trans.speed_hz = t->speed_hz; | ||
303 | trans.bits_per_word = t->bits_per_word; | ||
304 | trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); | ||
305 | } | ||
306 | |||
307 | trans.len = espi_trans->len; | ||
308 | trans.tx_buf = espi_trans->tx_buf; | ||
309 | trans.rx_buf = espi_trans->rx_buf; | ||
310 | spi_message_add_tail(&trans, &message); | ||
311 | |||
312 | list_for_each_entry(t, &message.transfers, transfer_list) { | ||
313 | if (t->bits_per_word || t->speed_hz) { | ||
314 | status = -EINVAL; | ||
315 | |||
316 | status = fsl_espi_setup_transfer(spi, t); | ||
317 | if (status < 0) | ||
318 | break; | ||
319 | } | ||
320 | |||
321 | if (t->len) | ||
322 | status = fsl_espi_bufs(spi, t); | ||
323 | |||
324 | if (status) { | ||
325 | status = -EMSGSIZE; | ||
326 | break; | ||
327 | } | ||
328 | |||
329 | if (t->delay_usecs) | ||
330 | udelay(t->delay_usecs); | ||
331 | } | ||
332 | |||
333 | espi_trans->status = status; | ||
334 | fsl_espi_setup_transfer(spi, NULL); | ||
335 | } | ||
336 | |||
337 | static void fsl_espi_cmd_trans(struct spi_message *m, | ||
338 | struct fsl_espi_transfer *trans, u8 *rx_buff) | ||
339 | { | ||
340 | struct spi_transfer *t; | ||
341 | u8 *local_buf; | ||
342 | int i = 0; | ||
343 | struct fsl_espi_transfer *espi_trans = trans; | ||
344 | |||
345 | local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); | ||
346 | if (!local_buf) { | ||
347 | espi_trans->status = -ENOMEM; | ||
348 | return; | ||
349 | } | ||
350 | |||
351 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
352 | if (t->tx_buf) { | ||
353 | memcpy(local_buf + i, t->tx_buf, t->len); | ||
354 | i += t->len; | ||
355 | } | ||
356 | } | ||
357 | |||
358 | espi_trans->tx_buf = local_buf; | ||
359 | espi_trans->rx_buf = local_buf + espi_trans->n_tx; | ||
360 | fsl_espi_do_trans(m, espi_trans); | ||
361 | |||
362 | espi_trans->actual_length = espi_trans->len; | ||
363 | kfree(local_buf); | ||
364 | } | ||
365 | |||
366 | static void fsl_espi_rw_trans(struct spi_message *m, | ||
367 | struct fsl_espi_transfer *trans, u8 *rx_buff) | ||
368 | { | ||
369 | struct fsl_espi_transfer *espi_trans = trans; | ||
370 | unsigned int n_tx = espi_trans->n_tx; | ||
371 | unsigned int n_rx = espi_trans->n_rx; | ||
372 | struct spi_transfer *t; | ||
373 | u8 *local_buf; | ||
374 | u8 *rx_buf = rx_buff; | ||
375 | unsigned int trans_len; | ||
376 | unsigned int addr; | ||
377 | int i, pos, loop; | ||
378 | |||
379 | local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); | ||
380 | if (!local_buf) { | ||
381 | espi_trans->status = -ENOMEM; | ||
382 | return; | ||
383 | } | ||
384 | |||
385 | for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { | ||
386 | trans_len = n_rx - pos; | ||
387 | if (trans_len > SPCOM_TRANLEN_MAX - n_tx) | ||
388 | trans_len = SPCOM_TRANLEN_MAX - n_tx; | ||
389 | |||
390 | i = 0; | ||
391 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
392 | if (t->tx_buf) { | ||
393 | memcpy(local_buf + i, t->tx_buf, t->len); | ||
394 | i += t->len; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | if (pos > 0) { | ||
399 | addr = fsl_espi_cmd2addr(local_buf); | ||
400 | addr += pos; | ||
401 | fsl_espi_addr2cmd(addr, local_buf); | ||
402 | } | ||
403 | |||
404 | espi_trans->n_tx = n_tx; | ||
405 | espi_trans->n_rx = trans_len; | ||
406 | espi_trans->len = trans_len + n_tx; | ||
407 | espi_trans->tx_buf = local_buf; | ||
408 | espi_trans->rx_buf = local_buf + n_tx; | ||
409 | fsl_espi_do_trans(m, espi_trans); | ||
410 | |||
411 | memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); | ||
412 | |||
413 | if (loop > 0) | ||
414 | espi_trans->actual_length += espi_trans->len - n_tx; | ||
415 | else | ||
416 | espi_trans->actual_length += espi_trans->len; | ||
417 | } | ||
418 | |||
419 | kfree(local_buf); | ||
420 | } | ||
421 | |||
422 | static void fsl_espi_do_one_msg(struct spi_message *m) | ||
423 | { | ||
424 | struct spi_transfer *t; | ||
425 | u8 *rx_buf = NULL; | ||
426 | unsigned int n_tx = 0; | ||
427 | unsigned int n_rx = 0; | ||
428 | struct fsl_espi_transfer espi_trans; | ||
429 | |||
430 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
431 | if (t->tx_buf) | ||
432 | n_tx += t->len; | ||
433 | if (t->rx_buf) { | ||
434 | n_rx += t->len; | ||
435 | rx_buf = t->rx_buf; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | espi_trans.n_tx = n_tx; | ||
440 | espi_trans.n_rx = n_rx; | ||
441 | espi_trans.len = n_tx + n_rx; | ||
442 | espi_trans.actual_length = 0; | ||
443 | espi_trans.status = 0; | ||
444 | |||
445 | if (!rx_buf) | ||
446 | fsl_espi_cmd_trans(m, &espi_trans, NULL); | ||
447 | else | ||
448 | fsl_espi_rw_trans(m, &espi_trans, rx_buf); | ||
449 | |||
450 | m->actual_length = espi_trans.actual_length; | ||
451 | m->status = espi_trans.status; | ||
452 | m->complete(m->context); | ||
453 | } | ||
454 | |||
455 | static int fsl_espi_setup(struct spi_device *spi) | ||
456 | { | ||
457 | struct mpc8xxx_spi *mpc8xxx_spi; | ||
458 | struct fsl_espi_reg *reg_base; | ||
459 | int retval; | ||
460 | u32 hw_mode; | ||
461 | u32 loop_mode; | ||
462 | struct spi_mpc8xxx_cs *cs = spi->controller_state; | ||
463 | |||
464 | if (!spi->max_speed_hz) | ||
465 | return -EINVAL; | ||
466 | |||
467 | if (!cs) { | ||
468 | cs = kzalloc(sizeof *cs, GFP_KERNEL); | ||
469 | if (!cs) | ||
470 | return -ENOMEM; | ||
471 | spi->controller_state = cs; | ||
472 | } | ||
473 | |||
474 | mpc8xxx_spi = spi_master_get_devdata(spi->master); | ||
475 | reg_base = mpc8xxx_spi->reg_base; | ||
476 | |||
477 | hw_mode = cs->hw_mode; /* Save original settings */ | ||
478 | cs->hw_mode = mpc8xxx_spi_read_reg( | ||
479 | ®_base->csmode[spi->chip_select]); | ||
480 | /* mask out bits we are going to set */ | ||
481 | cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH | ||
482 | | CSMODE_REV); | ||
483 | |||
484 | if (spi->mode & SPI_CPHA) | ||
485 | cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK; | ||
486 | if (spi->mode & SPI_CPOL) | ||
487 | cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; | ||
488 | if (!(spi->mode & SPI_LSB_FIRST)) | ||
489 | cs->hw_mode |= CSMODE_REV; | ||
490 | |||
491 | /* Handle the loop mode */ | ||
492 | loop_mode = mpc8xxx_spi_read_reg(®_base->mode); | ||
493 | loop_mode &= ~SPMODE_LOOP; | ||
494 | if (spi->mode & SPI_LOOP) | ||
495 | loop_mode |= SPMODE_LOOP; | ||
496 | mpc8xxx_spi_write_reg(®_base->mode, loop_mode); | ||
497 | |||
498 | retval = fsl_espi_setup_transfer(spi, NULL); | ||
499 | if (retval < 0) { | ||
500 | cs->hw_mode = hw_mode; /* Restore settings */ | ||
501 | return retval; | ||
502 | } | ||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) | ||
507 | { | ||
508 | struct fsl_espi_reg *reg_base = mspi->reg_base; | ||
509 | |||
510 | /* We need to handle RX first */ | ||
511 | if (events & SPIE_NE) { | ||
512 | u32 rx_data, tmp; | ||
513 | u8 rx_data_8; | ||
514 | |||
515 | /* Spin until RX is done */ | ||
516 | while (SPIE_RXCNT(events) < min(4, mspi->len)) { | ||
517 | cpu_relax(); | ||
518 | events = mpc8xxx_spi_read_reg(®_base->event); | ||
519 | } | ||
520 | |||
521 | if (mspi->len >= 4) { | ||
522 | rx_data = mpc8xxx_spi_read_reg(®_base->receive); | ||
523 | } else { | ||
524 | tmp = mspi->len; | ||
525 | rx_data = 0; | ||
526 | while (tmp--) { | ||
527 | rx_data_8 = in_8((u8 *)®_base->receive); | ||
528 | rx_data |= (rx_data_8 << (tmp * 8)); | ||
529 | } | ||
530 | |||
531 | rx_data <<= (4 - mspi->len) * 8; | ||
532 | } | ||
533 | |||
534 | mspi->len -= 4; | ||
535 | |||
536 | if (mspi->rx) | ||
537 | mspi->get_rx(rx_data, mspi); | ||
538 | } | ||
539 | |||
540 | if (!(events & SPIE_NF)) { | ||
541 | int ret; | ||
542 | |||
543 | /* spin until TX is done */ | ||
544 | ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( | ||
545 | ®_base->event)) & SPIE_NF) == 0, 1000, 0); | ||
546 | if (!ret) { | ||
547 | dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); | ||
548 | return; | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /* Clear the events */ | ||
553 | mpc8xxx_spi_write_reg(®_base->event, events); | ||
554 | |||
555 | mspi->count -= 1; | ||
556 | if (mspi->count) { | ||
557 | u32 word = mspi->get_tx(mspi); | ||
558 | |||
559 | mpc8xxx_spi_write_reg(®_base->transmit, word); | ||
560 | } else { | ||
561 | complete(&mspi->done); | ||
562 | } | ||
563 | } | ||
564 | |||
565 | static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) | ||
566 | { | ||
567 | struct mpc8xxx_spi *mspi = context_data; | ||
568 | struct fsl_espi_reg *reg_base = mspi->reg_base; | ||
569 | irqreturn_t ret = IRQ_NONE; | ||
570 | u32 events; | ||
571 | |||
572 | /* Get interrupt events(tx/rx) */ | ||
573 | events = mpc8xxx_spi_read_reg(®_base->event); | ||
574 | if (events) | ||
575 | ret = IRQ_HANDLED; | ||
576 | |||
577 | dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); | ||
578 | |||
579 | fsl_espi_cpu_irq(mspi, events); | ||
580 | |||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | static void fsl_espi_remove(struct mpc8xxx_spi *mspi) | ||
585 | { | ||
586 | iounmap(mspi->reg_base); | ||
587 | } | ||
588 | |||
589 | static struct spi_master * __devinit fsl_espi_probe(struct device *dev, | ||
590 | struct resource *mem, unsigned int irq) | ||
591 | { | ||
592 | struct fsl_spi_platform_data *pdata = dev->platform_data; | ||
593 | struct spi_master *master; | ||
594 | struct mpc8xxx_spi *mpc8xxx_spi; | ||
595 | struct fsl_espi_reg *reg_base; | ||
596 | u32 regval; | ||
597 | int i, ret = 0; | ||
598 | |||
599 | master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); | ||
600 | if (!master) { | ||
601 | ret = -ENOMEM; | ||
602 | goto err; | ||
603 | } | ||
604 | |||
605 | dev_set_drvdata(dev, master); | ||
606 | |||
607 | ret = mpc8xxx_spi_probe(dev, mem, irq); | ||
608 | if (ret) | ||
609 | goto err_probe; | ||
610 | |||
611 | master->setup = fsl_espi_setup; | ||
612 | |||
613 | mpc8xxx_spi = spi_master_get_devdata(master); | ||
614 | mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; | ||
615 | mpc8xxx_spi->spi_remove = fsl_espi_remove; | ||
616 | |||
617 | mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); | ||
618 | if (!mpc8xxx_spi->reg_base) { | ||
619 | ret = -ENOMEM; | ||
620 | goto err_probe; | ||
621 | } | ||
622 | |||
623 | reg_base = mpc8xxx_spi->reg_base; | ||
624 | |||
625 | /* Register for SPI Interrupt */ | ||
626 | ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, | ||
627 | 0, "fsl_espi", mpc8xxx_spi); | ||
628 | if (ret) | ||
629 | goto free_irq; | ||
630 | |||
631 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { | ||
632 | mpc8xxx_spi->rx_shift = 16; | ||
633 | mpc8xxx_spi->tx_shift = 24; | ||
634 | } | ||
635 | |||
636 | /* SPI controller initializations */ | ||
637 | mpc8xxx_spi_write_reg(®_base->mode, 0); | ||
638 | mpc8xxx_spi_write_reg(®_base->mask, 0); | ||
639 | mpc8xxx_spi_write_reg(®_base->command, 0); | ||
640 | mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); | ||
641 | |||
642 | /* Init eSPI CS mode register */ | ||
643 | for (i = 0; i < pdata->max_chipselect; i++) | ||
644 | mpc8xxx_spi_write_reg(®_base->csmode[i], CSMODE_INIT_VAL); | ||
645 | |||
646 | /* Enable SPI interface */ | ||
647 | regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; | ||
648 | |||
649 | mpc8xxx_spi_write_reg(®_base->mode, regval); | ||
650 | |||
651 | ret = spi_register_master(master); | ||
652 | if (ret < 0) | ||
653 | goto unreg_master; | ||
654 | |||
655 | dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); | ||
656 | |||
657 | return master; | ||
658 | |||
659 | unreg_master: | ||
660 | free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); | ||
661 | free_irq: | ||
662 | iounmap(mpc8xxx_spi->reg_base); | ||
663 | err_probe: | ||
664 | spi_master_put(master); | ||
665 | err: | ||
666 | return ERR_PTR(ret); | ||
667 | } | ||
668 | |||
669 | static int of_fsl_espi_get_chipselects(struct device *dev) | ||
670 | { | ||
671 | struct device_node *np = dev->of_node; | ||
672 | struct fsl_spi_platform_data *pdata = dev->platform_data; | ||
673 | const u32 *prop; | ||
674 | int len; | ||
675 | |||
676 | prop = of_get_property(np, "fsl,espi-num-chipselects", &len); | ||
677 | if (!prop || len < sizeof(*prop)) { | ||
678 | dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); | ||
679 | return -EINVAL; | ||
680 | } | ||
681 | |||
682 | pdata->max_chipselect = *prop; | ||
683 | pdata->cs_control = NULL; | ||
684 | |||
685 | return 0; | ||
686 | } | ||
687 | |||
688 | static int __devinit of_fsl_espi_probe(struct platform_device *ofdev) | ||
689 | { | ||
690 | struct device *dev = &ofdev->dev; | ||
691 | struct device_node *np = ofdev->dev.of_node; | ||
692 | struct spi_master *master; | ||
693 | struct resource mem; | ||
694 | struct resource irq; | ||
695 | int ret = -ENOMEM; | ||
696 | |||
697 | ret = of_mpc8xxx_spi_probe(ofdev); | ||
698 | if (ret) | ||
699 | return ret; | ||
700 | |||
701 | ret = of_fsl_espi_get_chipselects(dev); | ||
702 | if (ret) | ||
703 | goto err; | ||
704 | |||
705 | ret = of_address_to_resource(np, 0, &mem); | ||
706 | if (ret) | ||
707 | goto err; | ||
708 | |||
709 | ret = of_irq_to_resource(np, 0, &irq); | ||
710 | if (!ret) { | ||
711 | ret = -EINVAL; | ||
712 | goto err; | ||
713 | } | ||
714 | |||
715 | master = fsl_espi_probe(dev, &mem, irq.start); | ||
716 | if (IS_ERR(master)) { | ||
717 | ret = PTR_ERR(master); | ||
718 | goto err; | ||
719 | } | ||
720 | |||
721 | return 0; | ||
722 | |||
723 | err: | ||
724 | return ret; | ||
725 | } | ||
726 | |||
727 | static int __devexit of_fsl_espi_remove(struct platform_device *dev) | ||
728 | { | ||
729 | return mpc8xxx_spi_remove(&dev->dev); | ||
730 | } | ||
731 | |||
732 | static const struct of_device_id of_fsl_espi_match[] = { | ||
733 | { .compatible = "fsl,mpc8536-espi" }, | ||
734 | {} | ||
735 | }; | ||
736 | MODULE_DEVICE_TABLE(of, of_fsl_espi_match); | ||
737 | |||
738 | static struct platform_driver fsl_espi_driver = { | ||
739 | .driver = { | ||
740 | .name = "fsl_espi", | ||
741 | .owner = THIS_MODULE, | ||
742 | .of_match_table = of_fsl_espi_match, | ||
743 | }, | ||
744 | .probe = of_fsl_espi_probe, | ||
745 | .remove = __devexit_p(of_fsl_espi_remove), | ||
746 | }; | ||
747 | |||
748 | static int __init fsl_espi_init(void) | ||
749 | { | ||
750 | return platform_driver_register(&fsl_espi_driver); | ||
751 | } | ||
752 | module_init(fsl_espi_init); | ||
753 | |||
754 | static void __exit fsl_espi_exit(void) | ||
755 | { | ||
756 | platform_driver_unregister(&fsl_espi_driver); | ||
757 | } | ||
758 | module_exit(fsl_espi_exit); | ||
759 | |||
760 | MODULE_AUTHOR("Mingkai Hu"); | ||
761 | MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); | ||
762 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi_fsl_lib.c new file mode 100644 index 000000000000..ff59f42ae990 --- /dev/null +++ b/drivers/spi/spi_fsl_lib.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* | ||
2 | * Freescale SPI/eSPI controller driver library. | ||
3 | * | ||
4 | * Maintainer: Kumar Gala | ||
5 | * | ||
6 | * Copyright (C) 2006 Polycom, Inc. | ||
7 | * | ||
8 | * CPM SPI and QE buffer descriptors mode support: | ||
9 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
10 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | ||
11 | * | ||
12 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify it | ||
15 | * under the terms of the GNU General Public License as published by the | ||
16 | * Free Software Foundation; either version 2 of the License, or (at your | ||
17 | * option) any later version. | ||
18 | */ | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/fsl_devices.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/of_platform.h> | ||
25 | #include <linux/of_spi.h> | ||
26 | #include <sysdev/fsl_soc.h> | ||
27 | |||
28 | #include "spi_fsl_lib.h" | ||
29 | |||
30 | #define MPC8XXX_SPI_RX_BUF(type) \ | ||
31 | void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ | ||
32 | { \ | ||
33 | type *rx = mpc8xxx_spi->rx; \ | ||
34 | *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ | ||
35 | mpc8xxx_spi->rx = rx; \ | ||
36 | } | ||
37 | |||
38 | #define MPC8XXX_SPI_TX_BUF(type) \ | ||
39 | u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ | ||
40 | { \ | ||
41 | u32 data; \ | ||
42 | const type *tx = mpc8xxx_spi->tx; \ | ||
43 | if (!tx) \ | ||
44 | return 0; \ | ||
45 | data = *tx++ << mpc8xxx_spi->tx_shift; \ | ||
46 | mpc8xxx_spi->tx = tx; \ | ||
47 | return data; \ | ||
48 | } | ||
49 | |||
50 | MPC8XXX_SPI_RX_BUF(u8) | ||
51 | MPC8XXX_SPI_RX_BUF(u16) | ||
52 | MPC8XXX_SPI_RX_BUF(u32) | ||
53 | MPC8XXX_SPI_TX_BUF(u8) | ||
54 | MPC8XXX_SPI_TX_BUF(u16) | ||
55 | MPC8XXX_SPI_TX_BUF(u32) | ||
56 | |||
57 | struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) | ||
58 | { | ||
59 | return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); | ||
60 | } | ||
61 | |||
62 | void mpc8xxx_spi_work(struct work_struct *work) | ||
63 | { | ||
64 | struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, | ||
65 | work); | ||
66 | |||
67 | spin_lock_irq(&mpc8xxx_spi->lock); | ||
68 | while (!list_empty(&mpc8xxx_spi->queue)) { | ||
69 | struct spi_message *m = container_of(mpc8xxx_spi->queue.next, | ||
70 | struct spi_message, queue); | ||
71 | |||
72 | list_del_init(&m->queue); | ||
73 | spin_unlock_irq(&mpc8xxx_spi->lock); | ||
74 | |||
75 | if (mpc8xxx_spi->spi_do_one_msg) | ||
76 | mpc8xxx_spi->spi_do_one_msg(m); | ||
77 | |||
78 | spin_lock_irq(&mpc8xxx_spi->lock); | ||
79 | } | ||
80 | spin_unlock_irq(&mpc8xxx_spi->lock); | ||
81 | } | ||
82 | |||
83 | int mpc8xxx_spi_transfer(struct spi_device *spi, | ||
84 | struct spi_message *m) | ||
85 | { | ||
86 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); | ||
87 | unsigned long flags; | ||
88 | |||
89 | m->actual_length = 0; | ||
90 | m->status = -EINPROGRESS; | ||
91 | |||
92 | spin_lock_irqsave(&mpc8xxx_spi->lock, flags); | ||
93 | list_add_tail(&m->queue, &mpc8xxx_spi->queue); | ||
94 | queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); | ||
95 | spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); | ||
96 | |||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | void mpc8xxx_spi_cleanup(struct spi_device *spi) | ||
101 | { | ||
102 | kfree(spi->controller_state); | ||
103 | } | ||
104 | |||
105 | const char *mpc8xxx_spi_strmode(unsigned int flags) | ||
106 | { | ||
107 | if (flags & SPI_QE_CPU_MODE) { | ||
108 | return "QE CPU"; | ||
109 | } else if (flags & SPI_CPM_MODE) { | ||
110 | if (flags & SPI_QE) | ||
111 | return "QE"; | ||
112 | else if (flags & SPI_CPM2) | ||
113 | return "CPM2"; | ||
114 | else | ||
115 | return "CPM1"; | ||
116 | } | ||
117 | return "CPU"; | ||
118 | } | ||
119 | |||
120 | int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, | ||
121 | unsigned int irq) | ||
122 | { | ||
123 | struct fsl_spi_platform_data *pdata = dev->platform_data; | ||
124 | struct spi_master *master; | ||
125 | struct mpc8xxx_spi *mpc8xxx_spi; | ||
126 | int ret = 0; | ||
127 | |||
128 | master = dev_get_drvdata(dev); | ||
129 | |||
130 | /* the spi->mode bits understood by this driver: */ | ||
131 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | ||
132 | | SPI_LSB_FIRST | SPI_LOOP; | ||
133 | |||
134 | master->transfer = mpc8xxx_spi_transfer; | ||
135 | master->cleanup = mpc8xxx_spi_cleanup; | ||
136 | master->dev.of_node = dev->of_node; | ||
137 | |||
138 | mpc8xxx_spi = spi_master_get_devdata(master); | ||
139 | mpc8xxx_spi->dev = dev; | ||
140 | mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; | ||
141 | mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; | ||
142 | mpc8xxx_spi->flags = pdata->flags; | ||
143 | mpc8xxx_spi->spibrg = pdata->sysclk; | ||
144 | mpc8xxx_spi->irq = irq; | ||
145 | |||
146 | mpc8xxx_spi->rx_shift = 0; | ||
147 | mpc8xxx_spi->tx_shift = 0; | ||
148 | |||
149 | init_completion(&mpc8xxx_spi->done); | ||
150 | |||
151 | master->bus_num = pdata->bus_num; | ||
152 | master->num_chipselect = pdata->max_chipselect; | ||
153 | |||
154 | spin_lock_init(&mpc8xxx_spi->lock); | ||
155 | init_completion(&mpc8xxx_spi->done); | ||
156 | INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); | ||
157 | INIT_LIST_HEAD(&mpc8xxx_spi->queue); | ||
158 | |||
159 | mpc8xxx_spi->workqueue = create_singlethread_workqueue( | ||
160 | dev_name(master->dev.parent)); | ||
161 | if (mpc8xxx_spi->workqueue == NULL) { | ||
162 | ret = -EBUSY; | ||
163 | goto err; | ||
164 | } | ||
165 | |||
166 | return 0; | ||
167 | |||
168 | err: | ||
169 | return ret; | ||
170 | } | ||
171 | |||
172 | int __devexit mpc8xxx_spi_remove(struct device *dev) | ||
173 | { | ||
174 | struct mpc8xxx_spi *mpc8xxx_spi; | ||
175 | struct spi_master *master; | ||
176 | |||
177 | master = dev_get_drvdata(dev); | ||
178 | mpc8xxx_spi = spi_master_get_devdata(master); | ||
179 | |||
180 | flush_workqueue(mpc8xxx_spi->workqueue); | ||
181 | destroy_workqueue(mpc8xxx_spi->workqueue); | ||
182 | spi_unregister_master(master); | ||
183 | |||
184 | free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); | ||
185 | |||
186 | if (mpc8xxx_spi->spi_remove) | ||
187 | mpc8xxx_spi->spi_remove(mpc8xxx_spi); | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev) | ||
193 | { | ||
194 | struct device *dev = &ofdev->dev; | ||
195 | struct device_node *np = ofdev->dev.of_node; | ||
196 | struct mpc8xxx_spi_probe_info *pinfo; | ||
197 | struct fsl_spi_platform_data *pdata; | ||
198 | const void *prop; | ||
199 | int ret = -ENOMEM; | ||
200 | |||
201 | pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); | ||
202 | if (!pinfo) | ||
203 | return -ENOMEM; | ||
204 | |||
205 | pdata = &pinfo->pdata; | ||
206 | dev->platform_data = pdata; | ||
207 | |||
208 | /* Allocate bus num dynamically. */ | ||
209 | pdata->bus_num = -1; | ||
210 | |||
211 | /* SPI controller is either clocked from QE or SoC clock. */ | ||
212 | pdata->sysclk = get_brgfreq(); | ||
213 | if (pdata->sysclk == -1) { | ||
214 | pdata->sysclk = fsl_get_sys_freq(); | ||
215 | if (pdata->sysclk == -1) { | ||
216 | ret = -ENODEV; | ||
217 | goto err; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | prop = of_get_property(np, "mode", NULL); | ||
222 | if (prop && !strcmp(prop, "cpu-qe")) | ||
223 | pdata->flags = SPI_QE_CPU_MODE; | ||
224 | else if (prop && !strcmp(prop, "qe")) | ||
225 | pdata->flags = SPI_CPM_MODE | SPI_QE; | ||
226 | else if (of_device_is_compatible(np, "fsl,cpm2-spi")) | ||
227 | pdata->flags = SPI_CPM_MODE | SPI_CPM2; | ||
228 | else if (of_device_is_compatible(np, "fsl,cpm1-spi")) | ||
229 | pdata->flags = SPI_CPM_MODE | SPI_CPM1; | ||
230 | |||
231 | return 0; | ||
232 | |||
233 | err: | ||
234 | kfree(pinfo); | ||
235 | return ret; | ||
236 | } | ||
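
The mode-string helper and the device-tree probe above encode the same flag scheme: the "mode" property (or a fsl,cpm1-spi / fsl,cpm2-spi compatible string) selects a combination of SPI_QE_CPU_MODE, SPI_CPM_MODE, SPI_QE, SPI_CPM2 and SPI_CPM1, and mpc8xxx_spi_strmode() decodes that combination back into the label printed at probe time. As a minimal user-space sketch of that decode, with placeholder bit values (the real definitions live in the Freescale platform-data headers, not in this patch):

#include <stdio.h>

#define SPI_CPM_MODE     (1 << 0)   /* placeholder value for illustration */
#define SPI_CPM1         (1 << 1)   /* placeholder value */
#define SPI_CPM2         (1 << 2)   /* placeholder value */
#define SPI_QE           (1 << 3)   /* placeholder value */
#define SPI_QE_CPU_MODE  (1 << 4)   /* placeholder value */

static const char *strmode(unsigned int flags)
{
	/* Same precedence as the driver: QE-CPU first, then the CPM variants. */
	if (flags & SPI_QE_CPU_MODE)
		return "QE CPU";
	if (flags & SPI_CPM_MODE) {
		if (flags & SPI_QE)
			return "QE";
		if (flags & SPI_CPM2)
			return "CPM2";
		return "CPM1";
	}
	return "CPU";
}

int main(void)
{
	/* mode = "qe" in the device tree maps to SPI_CPM_MODE | SPI_QE. */
	printf("%s\n", strmode(SPI_CPM_MODE | SPI_QE));  /* prints "QE" */
	printf("%s\n", strmode(SPI_QE_CPU_MODE));        /* prints "QE CPU" */
	printf("%s\n", strmode(0));                      /* prints "CPU" */
	return 0;
}
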
diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi_fsl_lib.h new file mode 100644 index 000000000000..cbe881b9ea76 --- /dev/null +++ b/drivers/spi/spi_fsl_lib.h | |||
@@ -0,0 +1,123 @@ | |||
1 | /* | ||
2 | * Freescale SPI/eSPI controller driver library. | ||
3 | * | ||
4 | * Maintainer: Kumar Gala | ||
5 | * | ||
6 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
7 | * Copyright (C) 2006 Polycom, Inc. | ||
8 | * | ||
9 | * CPM SPI and QE buffer descriptors mode support: | ||
10 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
11 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | */ | ||
18 | #ifndef __SPI_FSL_LIB_H__ | ||
19 | #define __SPI_FSL_LIB_H__ | ||
20 | |||
21 | #include <asm/io.h> | ||
22 | |||
23 | /* SPI/eSPI Controller driver's private data. */ | ||
24 | struct mpc8xxx_spi { | ||
25 | struct device *dev; | ||
26 | void *reg_base; | ||
27 | |||
28 | /* rx & tx bufs from the spi_transfer */ | ||
29 | const void *tx; | ||
30 | void *rx; | ||
31 | #ifdef CONFIG_SPI_FSL_ESPI | ||
32 | int len; | ||
33 | #endif | ||
34 | |||
35 | int subblock; | ||
36 | struct spi_pram __iomem *pram; | ||
37 | struct cpm_buf_desc __iomem *tx_bd; | ||
38 | struct cpm_buf_desc __iomem *rx_bd; | ||
39 | |||
40 | struct spi_transfer *xfer_in_progress; | ||
41 | |||
42 | /* dma addresses for CPM transfers */ | ||
43 | dma_addr_t tx_dma; | ||
44 | dma_addr_t rx_dma; | ||
45 | bool map_tx_dma; | ||
46 | bool map_rx_dma; | ||
47 | |||
48 | dma_addr_t dma_dummy_tx; | ||
49 | dma_addr_t dma_dummy_rx; | ||
50 | |||
51 | /* functions to deal with different sized buffers */ | ||
52 | void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); | ||
53 | u32(*get_tx) (struct mpc8xxx_spi *); | ||
54 | |||
55 | /* hooks for different controller driver */ | ||
56 | void (*spi_do_one_msg) (struct spi_message *m); | ||
57 | void (*spi_remove) (struct mpc8xxx_spi *mspi); | ||
58 | |||
59 | unsigned int count; | ||
60 | unsigned int irq; | ||
61 | |||
62 | unsigned nsecs; /* (clock cycle time)/2 */ | ||
63 | |||
64 | u32 spibrg; /* SPIBRG input clock */ | ||
65 | u32 rx_shift; /* RX data reg shift when in qe mode */ | ||
66 | u32 tx_shift; /* TX data reg shift when in qe mode */ | ||
67 | |||
68 | unsigned int flags; | ||
69 | |||
70 | struct workqueue_struct *workqueue; | ||
71 | struct work_struct work; | ||
72 | |||
73 | struct list_head queue; | ||
74 | spinlock_t lock; | ||
75 | |||
76 | struct completion done; | ||
77 | }; | ||
78 | |||
79 | struct spi_mpc8xxx_cs { | ||
80 | /* functions to deal with different sized buffers */ | ||
81 | void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); | ||
82 | u32 (*get_tx) (struct mpc8xxx_spi *); | ||
83 | u32 rx_shift; /* RX data reg shift when in qe mode */ | ||
84 | u32 tx_shift; /* TX data reg shift when in qe mode */ | ||
85 | u32 hw_mode; /* Holds HW mode register settings */ | ||
86 | }; | ||
87 | |||
88 | static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) | ||
89 | { | ||
90 | out_be32(reg, val); | ||
91 | } | ||
92 | |||
93 | static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) | ||
94 | { | ||
95 | return in_be32(reg); | ||
96 | } | ||
97 | |||
98 | struct mpc8xxx_spi_probe_info { | ||
99 | struct fsl_spi_platform_data pdata; | ||
100 | int *gpios; | ||
101 | bool *alow_flags; | ||
102 | }; | ||
103 | |||
104 | extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi); | ||
105 | extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi); | ||
106 | extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi); | ||
107 | extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); | ||
108 | extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); | ||
109 | extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); | ||
110 | |||
111 | extern struct mpc8xxx_spi_probe_info *to_of_pinfo( | ||
112 | struct fsl_spi_platform_data *pdata); | ||
113 | extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, | ||
114 | struct spi_transfer *t, unsigned int len); | ||
115 | extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); | ||
116 | extern void mpc8xxx_spi_cleanup(struct spi_device *spi); | ||
117 | extern const char *mpc8xxx_spi_strmode(unsigned int flags); | ||
118 | extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, | ||
119 | unsigned int irq); | ||
120 | extern int mpc8xxx_spi_remove(struct device *dev); | ||
121 | extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev); | ||
122 | |||
123 | #endif /* __SPI_FSL_LIB_H__ */ | ||
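
The point of this new header is that spi_fsl_spi.c (below) and the eSPI controller driver can share one struct mpc8xxx_spi and one probe path, while each installs its own spi_do_one_msg and spi_remove hooks. A rough sketch of that split follows; the my_spi_* names are hypothetical and stand in for a controller driver, and fsl_spi_probe() later in this patch is the real instance of the pattern:

#include <linux/spi/spi.h>
#include "spi_fsl_lib.h"

/* Hypothetical per-controller hooks; in the real drivers these are
 * fsl_spi_do_one_msg()/fsl_spi_remove() and their eSPI counterparts. */
static void my_spi_do_one_msg(struct spi_message *m) { /* run the transfers */ }
static void my_spi_remove(struct mpc8xxx_spi *mspi)   { /* undo ioremap etc. */ }

static int my_spi_attach(struct device *dev, struct spi_master *master,
			 struct resource *mem, unsigned int irq)
{
	struct mpc8xxx_spi *mpc8xxx_spi;
	int ret;

	dev_set_drvdata(dev, master);

	/* Shared part from spi_fsl_lib.c: message queue, workqueue,
	 * completion, bus_num/num_chipselect from the platform data. */
	ret = mpc8xxx_spi_probe(dev, mem, irq);
	if (ret)
		return ret;

	/* Controller-specific part: install the hooks declared above. */
	mpc8xxx_spi = spi_master_get_devdata(master);
	mpc8xxx_spi->spi_do_one_msg = my_spi_do_one_msg;
	mpc8xxx_spi->spi_remove = my_spi_remove;

	return spi_register_master(master);
}
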
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_fsl_spi.c index 1dd86b835cd8..7963c9b49566 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_fsl_spi.c | |||
@@ -1,9 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * MPC8xxx SPI controller driver. | 2 | * Freescale SPI controller driver. |
3 | * | 3 | * |
4 | * Maintainer: Kumar Gala | 4 | * Maintainer: Kumar Gala |
5 | * | 5 | * |
6 | * Copyright (C) 2006 Polycom, Inc. | 6 | * Copyright (C) 2006 Polycom, Inc. |
7 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
7 | * | 8 | * |
8 | * CPM SPI and QE buffer descriptors mode support: | 9 | * CPM SPI and QE buffer descriptors mode support: |
9 | * Copyright (c) 2009 MontaVista Software, Inc. | 10 | * Copyright (c) 2009 MontaVista Software, Inc. |
@@ -15,18 +16,11 @@ | |||
15 | * option) any later version. | 16 | * option) any later version. |
16 | */ | 17 | */ |
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
18 | #include <linux/init.h> | ||
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/bug.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/completion.h> | ||
26 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
27 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
28 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
29 | #include <linux/device.h> | ||
30 | #include <linux/spi/spi.h> | 24 | #include <linux/spi/spi.h> |
31 | #include <linux/spi/spi_bitbang.h> | 25 | #include <linux/spi/spi_bitbang.h> |
32 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
@@ -38,12 +32,12 @@ | |||
38 | #include <linux/of_platform.h> | 32 | #include <linux/of_platform.h> |
39 | #include <linux/gpio.h> | 33 | #include <linux/gpio.h> |
40 | #include <linux/of_gpio.h> | 34 | #include <linux/of_gpio.h> |
41 | #include <linux/slab.h> | ||
42 | 35 | ||
43 | #include <sysdev/fsl_soc.h> | 36 | #include <sysdev/fsl_soc.h> |
44 | #include <asm/cpm.h> | 37 | #include <asm/cpm.h> |
45 | #include <asm/qe.h> | 38 | #include <asm/qe.h> |
46 | #include <asm/irq.h> | 39 | |
40 | #include "spi_fsl_lib.h" | ||
47 | 41 | ||
48 | /* CPM1 and CPM2 are mutually exclusive. */ | 42 | /* CPM1 and CPM2 are mutually exclusive. */ |
49 | #ifdef CONFIG_CPM1 | 43 | #ifdef CONFIG_CPM1 |
@@ -55,7 +49,7 @@ | |||
55 | #endif | 49 | #endif |
56 | 50 | ||
57 | /* SPI Controller registers */ | 51 | /* SPI Controller registers */ |
58 | struct mpc8xxx_spi_reg { | 52 | struct fsl_spi_reg { |
59 | u8 res1[0x20]; | 53 | u8 res1[0x20]; |
60 | __be32 mode; | 54 | __be32 mode; |
61 | __be32 event; | 55 | __be32 event; |
@@ -80,7 +74,7 @@ struct mpc8xxx_spi_reg { | |||
80 | 74 | ||
81 | /* | 75 | /* |
82 | * Default for SPI Mode: | 76 | * Default for SPI Mode: |
83 | * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk) | 77 | * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk) |
84 | */ | 78 | */ |
85 | #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ | 79 | #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ |
86 | SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) | 80 | SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) |
@@ -102,112 +96,16 @@ struct mpc8xxx_spi_reg { | |||
102 | #define SPI_PRAM_SIZE 0x100 | 96 | #define SPI_PRAM_SIZE 0x100 |
103 | #define SPI_MRBLR ((unsigned int)PAGE_SIZE) | 97 | #define SPI_MRBLR ((unsigned int)PAGE_SIZE) |
104 | 98 | ||
105 | /* SPI Controller driver's private data. */ | 99 | static void *fsl_dummy_rx; |
106 | struct mpc8xxx_spi { | 100 | static DEFINE_MUTEX(fsl_dummy_rx_lock); |
107 | struct device *dev; | 101 | static int fsl_dummy_rx_refcnt; |
108 | struct mpc8xxx_spi_reg __iomem *base; | ||
109 | |||
110 | /* rx & tx bufs from the spi_transfer */ | ||
111 | const void *tx; | ||
112 | void *rx; | ||
113 | |||
114 | int subblock; | ||
115 | struct spi_pram __iomem *pram; | ||
116 | struct cpm_buf_desc __iomem *tx_bd; | ||
117 | struct cpm_buf_desc __iomem *rx_bd; | ||
118 | |||
119 | struct spi_transfer *xfer_in_progress; | ||
120 | |||
121 | /* dma addresses for CPM transfers */ | ||
122 | dma_addr_t tx_dma; | ||
123 | dma_addr_t rx_dma; | ||
124 | bool map_tx_dma; | ||
125 | bool map_rx_dma; | ||
126 | |||
127 | dma_addr_t dma_dummy_tx; | ||
128 | dma_addr_t dma_dummy_rx; | ||
129 | |||
130 | /* functions to deal with different sized buffers */ | ||
131 | void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); | ||
132 | u32(*get_tx) (struct mpc8xxx_spi *); | ||
133 | |||
134 | unsigned int count; | ||
135 | unsigned int irq; | ||
136 | |||
137 | unsigned nsecs; /* (clock cycle time)/2 */ | ||
138 | |||
139 | u32 spibrg; /* SPIBRG input clock */ | ||
140 | u32 rx_shift; /* RX data reg shift when in qe mode */ | ||
141 | u32 tx_shift; /* TX data reg shift when in qe mode */ | ||
142 | |||
143 | unsigned int flags; | ||
144 | |||
145 | struct workqueue_struct *workqueue; | ||
146 | struct work_struct work; | ||
147 | |||
148 | struct list_head queue; | ||
149 | spinlock_t lock; | ||
150 | |||
151 | struct completion done; | ||
152 | }; | ||
153 | |||
154 | static void *mpc8xxx_dummy_rx; | ||
155 | static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock); | ||
156 | static int mpc8xxx_dummy_rx_refcnt; | ||
157 | |||
158 | struct spi_mpc8xxx_cs { | ||
159 | /* functions to deal with different sized buffers */ | ||
160 | void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); | ||
161 | u32 (*get_tx) (struct mpc8xxx_spi *); | ||
162 | u32 rx_shift; /* RX data reg shift when in qe mode */ | ||
163 | u32 tx_shift; /* TX data reg shift when in qe mode */ | ||
164 | u32 hw_mode; /* Holds HW mode register settings */ | ||
165 | }; | ||
166 | |||
167 | static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) | ||
168 | { | ||
169 | out_be32(reg, val); | ||
170 | } | ||
171 | |||
172 | static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) | ||
173 | { | ||
174 | return in_be32(reg); | ||
175 | } | ||
176 | |||
177 | #define MPC83XX_SPI_RX_BUF(type) \ | ||
178 | static \ | ||
179 | void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ | ||
180 | { \ | ||
181 | type *rx = mpc8xxx_spi->rx; \ | ||
182 | *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ | ||
183 | mpc8xxx_spi->rx = rx; \ | ||
184 | } | ||
185 | |||
186 | #define MPC83XX_SPI_TX_BUF(type) \ | ||
187 | static \ | ||
188 | u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ | ||
189 | { \ | ||
190 | u32 data; \ | ||
191 | const type *tx = mpc8xxx_spi->tx; \ | ||
192 | if (!tx) \ | ||
193 | return 0; \ | ||
194 | data = *tx++ << mpc8xxx_spi->tx_shift; \ | ||
195 | mpc8xxx_spi->tx = tx; \ | ||
196 | return data; \ | ||
197 | } | ||
198 | 102 | ||
199 | MPC83XX_SPI_RX_BUF(u8) | 103 | static void fsl_spi_change_mode(struct spi_device *spi) |
200 | MPC83XX_SPI_RX_BUF(u16) | ||
201 | MPC83XX_SPI_RX_BUF(u32) | ||
202 | MPC83XX_SPI_TX_BUF(u8) | ||
203 | MPC83XX_SPI_TX_BUF(u16) | ||
204 | MPC83XX_SPI_TX_BUF(u32) | ||
205 | |||
206 | static void mpc8xxx_spi_change_mode(struct spi_device *spi) | ||
207 | { | 104 | { |
208 | struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); | 105 | struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); |
209 | struct spi_mpc8xxx_cs *cs = spi->controller_state; | 106 | struct spi_mpc8xxx_cs *cs = spi->controller_state; |
210 | __be32 __iomem *mode = &mspi->base->mode; | 107 | struct fsl_spi_reg *reg_base = mspi->reg_base; |
108 | __be32 __iomem *mode = ®_base->mode; | ||
211 | unsigned long flags; | 109 | unsigned long flags; |
212 | 110 | ||
213 | if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) | 111 | if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) |
@@ -238,7 +136,7 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi) | |||
238 | local_irq_restore(flags); | 136 | local_irq_restore(flags); |
239 | } | 137 | } |
240 | 138 | ||
241 | static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) | 139 | static void fsl_spi_chipselect(struct spi_device *spi, int value) |
242 | { | 140 | { |
243 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); | 141 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); |
244 | struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; | 142 | struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; |
@@ -256,18 +154,17 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) | |||
256 | mpc8xxx_spi->get_rx = cs->get_rx; | 154 | mpc8xxx_spi->get_rx = cs->get_rx; |
257 | mpc8xxx_spi->get_tx = cs->get_tx; | 155 | mpc8xxx_spi->get_tx = cs->get_tx; |
258 | 156 | ||
259 | mpc8xxx_spi_change_mode(spi); | 157 | fsl_spi_change_mode(spi); |
260 | 158 | ||
261 | if (pdata->cs_control) | 159 | if (pdata->cs_control) |
262 | pdata->cs_control(spi, pol); | 160 | pdata->cs_control(spi, pol); |
263 | } | 161 | } |
264 | } | 162 | } |
265 | 163 | ||
266 | static int | 164 | static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, |
267 | mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, | 165 | struct spi_device *spi, |
268 | struct spi_device *spi, | 166 | struct mpc8xxx_spi *mpc8xxx_spi, |
269 | struct mpc8xxx_spi *mpc8xxx_spi, | 167 | int bits_per_word) |
270 | int bits_per_word) | ||
271 | { | 168 | { |
272 | cs->rx_shift = 0; | 169 | cs->rx_shift = 0; |
273 | cs->tx_shift = 0; | 170 | cs->tx_shift = 0; |
@@ -307,10 +204,9 @@ mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, | |||
307 | return bits_per_word; | 204 | return bits_per_word; |
308 | } | 205 | } |
309 | 206 | ||
310 | static int | 207 | static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, |
311 | mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, | 208 | struct spi_device *spi, |
312 | struct spi_device *spi, | 209 | int bits_per_word) |
313 | int bits_per_word) | ||
314 | { | 210 | { |
315 | /* QE uses Little Endian for words > 8 | 211 | /* QE uses Little Endian for words > 8 |
316 | * so transform all words > 8 into 8 bits | 212 | * so transform all words > 8 into 8 bits |
@@ -326,13 +222,13 @@ mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, | |||
326 | return bits_per_word; | 222 | return bits_per_word; |
327 | } | 223 | } |
328 | 224 | ||
329 | static | 225 | static int fsl_spi_setup_transfer(struct spi_device *spi, |
330 | int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | 226 | struct spi_transfer *t) |
331 | { | 227 | { |
332 | struct mpc8xxx_spi *mpc8xxx_spi; | 228 | struct mpc8xxx_spi *mpc8xxx_spi; |
333 | int bits_per_word; | 229 | int bits_per_word = 0; |
334 | u8 pm; | 230 | u8 pm; |
335 | u32 hz; | 231 | u32 hz = 0; |
336 | struct spi_mpc8xxx_cs *cs = spi->controller_state; | 232 | struct spi_mpc8xxx_cs *cs = spi->controller_state; |
337 | 233 | ||
338 | mpc8xxx_spi = spi_master_get_devdata(spi->master); | 234 | mpc8xxx_spi = spi_master_get_devdata(spi->master); |
@@ -340,9 +236,6 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
340 | if (t) { | 236 | if (t) { |
341 | bits_per_word = t->bits_per_word; | 237 | bits_per_word = t->bits_per_word; |
342 | hz = t->speed_hz; | 238 | hz = t->speed_hz; |
343 | } else { | ||
344 | bits_per_word = 0; | ||
345 | hz = 0; | ||
346 | } | 239 | } |
347 | 240 | ||
348 | /* spi_transfer level calls that work per-word */ | 241 | /* spi_transfer level calls that work per-word */ |
@@ -388,23 +281,25 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | |||
388 | hz, mpc8xxx_spi->spibrg / 1024); | 281 | hz, mpc8xxx_spi->spibrg / 1024); |
389 | if (pm > 16) | 282 | if (pm > 16) |
390 | pm = 16; | 283 | pm = 16; |
391 | } else | 284 | } else { |
392 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; | 285 | pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; |
286 | } | ||
393 | if (pm) | 287 | if (pm) |
394 | pm--; | 288 | pm--; |
395 | 289 | ||
396 | cs->hw_mode |= SPMODE_PM(pm); | 290 | cs->hw_mode |= SPMODE_PM(pm); |
397 | 291 | ||
398 | mpc8xxx_spi_change_mode(spi); | 292 | fsl_spi_change_mode(spi); |
399 | return 0; | 293 | return 0; |
400 | } | 294 | } |
401 | 295 | ||
402 | static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) | 296 | static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) |
403 | { | 297 | { |
404 | struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; | 298 | struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; |
405 | struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; | 299 | struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; |
406 | unsigned int xfer_len = min(mspi->count, SPI_MRBLR); | 300 | unsigned int xfer_len = min(mspi->count, SPI_MRBLR); |
407 | unsigned int xfer_ofs; | 301 | unsigned int xfer_ofs; |
302 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
408 | 303 | ||
409 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; | 304 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; |
410 | 305 | ||
@@ -424,13 +319,14 @@ static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) | |||
424 | BD_SC_LAST); | 319 | BD_SC_LAST); |
425 | 320 | ||
426 | /* start transfer */ | 321 | /* start transfer */ |
427 | mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR); | 322 | mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); |
428 | } | 323 | } |
429 | 324 | ||
430 | static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, | 325 | static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, |
431 | struct spi_transfer *t, bool is_dma_mapped) | 326 | struct spi_transfer *t, bool is_dma_mapped) |
432 | { | 327 | { |
433 | struct device *dev = mspi->dev; | 328 | struct device *dev = mspi->dev; |
329 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
434 | 330 | ||
435 | if (is_dma_mapped) { | 331 | if (is_dma_mapped) { |
436 | mspi->map_tx_dma = 0; | 332 | mspi->map_tx_dma = 0; |
@@ -475,13 +371,13 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, | |||
475 | } | 371 | } |
476 | 372 | ||
477 | /* enable rx ints */ | 373 | /* enable rx ints */ |
478 | mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB); | 374 | mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); |
479 | 375 | ||
480 | mspi->xfer_in_progress = t; | 376 | mspi->xfer_in_progress = t; |
481 | mspi->count = t->len; | 377 | mspi->count = t->len; |
482 | 378 | ||
483 | /* start CPM transfers */ | 379 | /* start CPM transfers */ |
484 | mpc8xxx_spi_cpm_bufs_start(mspi); | 380 | fsl_spi_cpm_bufs_start(mspi); |
485 | 381 | ||
486 | return 0; | 382 | return 0; |
487 | 383 | ||
@@ -491,7 +387,7 @@ err_rx_dma: | |||
491 | return -ENOMEM; | 387 | return -ENOMEM; |
492 | } | 388 | } |
493 | 389 | ||
494 | static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) | 390 | static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) |
495 | { | 391 | { |
496 | struct device *dev = mspi->dev; | 392 | struct device *dev = mspi->dev; |
497 | struct spi_transfer *t = mspi->xfer_in_progress; | 393 | struct spi_transfer *t = mspi->xfer_in_progress; |
@@ -503,31 +399,34 @@ static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) | |||
503 | mspi->xfer_in_progress = NULL; | 399 | mspi->xfer_in_progress = NULL; |
504 | } | 400 | } |
505 | 401 | ||
506 | static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi, | 402 | static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, |
507 | struct spi_transfer *t, unsigned int len) | 403 | struct spi_transfer *t, unsigned int len) |
508 | { | 404 | { |
509 | u32 word; | 405 | u32 word; |
406 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
510 | 407 | ||
511 | mspi->count = len; | 408 | mspi->count = len; |
512 | 409 | ||
513 | /* enable rx ints */ | 410 | /* enable rx ints */ |
514 | mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE); | 411 | mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); |
515 | 412 | ||
516 | /* transmit word */ | 413 | /* transmit word */ |
517 | word = mspi->get_tx(mspi); | 414 | word = mspi->get_tx(mspi); |
518 | mpc8xxx_spi_write_reg(&mspi->base->transmit, word); | 415 | mpc8xxx_spi_write_reg(®_base->transmit, word); |
519 | 416 | ||
520 | return 0; | 417 | return 0; |
521 | } | 418 | } |
522 | 419 | ||
523 | static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, | 420 | static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, |
524 | bool is_dma_mapped) | 421 | bool is_dma_mapped) |
525 | { | 422 | { |
526 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); | 423 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); |
424 | struct fsl_spi_reg *reg_base; | ||
527 | unsigned int len = t->len; | 425 | unsigned int len = t->len; |
528 | u8 bits_per_word; | 426 | u8 bits_per_word; |
529 | int ret; | 427 | int ret; |
530 | 428 | ||
429 | reg_base = mpc8xxx_spi->reg_base; | ||
531 | bits_per_word = spi->bits_per_word; | 430 | bits_per_word = spi->bits_per_word; |
532 | if (t->bits_per_word) | 431 | if (t->bits_per_word) |
533 | bits_per_word = t->bits_per_word; | 432 | bits_per_word = t->bits_per_word; |
@@ -551,24 +450,24 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, | |||
551 | INIT_COMPLETION(mpc8xxx_spi->done); | 450 | INIT_COMPLETION(mpc8xxx_spi->done); |
552 | 451 | ||
553 | if (mpc8xxx_spi->flags & SPI_CPM_MODE) | 452 | if (mpc8xxx_spi->flags & SPI_CPM_MODE) |
554 | ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); | 453 | ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); |
555 | else | 454 | else |
556 | ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len); | 455 | ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); |
557 | if (ret) | 456 | if (ret) |
558 | return ret; | 457 | return ret; |
559 | 458 | ||
560 | wait_for_completion(&mpc8xxx_spi->done); | 459 | wait_for_completion(&mpc8xxx_spi->done); |
561 | 460 | ||
562 | /* disable rx ints */ | 461 | /* disable rx ints */ |
563 | mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); | 462 | mpc8xxx_spi_write_reg(®_base->mask, 0); |
564 | 463 | ||
565 | if (mpc8xxx_spi->flags & SPI_CPM_MODE) | 464 | if (mpc8xxx_spi->flags & SPI_CPM_MODE) |
566 | mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi); | 465 | fsl_spi_cpm_bufs_complete(mpc8xxx_spi); |
567 | 466 | ||
568 | return mpc8xxx_spi->count; | 467 | return mpc8xxx_spi->count; |
569 | } | 468 | } |
570 | 469 | ||
571 | static void mpc8xxx_spi_do_one_msg(struct spi_message *m) | 470 | static void fsl_spi_do_one_msg(struct spi_message *m) |
572 | { | 471 | { |
573 | struct spi_device *spi = m->spi; | 472 | struct spi_device *spi = m->spi; |
574 | struct spi_transfer *t; | 473 | struct spi_transfer *t; |
@@ -584,18 +483,18 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) | |||
584 | status = -EINVAL; | 483 | status = -EINVAL; |
585 | 484 | ||
586 | if (cs_change) | 485 | if (cs_change) |
587 | status = mpc8xxx_spi_setup_transfer(spi, t); | 486 | status = fsl_spi_setup_transfer(spi, t); |
588 | if (status < 0) | 487 | if (status < 0) |
589 | break; | 488 | break; |
590 | } | 489 | } |
591 | 490 | ||
592 | if (cs_change) { | 491 | if (cs_change) { |
593 | mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE); | 492 | fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); |
594 | ndelay(nsecs); | 493 | ndelay(nsecs); |
595 | } | 494 | } |
596 | cs_change = t->cs_change; | 495 | cs_change = t->cs_change; |
597 | if (t->len) | 496 | if (t->len) |
598 | status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped); | 497 | status = fsl_spi_bufs(spi, t, m->is_dma_mapped); |
599 | if (status) { | 498 | if (status) { |
600 | status = -EMSGSIZE; | 499 | status = -EMSGSIZE; |
601 | break; | 500 | break; |
@@ -607,7 +506,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) | |||
607 | 506 | ||
608 | if (cs_change) { | 507 | if (cs_change) { |
609 | ndelay(nsecs); | 508 | ndelay(nsecs); |
610 | mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); | 509 | fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); |
611 | ndelay(nsecs); | 510 | ndelay(nsecs); |
612 | } | 511 | } |
613 | } | 512 | } |
@@ -617,35 +516,16 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) | |||
617 | 516 | ||
618 | if (status || !cs_change) { | 517 | if (status || !cs_change) { |
619 | ndelay(nsecs); | 518 | ndelay(nsecs); |
620 | mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); | 519 | fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); |
621 | } | 520 | } |
622 | 521 | ||
623 | mpc8xxx_spi_setup_transfer(spi, NULL); | 522 | fsl_spi_setup_transfer(spi, NULL); |
624 | } | ||
625 | |||
626 | static void mpc8xxx_spi_work(struct work_struct *work) | ||
627 | { | ||
628 | struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, | ||
629 | work); | ||
630 | |||
631 | spin_lock_irq(&mpc8xxx_spi->lock); | ||
632 | while (!list_empty(&mpc8xxx_spi->queue)) { | ||
633 | struct spi_message *m = container_of(mpc8xxx_spi->queue.next, | ||
634 | struct spi_message, queue); | ||
635 | |||
636 | list_del_init(&m->queue); | ||
637 | spin_unlock_irq(&mpc8xxx_spi->lock); | ||
638 | |||
639 | mpc8xxx_spi_do_one_msg(m); | ||
640 | |||
641 | spin_lock_irq(&mpc8xxx_spi->lock); | ||
642 | } | ||
643 | spin_unlock_irq(&mpc8xxx_spi->lock); | ||
644 | } | 523 | } |
645 | 524 | ||
646 | static int mpc8xxx_spi_setup(struct spi_device *spi) | 525 | static int fsl_spi_setup(struct spi_device *spi) |
647 | { | 526 | { |
648 | struct mpc8xxx_spi *mpc8xxx_spi; | 527 | struct mpc8xxx_spi *mpc8xxx_spi; |
528 | struct fsl_spi_reg *reg_base; | ||
649 | int retval; | 529 | int retval; |
650 | u32 hw_mode; | 530 | u32 hw_mode; |
651 | struct spi_mpc8xxx_cs *cs = spi->controller_state; | 531 | struct spi_mpc8xxx_cs *cs = spi->controller_state; |
@@ -661,8 +541,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) | |||
661 | } | 541 | } |
662 | mpc8xxx_spi = spi_master_get_devdata(spi->master); | 542 | mpc8xxx_spi = spi_master_get_devdata(spi->master); |
663 | 543 | ||
544 | reg_base = mpc8xxx_spi->reg_base; | ||
545 | |||
664 | hw_mode = cs->hw_mode; /* Save original settings */ | 546 | hw_mode = cs->hw_mode; /* Save original settings */ |
665 | cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); | 547 | cs->hw_mode = mpc8xxx_spi_read_reg(®_base->mode); |
666 | /* mask out bits we are going to set */ | 548 | /* mask out bits we are going to set */ |
667 | cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH | 549 | cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH |
668 | | SPMODE_REV | SPMODE_LOOP); | 550 | | SPMODE_REV | SPMODE_LOOP); |
@@ -676,7 +558,7 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) | |||
676 | if (spi->mode & SPI_LOOP) | 558 | if (spi->mode & SPI_LOOP) |
677 | cs->hw_mode |= SPMODE_LOOP; | 559 | cs->hw_mode |= SPMODE_LOOP; |
678 | 560 | ||
679 | retval = mpc8xxx_spi_setup_transfer(spi, NULL); | 561 | retval = fsl_spi_setup_transfer(spi, NULL); |
680 | if (retval < 0) { | 562 | if (retval < 0) { |
681 | cs->hw_mode = hw_mode; /* Restore settings */ | 563 | cs->hw_mode = hw_mode; /* Restore settings */ |
682 | return retval; | 564 | return retval; |
@@ -684,9 +566,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) | |||
684 | return 0; | 566 | return 0; |
685 | } | 567 | } |
686 | 568 | ||
687 | static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) | 569 | static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) |
688 | { | 570 | { |
689 | u16 len; | 571 | u16 len; |
572 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
690 | 573 | ||
691 | dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, | 574 | dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, |
692 | in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); | 575 | in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); |
@@ -698,20 +581,22 @@ static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) | |||
698 | } | 581 | } |
699 | 582 | ||
700 | /* Clear the events */ | 583 | /* Clear the events */ |
701 | mpc8xxx_spi_write_reg(&mspi->base->event, events); | 584 | mpc8xxx_spi_write_reg(®_base->event, events); |
702 | 585 | ||
703 | mspi->count -= len; | 586 | mspi->count -= len; |
704 | if (mspi->count) | 587 | if (mspi->count) |
705 | mpc8xxx_spi_cpm_bufs_start(mspi); | 588 | fsl_spi_cpm_bufs_start(mspi); |
706 | else | 589 | else |
707 | complete(&mspi->done); | 590 | complete(&mspi->done); |
708 | } | 591 | } |
709 | 592 | ||
710 | static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) | 593 | static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) |
711 | { | 594 | { |
595 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
596 | |||
712 | /* We need to handle RX first */ | 597 | /* We need to handle RX first */ |
713 | if (events & SPIE_NE) { | 598 | if (events & SPIE_NE) { |
714 | u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive); | 599 | u32 rx_data = mpc8xxx_spi_read_reg(®_base->receive); |
715 | 600 | ||
716 | if (mspi->rx) | 601 | if (mspi->rx) |
717 | mspi->get_rx(rx_data, mspi); | 602 | mspi->get_rx(rx_data, mspi); |
@@ -720,102 +605,80 @@ static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) | |||
720 | if ((events & SPIE_NF) == 0) | 605 | if ((events & SPIE_NF) == 0) |
721 | /* spin until TX is done */ | 606 | /* spin until TX is done */ |
722 | while (((events = | 607 | while (((events = |
723 | mpc8xxx_spi_read_reg(&mspi->base->event)) & | 608 | mpc8xxx_spi_read_reg(®_base->event)) & |
724 | SPIE_NF) == 0) | 609 | SPIE_NF) == 0) |
725 | cpu_relax(); | 610 | cpu_relax(); |
726 | 611 | ||
727 | /* Clear the events */ | 612 | /* Clear the events */ |
728 | mpc8xxx_spi_write_reg(&mspi->base->event, events); | 613 | mpc8xxx_spi_write_reg(®_base->event, events); |
729 | 614 | ||
730 | mspi->count -= 1; | 615 | mspi->count -= 1; |
731 | if (mspi->count) { | 616 | if (mspi->count) { |
732 | u32 word = mspi->get_tx(mspi); | 617 | u32 word = mspi->get_tx(mspi); |
733 | 618 | ||
734 | mpc8xxx_spi_write_reg(&mspi->base->transmit, word); | 619 | mpc8xxx_spi_write_reg(®_base->transmit, word); |
735 | } else { | 620 | } else { |
736 | complete(&mspi->done); | 621 | complete(&mspi->done); |
737 | } | 622 | } |
738 | } | 623 | } |
739 | 624 | ||
740 | static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) | 625 | static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) |
741 | { | 626 | { |
742 | struct mpc8xxx_spi *mspi = context_data; | 627 | struct mpc8xxx_spi *mspi = context_data; |
743 | irqreturn_t ret = IRQ_NONE; | 628 | irqreturn_t ret = IRQ_NONE; |
744 | u32 events; | 629 | u32 events; |
630 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
745 | 631 | ||
746 | /* Get interrupt events(tx/rx) */ | 632 | /* Get interrupt events(tx/rx) */ |
747 | events = mpc8xxx_spi_read_reg(&mspi->base->event); | 633 | events = mpc8xxx_spi_read_reg(®_base->event); |
748 | if (events) | 634 | if (events) |
749 | ret = IRQ_HANDLED; | 635 | ret = IRQ_HANDLED; |
750 | 636 | ||
751 | dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); | 637 | dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); |
752 | 638 | ||
753 | if (mspi->flags & SPI_CPM_MODE) | 639 | if (mspi->flags & SPI_CPM_MODE) |
754 | mpc8xxx_spi_cpm_irq(mspi, events); | 640 | fsl_spi_cpm_irq(mspi, events); |
755 | else | 641 | else |
756 | mpc8xxx_spi_cpu_irq(mspi, events); | 642 | fsl_spi_cpu_irq(mspi, events); |
757 | 643 | ||
758 | return ret; | 644 | return ret; |
759 | } | 645 | } |
760 | 646 | ||
761 | static int mpc8xxx_spi_transfer(struct spi_device *spi, | 647 | static void *fsl_spi_alloc_dummy_rx(void) |
762 | struct spi_message *m) | ||
763 | { | 648 | { |
764 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); | 649 | mutex_lock(&fsl_dummy_rx_lock); |
765 | unsigned long flags; | ||
766 | 650 | ||
767 | m->actual_length = 0; | 651 | if (!fsl_dummy_rx) |
768 | m->status = -EINPROGRESS; | 652 | fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); |
653 | if (fsl_dummy_rx) | ||
654 | fsl_dummy_rx_refcnt++; | ||
769 | 655 | ||
770 | spin_lock_irqsave(&mpc8xxx_spi->lock, flags); | 656 | mutex_unlock(&fsl_dummy_rx_lock); |
771 | list_add_tail(&m->queue, &mpc8xxx_spi->queue); | ||
772 | queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); | ||
773 | spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); | ||
774 | 657 | ||
775 | return 0; | 658 | return fsl_dummy_rx; |
776 | } | 659 | } |
777 | 660 | ||
778 | 661 | static void fsl_spi_free_dummy_rx(void) | |
779 | static void mpc8xxx_spi_cleanup(struct spi_device *spi) | ||
780 | { | 662 | { |
781 | kfree(spi->controller_state); | 663 | mutex_lock(&fsl_dummy_rx_lock); |
782 | } | ||
783 | 664 | ||
784 | static void *mpc8xxx_spi_alloc_dummy_rx(void) | 665 | switch (fsl_dummy_rx_refcnt) { |
785 | { | ||
786 | mutex_lock(&mpc8xxx_dummy_rx_lock); | ||
787 | |||
788 | if (!mpc8xxx_dummy_rx) | ||
789 | mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); | ||
790 | if (mpc8xxx_dummy_rx) | ||
791 | mpc8xxx_dummy_rx_refcnt++; | ||
792 | |||
793 | mutex_unlock(&mpc8xxx_dummy_rx_lock); | ||
794 | |||
795 | return mpc8xxx_dummy_rx; | ||
796 | } | ||
797 | |||
798 | static void mpc8xxx_spi_free_dummy_rx(void) | ||
799 | { | ||
800 | mutex_lock(&mpc8xxx_dummy_rx_lock); | ||
801 | |||
802 | switch (mpc8xxx_dummy_rx_refcnt) { | ||
803 | case 0: | 666 | case 0: |
804 | WARN_ON(1); | 667 | WARN_ON(1); |
805 | break; | 668 | break; |
806 | case 1: | 669 | case 1: |
807 | kfree(mpc8xxx_dummy_rx); | 670 | kfree(fsl_dummy_rx); |
808 | mpc8xxx_dummy_rx = NULL; | 671 | fsl_dummy_rx = NULL; |
809 | /* fall through */ | 672 | /* fall through */ |
810 | default: | 673 | default: |
811 | mpc8xxx_dummy_rx_refcnt--; | 674 | fsl_dummy_rx_refcnt--; |
812 | break; | 675 | break; |
813 | } | 676 | } |
814 | 677 | ||
815 | mutex_unlock(&mpc8xxx_dummy_rx_lock); | 678 | mutex_unlock(&fsl_dummy_rx_lock); |
816 | } | 679 | } |
817 | 680 | ||
818 | static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) | 681 | static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) |
819 | { | 682 | { |
820 | struct device *dev = mspi->dev; | 683 | struct device *dev = mspi->dev; |
821 | struct device_node *np = dev->of_node; | 684 | struct device_node *np = dev->of_node; |
@@ -869,7 +732,7 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) | |||
869 | return pram_ofs; | 732 | return pram_ofs; |
870 | } | 733 | } |
871 | 734 | ||
872 | static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) | 735 | static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) |
873 | { | 736 | { |
874 | struct device *dev = mspi->dev; | 737 | struct device *dev = mspi->dev; |
875 | struct device_node *np = dev->of_node; | 738 | struct device_node *np = dev->of_node; |
@@ -881,7 +744,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
881 | if (!(mspi->flags & SPI_CPM_MODE)) | 744 | if (!(mspi->flags & SPI_CPM_MODE)) |
882 | return 0; | 745 | return 0; |
883 | 746 | ||
884 | if (!mpc8xxx_spi_alloc_dummy_rx()) | 747 | if (!fsl_spi_alloc_dummy_rx()) |
885 | return -ENOMEM; | 748 | return -ENOMEM; |
886 | 749 | ||
887 | if (mspi->flags & SPI_QE) { | 750 | if (mspi->flags & SPI_QE) { |
@@ -902,7 +765,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
902 | } | 765 | } |
903 | } | 766 | } |
904 | 767 | ||
905 | pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi); | 768 | pram_ofs = fsl_spi_cpm_get_pram(mspi); |
906 | if (IS_ERR_VALUE(pram_ofs)) { | 769 | if (IS_ERR_VALUE(pram_ofs)) { |
907 | dev_err(dev, "can't allocate spi parameter ram\n"); | 770 | dev_err(dev, "can't allocate spi parameter ram\n"); |
908 | goto err_pram; | 771 | goto err_pram; |
@@ -922,7 +785,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
922 | goto err_dummy_tx; | 785 | goto err_dummy_tx; |
923 | } | 786 | } |
924 | 787 | ||
925 | mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR, | 788 | mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, |
926 | DMA_FROM_DEVICE); | 789 | DMA_FROM_DEVICE); |
927 | if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { | 790 | if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { |
928 | dev_err(dev, "unable to map dummy rx buffer\n"); | 791 | dev_err(dev, "unable to map dummy rx buffer\n"); |
@@ -960,11 +823,11 @@ err_dummy_tx: | |||
960 | err_bds: | 823 | err_bds: |
961 | cpm_muram_free(pram_ofs); | 824 | cpm_muram_free(pram_ofs); |
962 | err_pram: | 825 | err_pram: |
963 | mpc8xxx_spi_free_dummy_rx(); | 826 | fsl_spi_free_dummy_rx(); |
964 | return -ENOMEM; | 827 | return -ENOMEM; |
965 | } | 828 | } |
966 | 829 | ||
967 | static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) | 830 | static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) |
968 | { | 831 | { |
969 | struct device *dev = mspi->dev; | 832 | struct device *dev = mspi->dev; |
970 | 833 | ||
@@ -972,30 +835,22 @@ static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) | |||
972 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); | 835 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); |
973 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); | 836 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); |
974 | cpm_muram_free(cpm_muram_offset(mspi->pram)); | 837 | cpm_muram_free(cpm_muram_offset(mspi->pram)); |
975 | mpc8xxx_spi_free_dummy_rx(); | 838 | fsl_spi_free_dummy_rx(); |
976 | } | 839 | } |
977 | 840 | ||
978 | static const char *mpc8xxx_spi_strmode(unsigned int flags) | 841 | static void fsl_spi_remove(struct mpc8xxx_spi *mspi) |
979 | { | 842 | { |
980 | if (flags & SPI_QE_CPU_MODE) { | 843 | iounmap(mspi->reg_base); |
981 | return "QE CPU"; | 844 | fsl_spi_cpm_free(mspi); |
982 | } else if (flags & SPI_CPM_MODE) { | ||
983 | if (flags & SPI_QE) | ||
984 | return "QE"; | ||
985 | else if (flags & SPI_CPM2) | ||
986 | return "CPM2"; | ||
987 | else | ||
988 | return "CPM1"; | ||
989 | } | ||
990 | return "CPU"; | ||
991 | } | 845 | } |
992 | 846 | ||
993 | static struct spi_master * __devinit | 847 | static struct spi_master * __devinit fsl_spi_probe(struct device *dev, |
994 | mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) | 848 | struct resource *mem, unsigned int irq) |
995 | { | 849 | { |
996 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 850 | struct fsl_spi_platform_data *pdata = dev->platform_data; |
997 | struct spi_master *master; | 851 | struct spi_master *master; |
998 | struct mpc8xxx_spi *mpc8xxx_spi; | 852 | struct mpc8xxx_spi *mpc8xxx_spi; |
853 | struct fsl_spi_reg *reg_base; | ||
999 | u32 regval; | 854 | u32 regval; |
1000 | int ret = 0; | 855 | int ret = 0; |
1001 | 856 | ||
@@ -1007,132 +862,77 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) | |||
1007 | 862 | ||
1008 | dev_set_drvdata(dev, master); | 863 | dev_set_drvdata(dev, master); |
1009 | 864 | ||
1010 | /* the spi->mode bits understood by this driver: */ | 865 | ret = mpc8xxx_spi_probe(dev, mem, irq); |
1011 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | 866 | if (ret) |
1012 | | SPI_LSB_FIRST | SPI_LOOP; | 867 | goto err_probe; |
1013 | 868 | ||
1014 | master->setup = mpc8xxx_spi_setup; | 869 | master->setup = fsl_spi_setup; |
1015 | master->transfer = mpc8xxx_spi_transfer; | ||
1016 | master->cleanup = mpc8xxx_spi_cleanup; | ||
1017 | master->dev.of_node = dev->of_node; | ||
1018 | 870 | ||
1019 | mpc8xxx_spi = spi_master_get_devdata(master); | 871 | mpc8xxx_spi = spi_master_get_devdata(master); |
1020 | mpc8xxx_spi->dev = dev; | 872 | mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; |
1021 | mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; | 873 | mpc8xxx_spi->spi_remove = fsl_spi_remove; |
1022 | mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; | 874 | |
1023 | mpc8xxx_spi->flags = pdata->flags; | ||
1024 | mpc8xxx_spi->spibrg = pdata->sysclk; | ||
1025 | 875 | ||
1026 | ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi); | 876 | ret = fsl_spi_cpm_init(mpc8xxx_spi); |
1027 | if (ret) | 877 | if (ret) |
1028 | goto err_cpm_init; | 878 | goto err_cpm_init; |
1029 | 879 | ||
1030 | mpc8xxx_spi->rx_shift = 0; | ||
1031 | mpc8xxx_spi->tx_shift = 0; | ||
1032 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { | 880 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { |
1033 | mpc8xxx_spi->rx_shift = 16; | 881 | mpc8xxx_spi->rx_shift = 16; |
1034 | mpc8xxx_spi->tx_shift = 24; | 882 | mpc8xxx_spi->tx_shift = 24; |
1035 | } | 883 | } |
1036 | 884 | ||
1037 | init_completion(&mpc8xxx_spi->done); | 885 | mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); |
1038 | 886 | if (mpc8xxx_spi->reg_base == NULL) { | |
1039 | mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem)); | ||
1040 | if (mpc8xxx_spi->base == NULL) { | ||
1041 | ret = -ENOMEM; | 887 | ret = -ENOMEM; |
1042 | goto err_ioremap; | 888 | goto err_ioremap; |
1043 | } | 889 | } |
1044 | 890 | ||
1045 | mpc8xxx_spi->irq = irq; | ||
1046 | |||
1047 | /* Register for SPI Interrupt */ | 891 | /* Register for SPI Interrupt */ |
1048 | ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq, | 892 | ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, |
1049 | 0, "mpc8xxx_spi", mpc8xxx_spi); | 893 | 0, "fsl_spi", mpc8xxx_spi); |
1050 | 894 | ||
1051 | if (ret != 0) | 895 | if (ret != 0) |
1052 | goto unmap_io; | 896 | goto free_irq; |
1053 | 897 | ||
1054 | master->bus_num = pdata->bus_num; | 898 | reg_base = mpc8xxx_spi->reg_base; |
1055 | master->num_chipselect = pdata->max_chipselect; | ||
1056 | 899 | ||
1057 | /* SPI controller initializations */ | 900 | /* SPI controller initializations */ |
1058 | mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0); | 901 | mpc8xxx_spi_write_reg(®_base->mode, 0); |
1059 | mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); | 902 | mpc8xxx_spi_write_reg(®_base->mask, 0); |
1060 | mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0); | 903 | mpc8xxx_spi_write_reg(®_base->command, 0); |
1061 | mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff); | 904 | mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); |
1062 | 905 | ||
1063 | /* Enable SPI interface */ | 906 | /* Enable SPI interface */ |
1064 | regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; | 907 | regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; |
1065 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) | 908 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) |
1066 | regval |= SPMODE_OP; | 909 | regval |= SPMODE_OP; |
1067 | 910 | ||
1068 | mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); | 911 | mpc8xxx_spi_write_reg(®_base->mode, regval); |
1069 | spin_lock_init(&mpc8xxx_spi->lock); | ||
1070 | init_completion(&mpc8xxx_spi->done); | ||
1071 | INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); | ||
1072 | INIT_LIST_HEAD(&mpc8xxx_spi->queue); | ||
1073 | |||
1074 | mpc8xxx_spi->workqueue = create_singlethread_workqueue( | ||
1075 | dev_name(master->dev.parent)); | ||
1076 | if (mpc8xxx_spi->workqueue == NULL) { | ||
1077 | ret = -EBUSY; | ||
1078 | goto free_irq; | ||
1079 | } | ||
1080 | 912 | ||
1081 | ret = spi_register_master(master); | 913 | ret = spi_register_master(master); |
1082 | if (ret < 0) | 914 | if (ret < 0) |
1083 | goto unreg_master; | 915 | goto unreg_master; |
1084 | 916 | ||
1085 | dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base, | 917 | dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, |
1086 | mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); | 918 | mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); |
1087 | 919 | ||
1088 | return master; | 920 | return master; |
1089 | 921 | ||
1090 | unreg_master: | 922 | unreg_master: |
1091 | destroy_workqueue(mpc8xxx_spi->workqueue); | ||
1092 | free_irq: | ||
1093 | free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); | 923 | free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); |
1094 | unmap_io: | 924 | free_irq: |
1095 | iounmap(mpc8xxx_spi->base); | 925 | iounmap(mpc8xxx_spi->reg_base); |
1096 | err_ioremap: | 926 | err_ioremap: |
1097 | mpc8xxx_spi_cpm_free(mpc8xxx_spi); | 927 | fsl_spi_cpm_free(mpc8xxx_spi); |
1098 | err_cpm_init: | 928 | err_cpm_init: |
929 | err_probe: | ||
1099 | spi_master_put(master); | 930 | spi_master_put(master); |
1100 | err: | 931 | err: |
1101 | return ERR_PTR(ret); | 932 | return ERR_PTR(ret); |
1102 | } | 933 | } |
1103 | 934 | ||
1104 | static int __devexit mpc8xxx_spi_remove(struct device *dev) | 935 | static void fsl_spi_cs_control(struct spi_device *spi, bool on) |
1105 | { | ||
1106 | struct mpc8xxx_spi *mpc8xxx_spi; | ||
1107 | struct spi_master *master; | ||
1108 | |||
1109 | master = dev_get_drvdata(dev); | ||
1110 | mpc8xxx_spi = spi_master_get_devdata(master); | ||
1111 | |||
1112 | flush_workqueue(mpc8xxx_spi->workqueue); | ||
1113 | destroy_workqueue(mpc8xxx_spi->workqueue); | ||
1114 | spi_unregister_master(master); | ||
1115 | |||
1116 | free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); | ||
1117 | iounmap(mpc8xxx_spi->base); | ||
1118 | mpc8xxx_spi_cpm_free(mpc8xxx_spi); | ||
1119 | |||
1120 | return 0; | ||
1121 | } | ||
1122 | |||
1123 | struct mpc8xxx_spi_probe_info { | ||
1124 | struct fsl_spi_platform_data pdata; | ||
1125 | int *gpios; | ||
1126 | bool *alow_flags; | ||
1127 | }; | ||
1128 | |||
1129 | static struct mpc8xxx_spi_probe_info * | ||
1130 | to_of_pinfo(struct fsl_spi_platform_data *pdata) | ||
1131 | { | ||
1132 | return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); | ||
1133 | } | ||
1134 | |||
1135 | static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) | ||
1136 | { | 936 | { |
1137 | struct device *dev = spi->dev.parent; | 937 | struct device *dev = spi->dev.parent; |
1138 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); | 938 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); |
@@ -1143,7 +943,7 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) | |||
1143 | gpio_set_value(gpio, on ^ alow); | 943 | gpio_set_value(gpio, on ^ alow); |
1144 | } | 944 | } |
1145 | 945 | ||
1146 | static int of_mpc8xxx_spi_get_chipselects(struct device *dev) | 946 | static int of_fsl_spi_get_chipselects(struct device *dev) |
1147 | { | 947 | { |
1148 | struct device_node *np = dev->of_node; | 948 | struct device_node *np = dev->of_node; |
1149 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 949 | struct fsl_spi_platform_data *pdata = dev->platform_data; |
@@ -1204,7 +1004,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev) | |||
1204 | } | 1004 | } |
1205 | 1005 | ||
1206 | pdata->max_chipselect = ngpios; | 1006 | pdata->max_chipselect = ngpios; |
1207 | pdata->cs_control = mpc8xxx_spi_cs_control; | 1007 | pdata->cs_control = fsl_spi_cs_control; |
1208 | 1008 | ||
1209 | return 0; | 1009 | return 0; |
1210 | 1010 | ||
@@ -1223,7 +1023,7 @@ err_alloc_flags: | |||
1223 | return ret; | 1023 | return ret; |
1224 | } | 1024 | } |
1225 | 1025 | ||
1226 | static int of_mpc8xxx_spi_free_chipselects(struct device *dev) | 1026 | static int of_fsl_spi_free_chipselects(struct device *dev) |
1227 | { | 1027 | { |
1228 | struct fsl_spi_platform_data *pdata = dev->platform_data; | 1028 | struct fsl_spi_platform_data *pdata = dev->platform_data; |
1229 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); | 1029 | struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); |
@@ -1242,50 +1042,20 @@ static int of_mpc8xxx_spi_free_chipselects(struct device *dev) | |||
1242 | return 0; | 1042 | return 0; |
1243 | } | 1043 | } |
1244 | 1044 | ||
1245 | static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, | 1045 | static int __devinit of_fsl_spi_probe(struct platform_device *ofdev) |
1246 | const struct of_device_id *ofid) | ||
1247 | { | 1046 | { |
1248 | struct device *dev = &ofdev->dev; | 1047 | struct device *dev = &ofdev->dev; |
1249 | struct device_node *np = ofdev->dev.of_node; | 1048 | struct device_node *np = ofdev->dev.of_node; |
1250 | struct mpc8xxx_spi_probe_info *pinfo; | ||
1251 | struct fsl_spi_platform_data *pdata; | ||
1252 | struct spi_master *master; | 1049 | struct spi_master *master; |
1253 | struct resource mem; | 1050 | struct resource mem; |
1254 | struct resource irq; | 1051 | struct resource irq; |
1255 | const void *prop; | ||
1256 | int ret = -ENOMEM; | 1052 | int ret = -ENOMEM; |
1257 | 1053 | ||
1258 | pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); | 1054 | ret = of_mpc8xxx_spi_probe(ofdev); |
1259 | if (!pinfo) | 1055 | if (ret) |
1260 | return -ENOMEM; | 1056 | return ret; |
1261 | |||
1262 | pdata = &pinfo->pdata; | ||
1263 | dev->platform_data = pdata; | ||
1264 | |||
1265 | /* Allocate bus num dynamically. */ | ||
1266 | pdata->bus_num = -1; | ||
1267 | |||
1268 | /* SPI controller is either clocked from QE or SoC clock. */ | ||
1269 | pdata->sysclk = get_brgfreq(); | ||
1270 | if (pdata->sysclk == -1) { | ||
1271 | pdata->sysclk = fsl_get_sys_freq(); | ||
1272 | if (pdata->sysclk == -1) { | ||
1273 | ret = -ENODEV; | ||
1274 | goto err_clk; | ||
1275 | } | ||
1276 | } | ||
1277 | 1057 | ||
1278 | prop = of_get_property(np, "mode", NULL); | 1058 | ret = of_fsl_spi_get_chipselects(dev); |
1279 | if (prop && !strcmp(prop, "cpu-qe")) | ||
1280 | pdata->flags = SPI_QE_CPU_MODE; | ||
1281 | else if (prop && !strcmp(prop, "qe")) | ||
1282 | pdata->flags = SPI_CPM_MODE | SPI_QE; | ||
1283 | else if (of_device_is_compatible(np, "fsl,cpm2-spi")) | ||
1284 | pdata->flags = SPI_CPM_MODE | SPI_CPM2; | ||
1285 | else if (of_device_is_compatible(np, "fsl,cpm1-spi")) | ||
1286 | pdata->flags = SPI_CPM_MODE | SPI_CPM1; | ||
1287 | |||
1288 | ret = of_mpc8xxx_spi_get_chipselects(dev); | ||
1289 | if (ret) | 1059 | if (ret) |
1290 | goto err; | 1060 | goto err; |
1291 | 1061 | ||
@@ -1299,7 +1069,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, | |||
1299 | goto err; | 1069 | goto err; |
1300 | } | 1070 | } |
1301 | 1071 | ||
1302 | master = mpc8xxx_spi_probe(dev, &mem, irq.start); | 1072 | master = fsl_spi_probe(dev, &mem, irq.start); |
1303 | if (IS_ERR(master)) { | 1073 | if (IS_ERR(master)) { |
1304 | ret = PTR_ERR(master); | 1074 | ret = PTR_ERR(master); |
1305 | goto err; | 1075 | goto err; |
@@ -1308,42 +1078,40 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, | |||
1308 | return 0; | 1078 | return 0; |
1309 | 1079 | ||
1310 | err: | 1080 | err: |
1311 | of_mpc8xxx_spi_free_chipselects(dev); | 1081 | of_fsl_spi_free_chipselects(dev); |
1312 | err_clk: | ||
1313 | kfree(pinfo); | ||
1314 | return ret; | 1082 | return ret; |
1315 | } | 1083 | } |
1316 | 1084 | ||
1317 | static int __devexit of_mpc8xxx_spi_remove(struct platform_device *ofdev) | 1085 | static int __devexit of_fsl_spi_remove(struct platform_device *ofdev) |
1318 | { | 1086 | { |
1319 | int ret; | 1087 | int ret; |
1320 | 1088 | ||
1321 | ret = mpc8xxx_spi_remove(&ofdev->dev); | 1089 | ret = mpc8xxx_spi_remove(&ofdev->dev); |
1322 | if (ret) | 1090 | if (ret) |
1323 | return ret; | 1091 | return ret; |
1324 | of_mpc8xxx_spi_free_chipselects(&ofdev->dev); | 1092 | of_fsl_spi_free_chipselects(&ofdev->dev); |
1325 | return 0; | 1093 | return 0; |
1326 | } | 1094 | } |
1327 | 1095 | ||
1328 | static const struct of_device_id of_mpc8xxx_spi_match[] = { | 1096 | static const struct of_device_id of_fsl_spi_match[] = { |
1329 | { .compatible = "fsl,spi" }, | 1097 | { .compatible = "fsl,spi" }, |
1330 | {}, | 1098 | {} |
1331 | }; | 1099 | }; |
1332 | MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); | 1100 | MODULE_DEVICE_TABLE(of, of_fsl_spi_match); |
1333 | 1101 | ||
1334 | static struct of_platform_driver of_mpc8xxx_spi_driver = { | 1102 | static struct platform_driver of_fsl_spi_driver = { |
1335 | .driver = { | 1103 | .driver = { |
1336 | .name = "mpc8xxx_spi", | 1104 | .name = "fsl_spi", |
1337 | .owner = THIS_MODULE, | 1105 | .owner = THIS_MODULE, |
1338 | .of_match_table = of_mpc8xxx_spi_match, | 1106 | .of_match_table = of_fsl_spi_match, |
1339 | }, | 1107 | }, |
1340 | .probe = of_mpc8xxx_spi_probe, | 1108 | .probe = of_fsl_spi_probe, |
1341 | .remove = __devexit_p(of_mpc8xxx_spi_remove), | 1109 | .remove = __devexit_p(of_fsl_spi_remove), |
1342 | }; | 1110 | }; |
1343 | 1111 | ||
1344 | #ifdef CONFIG_MPC832x_RDB | 1112 | #ifdef CONFIG_MPC832x_RDB |
1345 | /* | 1113 | /* |
1346 | * XXX XXX XXX | 1114 | * XXX XXX XXX |
1347 | * This is "legacy" platform driver, was used by the MPC8323E-RDB boards | 1115 | * This is "legacy" platform driver, was used by the MPC8323E-RDB boards |
1348 | * only. The driver should go away soon, since newer MPC8323E-RDB's device | 1116 | * only. The driver should go away soon, since newer MPC8323E-RDB's device |
1349 | * tree can work with OpenFirmware driver. But for now we support old trees | 1117 | * tree can work with OpenFirmware driver. But for now we support old trees |
@@ -1366,7 +1134,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) | |||
1366 | if (irq <= 0) | 1134 | if (irq <= 0) |
1367 | return -EINVAL; | 1135 | return -EINVAL; |
1368 | 1136 | ||
1369 | master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); | 1137 | master = fsl_spi_probe(&pdev->dev, mem, irq); |
1370 | if (IS_ERR(master)) | 1138 | if (IS_ERR(master)) |
1371 | return PTR_ERR(master); | 1139 | return PTR_ERR(master); |
1372 | return 0; | 1140 | return 0; |
@@ -1405,21 +1173,20 @@ static void __init legacy_driver_register(void) {} | |||
1405 | static void __exit legacy_driver_unregister(void) {} | 1173 | static void __exit legacy_driver_unregister(void) {} |
1406 | #endif /* CONFIG_MPC832x_RDB */ | 1174 | #endif /* CONFIG_MPC832x_RDB */ |
1407 | 1175 | ||
1408 | static int __init mpc8xxx_spi_init(void) | 1176 | static int __init fsl_spi_init(void) |
1409 | { | 1177 | { |
1410 | legacy_driver_register(); | 1178 | legacy_driver_register(); |
1411 | return of_register_platform_driver(&of_mpc8xxx_spi_driver); | 1179 | return platform_driver_register(&of_fsl_spi_driver); |
1412 | } | 1180 | } |
1181 | module_init(fsl_spi_init); | ||
1413 | 1182 | ||
1414 | static void __exit mpc8xxx_spi_exit(void) | 1183 | static void __exit fsl_spi_exit(void) |
1415 | { | 1184 | { |
1416 | of_unregister_platform_driver(&of_mpc8xxx_spi_driver); | 1185 | platform_driver_unregister(&of_fsl_spi_driver); |
1417 | legacy_driver_unregister(); | 1186 | legacy_driver_unregister(); |
1418 | } | 1187 | } |
1419 | 1188 | module_exit(fsl_spi_exit); | |
1420 | module_init(mpc8xxx_spi_init); | ||
1421 | module_exit(mpc8xxx_spi_exit); | ||
1422 | 1189 | ||
1423 | MODULE_AUTHOR("Kumar Gala"); | 1190 | MODULE_AUTHOR("Kumar Gala"); |
1424 | MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver"); | 1191 | MODULE_DESCRIPTION("Simple Freescale SPI Driver"); |
1425 | MODULE_LICENSE("GPL"); | 1192 | MODULE_LICENSE("GPL"); |
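
One pattern in the renamed helpers above that is easy to miss is the shared dummy receive buffer: fsl_spi_alloc_dummy_rx()/fsl_spi_free_dummy_rx() lazily allocate a single SPI_MRBLR-sized buffer under a mutex and reference-count it across all CPM-mode controllers, so the last controller torn down frees it. A self-contained user-space sketch of the same idiom, with a pthread mutex and malloc standing in for the kernel mutex and kmalloc, and the size as a stand-in for SPI_MRBLR (one page):

#include <pthread.h>
#include <stdlib.h>

#define DUMMY_RX_SIZE 4096	/* stands in for SPI_MRBLR */

static void *dummy_rx;
static pthread_mutex_t dummy_rx_lock = PTHREAD_MUTEX_INITIALIZER;
static int dummy_rx_refcnt;

/* First caller allocates the buffer; later callers only bump the count. */
static void *dummy_rx_get(void)
{
	pthread_mutex_lock(&dummy_rx_lock);
	if (!dummy_rx)
		dummy_rx = malloc(DUMMY_RX_SIZE);
	if (dummy_rx)
		dummy_rx_refcnt++;
	pthread_mutex_unlock(&dummy_rx_lock);
	return dummy_rx;
}

/* Last caller frees the buffer; the fall-through mirrors the driver. */
static void dummy_rx_put(void)
{
	pthread_mutex_lock(&dummy_rx_lock);
	switch (dummy_rx_refcnt) {
	case 0:
		break;			/* unbalanced put: nothing to drop */
	case 1:
		free(dummy_rx);
		dummy_rx = NULL;
		/* fall through */
	default:
		dummy_rx_refcnt--;
		break;
	}
	pthread_mutex_unlock(&dummy_rx_lock);
}

int main(void)
{
	dummy_rx_get();		/* first controller: allocates */
	dummy_rx_get();		/* second controller: refcount only */
	dummy_rx_put();
	dummy_rx_put();		/* last put: frees the buffer */
	return 0;
}
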
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 7972e9077473..69d6dba67c19 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c | |||
@@ -56,7 +56,27 @@ struct spi_imx_config { | |||
56 | unsigned int speed_hz; | 56 | unsigned int speed_hz; |
57 | unsigned int bpw; | 57 | unsigned int bpw; |
58 | unsigned int mode; | 58 | unsigned int mode; |
59 | int cs; | 59 | u8 cs; |
60 | }; | ||
61 | |||
62 | enum spi_imx_devtype { | ||
63 | SPI_IMX_VER_IMX1, | ||
64 | SPI_IMX_VER_0_0, | ||
65 | SPI_IMX_VER_0_4, | ||
66 | SPI_IMX_VER_0_5, | ||
67 | SPI_IMX_VER_0_7, | ||
68 | SPI_IMX_VER_2_3, | ||
69 | }; | ||
70 | |||
71 | struct spi_imx_data; | ||
72 | |||
73 | struct spi_imx_devtype_data { | ||
74 | void (*intctrl)(struct spi_imx_data *, int); | ||
75 | int (*config)(struct spi_imx_data *, struct spi_imx_config *); | ||
76 | void (*trigger)(struct spi_imx_data *); | ||
77 | int (*rx_available)(struct spi_imx_data *); | ||
78 | void (*reset)(struct spi_imx_data *); | ||
79 | unsigned int fifosize; | ||
60 | }; | 80 | }; |
61 | 81 | ||
62 | struct spi_imx_data { | 82 | struct spi_imx_data { |
@@ -76,11 +96,7 @@ struct spi_imx_data { | |||
76 | const void *tx_buf; | 96 | const void *tx_buf; |
77 | unsigned int txfifo; /* number of words pushed in tx FIFO */ | 97 | unsigned int txfifo; /* number of words pushed in tx FIFO */ |
78 | 98 | ||
79 | /* SoC specific functions */ | 99 | struct spi_imx_devtype_data devtype_data; |
80 | void (*intctrl)(struct spi_imx_data *, int); | ||
81 | int (*config)(struct spi_imx_data *, struct spi_imx_config *); | ||
82 | void (*trigger)(struct spi_imx_data *); | ||
83 | int (*rx_available)(struct spi_imx_data *); | ||
84 | }; | 100 | }; |
85 | 101 | ||
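With the SoC-specific callbacks collected into devtype_data, the rest of the driver (see the spi_imx_push(), ISR and setupxfer hunks below) dispatches through that single member instead of testing cpu_is_mx*() at run time. A minimal sketch of the calling side, using only fields shown in this file; the wrapper function itself is illustrative, not part of the driver:

static void example_kick_transfer(struct spi_imx_data *spi_imx,
		struct spi_imx_config *config)
{
	/* program the SoC-specific control/config registers */
	spi_imx->devtype_data.config(spi_imx, config);
	/* start the exchange, then enable the TX-empty interrupt */
	spi_imx->devtype_data.trigger(spi_imx);
	spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE);
}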
86 | #define MXC_SPI_BUF_RX(type) \ | 102 | #define MXC_SPI_BUF_RX(type) \ |
@@ -140,7 +156,7 @@ static unsigned int spi_imx_clkdiv_1(unsigned int fin, | |||
140 | return max; | 156 | return max; |
141 | } | 157 | } |
142 | 158 | ||
143 | /* MX1, MX31, MX35 */ | 159 | /* MX1, MX31, MX35, MX51 CSPI */ |
144 | static unsigned int spi_imx_clkdiv_2(unsigned int fin, | 160 | static unsigned int spi_imx_clkdiv_2(unsigned int fin, |
145 | unsigned int fspi) | 161 | unsigned int fspi) |
146 | { | 162 | { |
@@ -155,6 +171,134 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, | |||
155 | return 7; | 171 | return 7; |
156 | } | 172 | } |
157 | 173 | ||
174 | #define SPI_IMX2_3_CTRL 0x08 | ||
175 | #define SPI_IMX2_3_CTRL_ENABLE (1 << 0) | ||
176 | #define SPI_IMX2_3_CTRL_XCH (1 << 2) | ||
177 | #define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4) | ||
178 | #define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8 | ||
179 | #define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12 | ||
180 | #define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18) | ||
181 | #define SPI_IMX2_3_CTRL_BL_OFFSET 20 | ||
182 | |||
183 | #define SPI_IMX2_3_CONFIG 0x0c | ||
184 | #define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) | ||
185 | #define SPI_IMX2_3_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) | ||
186 | #define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) | ||
187 | #define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) | ||
188 | |||
189 | #define SPI_IMX2_3_INT 0x10 | ||
190 | #define SPI_IMX2_3_INT_TEEN (1 << 0) | ||
191 | #define SPI_IMX2_3_INT_RREN (1 << 3) | ||
192 | |||
193 | #define SPI_IMX2_3_STAT 0x18 | ||
194 | #define SPI_IMX2_3_STAT_RR (1 << 3) | ||
195 | |||
196 | /* MX51 eCSPI */ | ||
197 | static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi) | ||
198 | { | ||
199 | /* | ||
200 | * there are two 4-bit dividers, the pre-divider divides by | ||
201 | * $pre, the post-divider by 2^$post | ||
202 | */ | ||
203 | unsigned int pre, post; | ||
204 | |||
205 | if (unlikely(fspi > fin)) | ||
206 | return 0; | ||
207 | |||
208 | post = fls(fin) - fls(fspi); | ||
209 | if (fin > fspi << post) | ||
210 | post++; | ||
211 | |||
212 | /* now we have: (fin <= fspi << post) with post being minimal */ | ||
213 | |||
214 | post = max(4U, post) - 4; | ||
215 | if (unlikely(post > 0xf)) { | ||
216 | pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", | ||
217 | __func__, fspi, fin); | ||
218 | return 0xff; | ||
219 | } | ||
220 | |||
221 | pre = DIV_ROUND_UP(fin, fspi << post) - 1; | ||
222 | |||
223 | pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", | ||
224 | __func__, fin, fspi, post, pre); | ||
225 | return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) | | ||
226 | (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET); | ||
227 | } | ||
228 | |||
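A worked example of the arithmetic above with illustrative numbers (fin = 60 MHz reference clock, fspi = 1 MHz requested):

/*
 * post = fls(60000000) - fls(1000000) = 26 - 20 = 6
 * 60000000 <= (1000000 << 6) = 64000000, so post is not incremented
 * post = max(4, 6) - 4 = 2                      -> post-divider field
 * pre  = DIV_ROUND_UP(60000000, 1000000 << 2) - 1 = 15 - 1 = 14
 *
 * Assuming the hardware divides by (pre + 1) and by 2^post, as the "- 1"
 * in the pre calculation suggests, SCLK = 60 MHz / (15 * 4) = 1 MHz.
 */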
229 | static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable) | ||
230 | { | ||
231 | unsigned val = 0; | ||
232 | |||
233 | if (enable & MXC_INT_TE) | ||
234 | val |= SPI_IMX2_3_INT_TEEN; | ||
235 | |||
236 | if (enable & MXC_INT_RR) | ||
237 | val |= SPI_IMX2_3_INT_RREN; | ||
238 | |||
239 | writel(val, spi_imx->base + SPI_IMX2_3_INT); | ||
240 | } | ||
241 | |||
242 | static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx) | ||
243 | { | ||
244 | u32 reg; | ||
245 | |||
246 | reg = readl(spi_imx->base + SPI_IMX2_3_CTRL); | ||
247 | reg |= SPI_IMX2_3_CTRL_XCH; | ||
248 | writel(reg, spi_imx->base + SPI_IMX2_3_CTRL); | ||
249 | } | ||
250 | |||
251 | static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx, | ||
252 | struct spi_imx_config *config) | ||
253 | { | ||
254 | u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0; | ||
255 | |||
256 | /* | ||
257 | * The hardware seems to have a race condition when changing modes. The | ||
258 | * current assumption is that the selection of the channel arrives | ||
259 | * earlier in the hardware than the mode bits when they are written at | ||
260 | * the same time. | ||
261 | * So set master mode for all channels as we do not support slave mode. | ||
262 | */ | ||
263 | ctrl |= SPI_IMX2_3_CTRL_MODE_MASK; | ||
264 | |||
265 | /* set clock speed */ | ||
266 | ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz); | ||
267 | |||
268 | /* set chip select to use */ | ||
269 | ctrl |= SPI_IMX2_3_CTRL_CS(config->cs); | ||
270 | |||
271 | ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET; | ||
272 | |||
273 | cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs); | ||
274 | |||
275 | if (config->mode & SPI_CPHA) | ||
276 | cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs); | ||
277 | |||
278 | if (config->mode & SPI_CPOL) | ||
279 | cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs); | ||
280 | |||
281 | if (config->mode & SPI_CS_HIGH) | ||
282 | cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs); | ||
283 | |||
284 | writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL); | ||
285 | writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG); | ||
286 | |||
287 | return 0; | ||
288 | } | ||
289 | |||
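For concreteness, the values this routine composes for an 8-bit, SPI mode 3 (CPOL = CPHA = 1) transfer on chip select 0, reusing the 1 MHz divider worked out above; the numbers follow directly from the SPI_IMX2_3_* macros and are purely illustrative:

/*
 * ctrl = ENABLE | MODE_MASK | (pre = 14) << 12 | (post = 2) << 8
 *        | CS(0) | (8 - 1) << 20
 *      = 0x1 | 0xf0 | 0xe000 | 0x200 | 0x0 | 0x700000 = 0x0070e2f1
 * cfg  = SBBCTRL(0) | SCLKPHA(0) | SCLKPOL(0) = 0x100 | 0x1 | 0x10 = 0x111
 * (SSBPOL(0) would additionally be OR-ed in only for SPI_CS_HIGH.)
 */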
290 | static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx) | ||
291 | { | ||
292 | return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR; | ||
293 | } | ||
294 | |||
295 | static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx) | ||
296 | { | ||
297 | /* drain receive buffer */ | ||
298 | while (spi_imx2_3_rx_available(spi_imx)) | ||
299 | readl(spi_imx->base + MXC_CSPIRXDATA); | ||
300 | } | ||
301 | |||
158 | #define MX31_INTREG_TEEN (1 << 0) | 302 | #define MX31_INTREG_TEEN (1 << 0) |
159 | #define MX31_INTREG_RREN (1 << 3) | 303 | #define MX31_INTREG_RREN (1 << 3) |
160 | 304 | ||
@@ -178,7 +322,7 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, | |||
178 | * the i.MX35 has a slightly different register layout for bits | 322 | * the i.MX35 has a slightly different register layout for bits |
179 | * we do not use here. | 323 | * we do not use here. |
180 | */ | 324 | */ |
181 | static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) | 325 | static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable) |
182 | { | 326 | { |
183 | unsigned int val = 0; | 327 | unsigned int val = 0; |
184 | 328 | ||
@@ -190,7 +334,7 @@ static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) | |||
190 | writel(val, spi_imx->base + MXC_CSPIINT); | 334 | writel(val, spi_imx->base + MXC_CSPIINT); |
191 | } | 335 | } |
192 | 336 | ||
193 | static void mx31_trigger(struct spi_imx_data *spi_imx) | 337 | static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx) |
194 | { | 338 | { |
195 | unsigned int reg; | 339 | unsigned int reg; |
196 | 340 | ||
@@ -199,20 +343,16 @@ static void mx31_trigger(struct spi_imx_data *spi_imx) | |||
199 | writel(reg, spi_imx->base + MXC_CSPICTRL); | 343 | writel(reg, spi_imx->base + MXC_CSPICTRL); |
200 | } | 344 | } |
201 | 345 | ||
202 | static int mx31_config(struct spi_imx_data *spi_imx, | 346 | static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx, |
203 | struct spi_imx_config *config) | 347 | struct spi_imx_config *config) |
204 | { | 348 | { |
205 | unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; | 349 | unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; |
350 | int cs = spi_imx->chipselect[config->cs]; | ||
206 | 351 | ||
207 | reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << | 352 | reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << |
208 | MX31_CSPICTRL_DR_SHIFT; | 353 | MX31_CSPICTRL_DR_SHIFT; |
209 | 354 | ||
210 | if (cpu_is_mx31()) | 355 | reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; |
211 | reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; | ||
212 | else if (cpu_is_mx25() || cpu_is_mx35()) { | ||
213 | reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; | ||
214 | reg |= MX31_CSPICTRL_SSCTL; | ||
215 | } | ||
216 | 356 | ||
217 | if (config->mode & SPI_CPHA) | 357 | if (config->mode & SPI_CPHA) |
218 | reg |= MX31_CSPICTRL_PHA; | 358 | reg |= MX31_CSPICTRL_PHA; |
@@ -220,23 +360,52 @@ static int mx31_config(struct spi_imx_data *spi_imx, | |||
220 | reg |= MX31_CSPICTRL_POL; | 360 | reg |= MX31_CSPICTRL_POL; |
221 | if (config->mode & SPI_CS_HIGH) | 361 | if (config->mode & SPI_CS_HIGH) |
222 | reg |= MX31_CSPICTRL_SSPOL; | 362 | reg |= MX31_CSPICTRL_SSPOL; |
223 | if (config->cs < 0) { | 363 | if (cs < 0) |
224 | if (cpu_is_mx31()) | 364 | reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT; |
225 | reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT; | 365 | |
226 | else if (cpu_is_mx25() || cpu_is_mx35()) | 366 | writel(reg, spi_imx->base + MXC_CSPICTRL); |
227 | reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; | 367 | |
228 | } | 368 | return 0; |
369 | } | ||
370 | |||
371 | static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx, | ||
372 | struct spi_imx_config *config) | ||
373 | { | ||
374 | unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; | ||
375 | int cs = spi_imx->chipselect[config->cs]; | ||
376 | |||
377 | reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << | ||
378 | MX31_CSPICTRL_DR_SHIFT; | ||
379 | |||
380 | reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; | ||
381 | reg |= MX31_CSPICTRL_SSCTL; | ||
382 | |||
383 | if (config->mode & SPI_CPHA) | ||
384 | reg |= MX31_CSPICTRL_PHA; | ||
385 | if (config->mode & SPI_CPOL) | ||
386 | reg |= MX31_CSPICTRL_POL; | ||
387 | if (config->mode & SPI_CS_HIGH) | ||
388 | reg |= MX31_CSPICTRL_SSPOL; | ||
389 | if (cs < 0) | ||
390 | reg |= (cs + 32) << MX35_CSPICTRL_CS_SHIFT; | ||
229 | 391 | ||
230 | writel(reg, spi_imx->base + MXC_CSPICTRL); | 392 | writel(reg, spi_imx->base + MXC_CSPICTRL); |
231 | 393 | ||
232 | return 0; | 394 | return 0; |
233 | } | 395 | } |
234 | 396 | ||
235 | static int mx31_rx_available(struct spi_imx_data *spi_imx) | 397 | static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx) |
236 | { | 398 | { |
237 | return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; | 399 | return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; |
238 | } | 400 | } |
239 | 401 | ||
402 | static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx) | ||
403 | { | ||
404 | /* drain receive buffer */ | ||
405 | while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) | ||
406 | readl(spi_imx->base + MXC_CSPIRXDATA); | ||
407 | } | ||
408 | |||
240 | #define MX27_INTREG_RR (1 << 4) | 409 | #define MX27_INTREG_RR (1 << 4) |
241 | #define MX27_INTREG_TEEN (1 << 9) | 410 | #define MX27_INTREG_TEEN (1 << 9) |
242 | #define MX27_INTREG_RREN (1 << 13) | 411 | #define MX27_INTREG_RREN (1 << 13) |
@@ -250,7 +419,7 @@ static int mx31_rx_available(struct spi_imx_data *spi_imx) | |||
250 | #define MX27_CSPICTRL_DR_SHIFT 14 | 419 | #define MX27_CSPICTRL_DR_SHIFT 14 |
251 | #define MX27_CSPICTRL_CS_SHIFT 19 | 420 | #define MX27_CSPICTRL_CS_SHIFT 19 |
252 | 421 | ||
253 | static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable) | 422 | static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable) |
254 | { | 423 | { |
255 | unsigned int val = 0; | 424 | unsigned int val = 0; |
256 | 425 | ||
@@ -262,7 +431,7 @@ static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable) | |||
262 | writel(val, spi_imx->base + MXC_CSPIINT); | 431 | writel(val, spi_imx->base + MXC_CSPIINT); |
263 | } | 432 | } |
264 | 433 | ||
265 | static void mx27_trigger(struct spi_imx_data *spi_imx) | 434 | static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx) |
266 | { | 435 | { |
267 | unsigned int reg; | 436 | unsigned int reg; |
268 | 437 | ||
@@ -271,10 +440,11 @@ static void mx27_trigger(struct spi_imx_data *spi_imx) | |||
271 | writel(reg, spi_imx->base + MXC_CSPICTRL); | 440 | writel(reg, spi_imx->base + MXC_CSPICTRL); |
272 | } | 441 | } |
273 | 442 | ||
274 | static int mx27_config(struct spi_imx_data *spi_imx, | 443 | static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx, |
275 | struct spi_imx_config *config) | 444 | struct spi_imx_config *config) |
276 | { | 445 | { |
277 | unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; | 446 | unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; |
447 | int cs = spi_imx->chipselect[config->cs]; | ||
278 | 448 | ||
279 | reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << | 449 | reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << |
280 | MX27_CSPICTRL_DR_SHIFT; | 450 | MX27_CSPICTRL_DR_SHIFT; |
@@ -286,19 +456,24 @@ static int mx27_config(struct spi_imx_data *spi_imx, | |||
286 | reg |= MX27_CSPICTRL_POL; | 456 | reg |= MX27_CSPICTRL_POL; |
287 | if (config->mode & SPI_CS_HIGH) | 457 | if (config->mode & SPI_CS_HIGH) |
288 | reg |= MX27_CSPICTRL_SSPOL; | 458 | reg |= MX27_CSPICTRL_SSPOL; |
289 | if (config->cs < 0) | 459 | if (cs < 0) |
290 | reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; | 460 | reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT; |
291 | 461 | ||
292 | writel(reg, spi_imx->base + MXC_CSPICTRL); | 462 | writel(reg, spi_imx->base + MXC_CSPICTRL); |
293 | 463 | ||
294 | return 0; | 464 | return 0; |
295 | } | 465 | } |
296 | 466 | ||
297 | static int mx27_rx_available(struct spi_imx_data *spi_imx) | 467 | static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx) |
298 | { | 468 | { |
299 | return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; | 469 | return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; |
300 | } | 470 | } |
301 | 471 | ||
472 | static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx) | ||
473 | { | ||
474 | writel(1, spi_imx->base + MXC_RESET); | ||
475 | } | ||
476 | |||
302 | #define MX1_INTREG_RR (1 << 3) | 477 | #define MX1_INTREG_RR (1 << 3) |
303 | #define MX1_INTREG_TEEN (1 << 8) | 478 | #define MX1_INTREG_TEEN (1 << 8) |
304 | #define MX1_INTREG_RREN (1 << 11) | 479 | #define MX1_INTREG_RREN (1 << 11) |
@@ -310,7 +485,7 @@ static int mx27_rx_available(struct spi_imx_data *spi_imx) | |||
310 | #define MX1_CSPICTRL_MASTER (1 << 10) | 485 | #define MX1_CSPICTRL_MASTER (1 << 10) |
311 | #define MX1_CSPICTRL_DR_SHIFT 13 | 486 | #define MX1_CSPICTRL_DR_SHIFT 13 |
312 | 487 | ||
313 | static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) | 488 | static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable) |
314 | { | 489 | { |
315 | unsigned int val = 0; | 490 | unsigned int val = 0; |
316 | 491 | ||
@@ -322,7 +497,7 @@ static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) | |||
322 | writel(val, spi_imx->base + MXC_CSPIINT); | 497 | writel(val, spi_imx->base + MXC_CSPIINT); |
323 | } | 498 | } |
324 | 499 | ||
325 | static void mx1_trigger(struct spi_imx_data *spi_imx) | 500 | static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx) |
326 | { | 501 | { |
327 | unsigned int reg; | 502 | unsigned int reg; |
328 | 503 | ||
@@ -331,7 +506,7 @@ static void mx1_trigger(struct spi_imx_data *spi_imx) | |||
331 | writel(reg, spi_imx->base + MXC_CSPICTRL); | 506 | writel(reg, spi_imx->base + MXC_CSPICTRL); |
332 | } | 507 | } |
333 | 508 | ||
334 | static int mx1_config(struct spi_imx_data *spi_imx, | 509 | static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx, |
335 | struct spi_imx_config *config) | 510 | struct spi_imx_config *config) |
336 | { | 511 | { |
337 | unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; | 512 | unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; |
@@ -350,11 +525,73 @@ static int mx1_config(struct spi_imx_data *spi_imx, | |||
350 | return 0; | 525 | return 0; |
351 | } | 526 | } |
352 | 527 | ||
353 | static int mx1_rx_available(struct spi_imx_data *spi_imx) | 528 | static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx) |
354 | { | 529 | { |
355 | return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; | 530 | return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; |
356 | } | 531 | } |
357 | 532 | ||
533 | static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx) | ||
534 | { | ||
535 | writel(1, spi_imx->base + MXC_RESET); | ||
536 | } | ||
537 | |||
538 | /* | ||
539 | * These version numbers are taken from the Freescale driver. Unfortunately it | ||
540 | * doesn't support i.MX1, so this entry doesn't match the scheme. :-( | ||
541 | */ | ||
542 | static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = { | ||
543 | #ifdef CONFIG_SPI_IMX_VER_IMX1 | ||
544 | [SPI_IMX_VER_IMX1] = { | ||
545 | .intctrl = mx1_intctrl, | ||
546 | .config = mx1_config, | ||
547 | .trigger = mx1_trigger, | ||
548 | .rx_available = mx1_rx_available, | ||
549 | .reset = mx1_reset, | ||
550 | .fifosize = 8, | ||
551 | }, | ||
552 | #endif | ||
553 | #ifdef CONFIG_SPI_IMX_VER_0_0 | ||
554 | [SPI_IMX_VER_0_0] = { | ||
555 | .intctrl = mx27_intctrl, | ||
556 | .config = mx27_config, | ||
557 | .trigger = mx27_trigger, | ||
558 | .rx_available = mx27_rx_available, | ||
559 | .reset = spi_imx0_0_reset, | ||
560 | .fifosize = 8, | ||
561 | }, | ||
562 | #endif | ||
563 | #ifdef CONFIG_SPI_IMX_VER_0_4 | ||
564 | [SPI_IMX_VER_0_4] = { | ||
565 | .intctrl = mx31_intctrl, | ||
566 | .config = spi_imx0_4_config, | ||
567 | .trigger = mx31_trigger, | ||
568 | .rx_available = mx31_rx_available, | ||
569 | .reset = spi_imx0_4_reset, | ||
570 | .fifosize = 8, | ||
571 | }, | ||
572 | #endif | ||
573 | #ifdef CONFIG_SPI_IMX_VER_0_7 | ||
574 | [SPI_IMX_VER_0_7] = { | ||
575 | .intctrl = mx31_intctrl, | ||
576 | .config = spi_imx0_7_config, | ||
577 | .trigger = mx31_trigger, | ||
578 | .rx_available = mx31_rx_available, | ||
579 | .reset = spi_imx0_4_reset, | ||
580 | .fifosize = 8, | ||
581 | }, | ||
582 | #endif | ||
583 | #ifdef CONFIG_SPI_IMX_VER_2_3 | ||
584 | [SPI_IMX_VER_2_3] = { | ||
585 | .intctrl = spi_imx2_3_intctrl, | ||
586 | .config = spi_imx2_3_config, | ||
587 | .trigger = spi_imx2_3_trigger, | ||
588 | .rx_available = spi_imx2_3_rx_available, | ||
589 | .reset = spi_imx2_3_reset, | ||
590 | .fifosize = 64, | ||
591 | }, | ||
592 | #endif | ||
593 | }; | ||
594 | |||
358 | static void spi_imx_chipselect(struct spi_device *spi, int is_active) | 595 | static void spi_imx_chipselect(struct spi_device *spi, int is_active) |
359 | { | 596 | { |
360 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); | 597 | struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); |
@@ -370,21 +607,21 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active) | |||
370 | 607 | ||
371 | static void spi_imx_push(struct spi_imx_data *spi_imx) | 608 | static void spi_imx_push(struct spi_imx_data *spi_imx) |
372 | { | 609 | { |
373 | while (spi_imx->txfifo < 8) { | 610 | while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) { |
374 | if (!spi_imx->count) | 611 | if (!spi_imx->count) |
375 | break; | 612 | break; |
376 | spi_imx->tx(spi_imx); | 613 | spi_imx->tx(spi_imx); |
377 | spi_imx->txfifo++; | 614 | spi_imx->txfifo++; |
378 | } | 615 | } |
379 | 616 | ||
380 | spi_imx->trigger(spi_imx); | 617 | spi_imx->devtype_data.trigger(spi_imx); |
381 | } | 618 | } |
382 | 619 | ||
383 | static irqreturn_t spi_imx_isr(int irq, void *dev_id) | 620 | static irqreturn_t spi_imx_isr(int irq, void *dev_id) |
384 | { | 621 | { |
385 | struct spi_imx_data *spi_imx = dev_id; | 622 | struct spi_imx_data *spi_imx = dev_id; |
386 | 623 | ||
387 | while (spi_imx->rx_available(spi_imx)) { | 624 | while (spi_imx->devtype_data.rx_available(spi_imx)) { |
388 | spi_imx->rx(spi_imx); | 625 | spi_imx->rx(spi_imx); |
389 | spi_imx->txfifo--; | 626 | spi_imx->txfifo--; |
390 | } | 627 | } |
@@ -398,11 +635,12 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id) | |||
398 | /* No data left to push, but still waiting for rx data, | 635 | /* No data left to push, but still waiting for rx data, |
399 | * enable receive data available interrupt. | 636 | * enable receive data available interrupt. |
400 | */ | 637 | */ |
401 | spi_imx->intctrl(spi_imx, MXC_INT_RR); | 638 | spi_imx->devtype_data.intctrl( |
639 | spi_imx, MXC_INT_RR); | ||
402 | return IRQ_HANDLED; | 640 | return IRQ_HANDLED; |
403 | } | 641 | } |
404 | 642 | ||
405 | spi_imx->intctrl(spi_imx, 0); | 643 | spi_imx->devtype_data.intctrl(spi_imx, 0); |
406 | complete(&spi_imx->xfer_done); | 644 | complete(&spi_imx->xfer_done); |
407 | 645 | ||
408 | return IRQ_HANDLED; | 646 | return IRQ_HANDLED; |
@@ -417,7 +655,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, | |||
417 | config.bpw = t ? t->bits_per_word : spi->bits_per_word; | 655 | config.bpw = t ? t->bits_per_word : spi->bits_per_word; |
418 | config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; | 656 | config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; |
419 | config.mode = spi->mode; | 657 | config.mode = spi->mode; |
420 | config.cs = spi_imx->chipselect[spi->chip_select]; | 658 | config.cs = spi->chip_select; |
421 | 659 | ||
422 | if (!config.speed_hz) | 660 | if (!config.speed_hz) |
423 | config.speed_hz = spi->max_speed_hz; | 661 | config.speed_hz = spi->max_speed_hz; |
@@ -439,7 +677,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, | |||
439 | } else | 677 | } else |
440 | BUG(); | 678 | BUG(); |
441 | 679 | ||
442 | spi_imx->config(spi_imx, &config); | 680 | spi_imx->devtype_data.config(spi_imx, &config); |
443 | 681 | ||
444 | return 0; | 682 | return 0; |
445 | } | 683 | } |
@@ -458,7 +696,7 @@ static int spi_imx_transfer(struct spi_device *spi, | |||
458 | 696 | ||
459 | spi_imx_push(spi_imx); | 697 | spi_imx_push(spi_imx); |
460 | 698 | ||
461 | spi_imx->intctrl(spi_imx, MXC_INT_TE); | 699 | spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE); |
462 | 700 | ||
463 | wait_for_completion(&spi_imx->xfer_done); | 701 | wait_for_completion(&spi_imx->xfer_done); |
464 | 702 | ||
@@ -485,6 +723,42 @@ static void spi_imx_cleanup(struct spi_device *spi) | |||
485 | { | 723 | { |
486 | } | 724 | } |
487 | 725 | ||
726 | static struct platform_device_id spi_imx_devtype[] = { | ||
727 | { | ||
728 | .name = "imx1-cspi", | ||
729 | .driver_data = SPI_IMX_VER_IMX1, | ||
730 | }, { | ||
731 | .name = "imx21-cspi", | ||
732 | .driver_data = SPI_IMX_VER_0_0, | ||
733 | }, { | ||
734 | .name = "imx25-cspi", | ||
735 | .driver_data = SPI_IMX_VER_0_7, | ||
736 | }, { | ||
737 | .name = "imx27-cspi", | ||
738 | .driver_data = SPI_IMX_VER_0_0, | ||
739 | }, { | ||
740 | .name = "imx31-cspi", | ||
741 | .driver_data = SPI_IMX_VER_0_4, | ||
742 | }, { | ||
743 | .name = "imx35-cspi", | ||
744 | .driver_data = SPI_IMX_VER_0_7, | ||
745 | }, { | ||
746 | .name = "imx51-cspi", | ||
747 | .driver_data = SPI_IMX_VER_0_7, | ||
748 | }, { | ||
749 | .name = "imx51-ecspi", | ||
750 | .driver_data = SPI_IMX_VER_2_3, | ||
751 | }, { | ||
752 | .name = "imx53-cspi", | ||
753 | .driver_data = SPI_IMX_VER_0_7, | ||
754 | }, { | ||
755 | .name = "imx53-ecspi", | ||
756 | .driver_data = SPI_IMX_VER_2_3, | ||
757 | }, { | ||
758 | /* sentinel */ | ||
759 | } | ||
760 | }; | ||
761 | |||
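Each .name in this table is matched against the platform device's name; the platform core then hands the matching entry to the driver as pdev->id_entry, and its driver_data is used as an index into spi_imx_devtype_data[] (see the probe hunk below). A minimal sketch of that lookup, with error handling elided; the helper itself is illustrative:

static void example_pick_devtype(struct platform_device *pdev,
		struct spi_imx_data *spi_imx)
{
	/* pdev->id_entry points at the spi_imx_devtype[] row that matched */
	enum spi_imx_devtype ver = pdev->id_entry->driver_data;

	spi_imx->devtype_data = spi_imx_devtype_data[ver];
}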
488 | static int __devinit spi_imx_probe(struct platform_device *pdev) | 762 | static int __devinit spi_imx_probe(struct platform_device *pdev) |
489 | { | 763 | { |
490 | struct spi_imx_master *mxc_platform_info; | 764 | struct spi_imx_master *mxc_platform_info; |
@@ -536,6 +810,9 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) | |||
536 | 810 | ||
537 | init_completion(&spi_imx->xfer_done); | 811 | init_completion(&spi_imx->xfer_done); |
538 | 812 | ||
813 | spi_imx->devtype_data = | ||
814 | spi_imx_devtype_data[pdev->id_entry->driver_data]; | ||
815 | |||
539 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 816 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
540 | if (!res) { | 817 | if (!res) { |
541 | dev_err(&pdev->dev, "can't get platform resource\n"); | 818 | dev_err(&pdev->dev, "can't get platform resource\n"); |
@@ -556,7 +833,7 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) | |||
556 | } | 833 | } |
557 | 834 | ||
558 | spi_imx->irq = platform_get_irq(pdev, 0); | 835 | spi_imx->irq = platform_get_irq(pdev, 0); |
559 | if (spi_imx->irq <= 0) { | 836 | if (spi_imx->irq < 0) { |
560 | ret = -EINVAL; | 837 | ret = -EINVAL; |
561 | goto out_iounmap; | 838 | goto out_iounmap; |
562 | } | 839 | } |
@@ -567,24 +844,6 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) | |||
567 | goto out_iounmap; | 844 | goto out_iounmap; |
568 | } | 845 | } |
569 | 846 | ||
570 | if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) { | ||
571 | spi_imx->intctrl = mx31_intctrl; | ||
572 | spi_imx->config = mx31_config; | ||
573 | spi_imx->trigger = mx31_trigger; | ||
574 | spi_imx->rx_available = mx31_rx_available; | ||
575 | } else if (cpu_is_mx27() || cpu_is_mx21()) { | ||
576 | spi_imx->intctrl = mx27_intctrl; | ||
577 | spi_imx->config = mx27_config; | ||
578 | spi_imx->trigger = mx27_trigger; | ||
579 | spi_imx->rx_available = mx27_rx_available; | ||
580 | } else if (cpu_is_mx1()) { | ||
581 | spi_imx->intctrl = mx1_intctrl; | ||
582 | spi_imx->config = mx1_config; | ||
583 | spi_imx->trigger = mx1_trigger; | ||
584 | spi_imx->rx_available = mx1_rx_available; | ||
585 | } else | ||
586 | BUG(); | ||
587 | |||
588 | spi_imx->clk = clk_get(&pdev->dev, NULL); | 847 | spi_imx->clk = clk_get(&pdev->dev, NULL); |
589 | if (IS_ERR(spi_imx->clk)) { | 848 | if (IS_ERR(spi_imx->clk)) { |
590 | dev_err(&pdev->dev, "unable to get clock\n"); | 849 | dev_err(&pdev->dev, "unable to get clock\n"); |
@@ -595,15 +854,9 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) | |||
595 | clk_enable(spi_imx->clk); | 854 | clk_enable(spi_imx->clk); |
596 | spi_imx->spi_clk = clk_get_rate(spi_imx->clk); | 855 | spi_imx->spi_clk = clk_get_rate(spi_imx->clk); |
597 | 856 | ||
598 | if (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27()) | 857 | spi_imx->devtype_data.reset(spi_imx); |
599 | writel(1, spi_imx->base + MXC_RESET); | ||
600 | |||
601 | /* drain receive buffer */ | ||
602 | if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) | ||
603 | while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) | ||
604 | readl(spi_imx->base + MXC_CSPIRXDATA); | ||
605 | 858 | ||
606 | spi_imx->intctrl(spi_imx, 0); | 859 | spi_imx->devtype_data.intctrl(spi_imx, 0); |
607 | 860 | ||
608 | ret = spi_bitbang_start(&spi_imx->bitbang); | 861 | ret = spi_bitbang_start(&spi_imx->bitbang); |
609 | if (ret) { | 862 | if (ret) { |
@@ -668,6 +921,7 @@ static struct platform_driver spi_imx_driver = { | |||
668 | .name = DRIVER_NAME, | 921 | .name = DRIVER_NAME, |
669 | .owner = THIS_MODULE, | 922 | .owner = THIS_MODULE, |
670 | }, | 923 | }, |
924 | .id_table = spi_imx_devtype, | ||
671 | .probe = spi_imx_probe, | 925 | .probe = spi_imx_probe, |
672 | .remove = __devexit_p(spi_imx_remove), | 926 | .remove = __devexit_p(spi_imx_remove), |
673 | }; | 927 | }; |
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c index dff63be0d0a8..3cd15f690f16 100644 --- a/drivers/spi/spi_nuc900.c +++ b/drivers/spi/spi_nuc900.c | |||
@@ -449,7 +449,7 @@ err_iomap: | |||
449 | release_mem_region(hw->res->start, resource_size(hw->res)); | 449 | release_mem_region(hw->res->start, resource_size(hw->res)); |
450 | kfree(hw->ioarea); | 450 | kfree(hw->ioarea); |
451 | err_pdata: | 451 | err_pdata: |
452 | spi_master_put(hw->master);; | 452 | spi_master_put(hw->master); |
453 | 453 | ||
454 | err_nomem: | 454 | err_nomem: |
455 | return err; | 455 | return err; |
@@ -463,7 +463,7 @@ static int __devexit nuc900_spi_remove(struct platform_device *dev) | |||
463 | 463 | ||
464 | platform_set_drvdata(dev, NULL); | 464 | platform_set_drvdata(dev, NULL); |
465 | 465 | ||
466 | spi_unregister_master(hw->master); | 466 | spi_bitbang_stop(&hw->bitbang); |
467 | 467 | ||
468 | clk_disable(hw->clk); | 468 | clk_disable(hw->clk); |
469 | clk_put(hw->clk); | 469 | clk_put(hw->clk); |
diff --git a/drivers/spi/spi_oc_tiny.c b/drivers/spi/spi_oc_tiny.c new file mode 100644 index 000000000000..f1bde66cea19 --- /dev/null +++ b/drivers/spi/spi_oc_tiny.c | |||
@@ -0,0 +1,425 @@ | |||
1 | /* | ||
2 | * OpenCores tiny SPI master driver | ||
3 | * | ||
4 | * http://opencores.org/project,tiny_spi | ||
5 | * | ||
6 | * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw> | ||
7 | * | ||
8 | * Based on spi_s3c24xx.c, which is: | ||
9 | * Copyright (c) 2006 Ben Dooks | ||
10 | * Copyright (c) 2006 Simtec Electronics | ||
11 | * Ben Dooks <ben@simtec.co.uk> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | ||
14 | * it under the terms of the GNU General Public License version 2 as | ||
15 | * published by the Free Software Foundation. | ||
16 | */ | ||
17 | |||
18 | #include <linux/init.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | #include <linux/spi/spi.h> | ||
23 | #include <linux/spi/spi_bitbang.h> | ||
24 | #include <linux/spi/spi_oc_tiny.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/gpio.h> | ||
27 | #include <linux/of.h> | ||
28 | |||
29 | #define DRV_NAME "spi_oc_tiny" | ||
30 | |||
31 | #define TINY_SPI_RXDATA 0 | ||
32 | #define TINY_SPI_TXDATA 4 | ||
33 | #define TINY_SPI_STATUS 8 | ||
34 | #define TINY_SPI_CONTROL 12 | ||
35 | #define TINY_SPI_BAUD 16 | ||
36 | |||
37 | #define TINY_SPI_STATUS_TXE 0x1 | ||
38 | #define TINY_SPI_STATUS_TXR 0x2 | ||
39 | |||
40 | struct tiny_spi { | ||
41 | /* bitbang has to be first */ | ||
42 | struct spi_bitbang bitbang; | ||
43 | struct completion done; | ||
44 | |||
45 | void __iomem *base; | ||
46 | int irq; | ||
47 | unsigned int freq; | ||
48 | unsigned int baudwidth; | ||
49 | unsigned int baud; | ||
50 | unsigned int speed_hz; | ||
51 | unsigned int mode; | ||
52 | unsigned int len; | ||
53 | unsigned int txc, rxc; | ||
54 | const u8 *txp; | ||
55 | u8 *rxp; | ||
56 | unsigned int gpio_cs_count; | ||
57 | int *gpio_cs; | ||
58 | }; | ||
59 | |||
60 | static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev) | ||
61 | { | ||
62 | return spi_master_get_devdata(sdev->master); | ||
63 | } | ||
64 | |||
65 | static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz) | ||
66 | { | ||
67 | struct tiny_spi *hw = tiny_spi_to_hw(spi); | ||
68 | |||
69 | return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1; | ||
70 | } | ||
71 | |||
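A worked example of this divider calculation with illustrative numbers (hw->freq = 50 MHz, hz = 10 MHz, hw->baudwidth = 8):

/*
 * DIV_ROUND_UP(50000000, 10000000 * 2) = 3
 * min(3, 1 << 8) = 3, so the returned baud value is 3 - 1 = 2
 *
 * Assuming the core divides its input clock by 2 * (baud + 1), which is
 * what the "hz * 2" term implies, SCLK ends up at 50 MHz / 6 ~ 8.33 MHz,
 * the fastest rate that does not exceed the 10 MHz request.
 */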
72 | static void tiny_spi_chipselect(struct spi_device *spi, int is_active) | ||
73 | { | ||
74 | struct tiny_spi *hw = tiny_spi_to_hw(spi); | ||
75 | |||
76 | if (hw->gpio_cs_count) { | ||
77 | gpio_set_value(hw->gpio_cs[spi->chip_select], | ||
78 | (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); | ||
79 | } | ||
80 | } | ||
81 | |||
82 | static int tiny_spi_setup_transfer(struct spi_device *spi, | ||
83 | struct spi_transfer *t) | ||
84 | { | ||
85 | struct tiny_spi *hw = tiny_spi_to_hw(spi); | ||
86 | unsigned int baud = hw->baud; | ||
87 | |||
88 | if (t) { | ||
89 | if (t->speed_hz && t->speed_hz != hw->speed_hz) | ||
90 | baud = tiny_spi_baud(spi, t->speed_hz); | ||
91 | } | ||
92 | writel(baud, hw->base + TINY_SPI_BAUD); | ||
93 | writel(hw->mode, hw->base + TINY_SPI_CONTROL); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static int tiny_spi_setup(struct spi_device *spi) | ||
98 | { | ||
99 | struct tiny_spi *hw = tiny_spi_to_hw(spi); | ||
100 | |||
101 | if (spi->max_speed_hz != hw->speed_hz) { | ||
102 | hw->speed_hz = spi->max_speed_hz; | ||
103 | hw->baud = tiny_spi_baud(spi, hw->speed_hz); | ||
104 | } | ||
105 | hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA); | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | static inline void tiny_spi_wait_txr(struct tiny_spi *hw) | ||
110 | { | ||
111 | while (!(readb(hw->base + TINY_SPI_STATUS) & | ||
112 | TINY_SPI_STATUS_TXR)) | ||
113 | cpu_relax(); | ||
114 | } | ||
115 | |||
116 | static inline void tiny_spi_wait_txe(struct tiny_spi *hw) | ||
117 | { | ||
118 | while (!(readb(hw->base + TINY_SPI_STATUS) & | ||
119 | TINY_SPI_STATUS_TXE)) | ||
120 | cpu_relax(); | ||
121 | } | ||
122 | |||
123 | static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) | ||
124 | { | ||
125 | struct tiny_spi *hw = tiny_spi_to_hw(spi); | ||
126 | const u8 *txp = t->tx_buf; | ||
127 | u8 *rxp = t->rx_buf; | ||
128 | unsigned int i; | ||
129 | |||
130 | if (hw->irq >= 0) { | ||
131 | /* use interrupt-driven data transfer */ | ||
132 | hw->len = t->len; | ||
133 | hw->txp = t->tx_buf; | ||
134 | hw->rxp = t->rx_buf; | ||
135 | hw->txc = 0; | ||
136 | hw->rxc = 0; | ||
137 | |||
138 | /* send the first byte */ | ||
139 | if (t->len > 1) { | ||
140 | writeb(hw->txp ? *hw->txp++ : 0, | ||
141 | hw->base + TINY_SPI_TXDATA); | ||
142 | hw->txc++; | ||
143 | writeb(hw->txp ? *hw->txp++ : 0, | ||
144 | hw->base + TINY_SPI_TXDATA); | ||
145 | hw->txc++; | ||
146 | writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS); | ||
147 | } else { | ||
148 | writeb(hw->txp ? *hw->txp++ : 0, | ||
149 | hw->base + TINY_SPI_TXDATA); | ||
150 | hw->txc++; | ||
151 | writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS); | ||
152 | } | ||
153 | |||
154 | wait_for_completion(&hw->done); | ||
155 | } else if (txp && rxp) { | ||
156 | /* we need to tighten the transfer loop */ | ||
157 | writeb(*txp++, hw->base + TINY_SPI_TXDATA); | ||
158 | if (t->len > 1) { | ||
159 | writeb(*txp++, hw->base + TINY_SPI_TXDATA); | ||
160 | for (i = 2; i < t->len; i++) { | ||
161 | u8 rx, tx = *txp++; | ||
162 | tiny_spi_wait_txr(hw); | ||
163 | rx = readb(hw->base + TINY_SPI_TXDATA); | ||
164 | writeb(tx, hw->base + TINY_SPI_TXDATA); | ||
165 | *rxp++ = rx; | ||
166 | } | ||
167 | tiny_spi_wait_txr(hw); | ||
168 | *rxp++ = readb(hw->base + TINY_SPI_TXDATA); | ||
169 | } | ||
170 | tiny_spi_wait_txe(hw); | ||
171 | *rxp++ = readb(hw->base + TINY_SPI_RXDATA); | ||
172 | } else if (rxp) { | ||
173 | writeb(0, hw->base + TINY_SPI_TXDATA); | ||
174 | if (t->len > 1) { | ||
175 | writeb(0, | ||
176 | hw->base + TINY_SPI_TXDATA); | ||
177 | for (i = 2; i < t->len; i++) { | ||
178 | u8 rx; | ||
179 | tiny_spi_wait_txr(hw); | ||
180 | rx = readb(hw->base + TINY_SPI_TXDATA); | ||
181 | writeb(0, hw->base + TINY_SPI_TXDATA); | ||
182 | *rxp++ = rx; | ||
183 | } | ||
184 | tiny_spi_wait_txr(hw); | ||
185 | *rxp++ = readb(hw->base + TINY_SPI_TXDATA); | ||
186 | } | ||
187 | tiny_spi_wait_txe(hw); | ||
188 | *rxp++ = readb(hw->base + TINY_SPI_RXDATA); | ||
189 | } else if (txp) { | ||
190 | writeb(*txp++, hw->base + TINY_SPI_TXDATA); | ||
191 | if (t->len > 1) { | ||
192 | writeb(*txp++, hw->base + TINY_SPI_TXDATA); | ||
193 | for (i = 2; i < t->len; i++) { | ||
194 | u8 tx = *txp++; | ||
195 | tiny_spi_wait_txr(hw); | ||
196 | writeb(tx, hw->base + TINY_SPI_TXDATA); | ||
197 | } | ||
198 | } | ||
199 | tiny_spi_wait_txe(hw); | ||
200 | } else { | ||
201 | writeb(0, hw->base + TINY_SPI_TXDATA); | ||
202 | if (t->len > 1) { | ||
203 | writeb(0, hw->base + TINY_SPI_TXDATA); | ||
204 | for (i = 2; i < t->len; i++) { | ||
205 | tiny_spi_wait_txr(hw); | ||
206 | writeb(0, hw->base + TINY_SPI_TXDATA); | ||
207 | } | ||
208 | } | ||
209 | tiny_spi_wait_txe(hw); | ||
210 | } | ||
211 | return t->len; | ||
212 | } | ||
213 | |||
214 | static irqreturn_t tiny_spi_irq(int irq, void *dev) | ||
215 | { | ||
216 | struct tiny_spi *hw = dev; | ||
217 | |||
218 | writeb(0, hw->base + TINY_SPI_STATUS); | ||
219 | if (hw->rxc + 1 == hw->len) { | ||
220 | if (hw->rxp) | ||
221 | *hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA); | ||
222 | hw->rxc++; | ||
223 | complete(&hw->done); | ||
224 | } else { | ||
225 | if (hw->rxp) | ||
226 | *hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA); | ||
227 | hw->rxc++; | ||
228 | if (hw->txc < hw->len) { | ||
229 | writeb(hw->txp ? *hw->txp++ : 0, | ||
230 | hw->base + TINY_SPI_TXDATA); | ||
231 | hw->txc++; | ||
232 | writeb(TINY_SPI_STATUS_TXR, | ||
233 | hw->base + TINY_SPI_STATUS); | ||
234 | } else { | ||
235 | writeb(TINY_SPI_STATUS_TXE, | ||
236 | hw->base + TINY_SPI_STATUS); | ||
237 | } | ||
238 | } | ||
239 | return IRQ_HANDLED; | ||
240 | } | ||
241 | |||
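The transmit path keeps the core's pipeline busy: tiny_spi_txrx_bufs() above primes two bytes up front, and each TXR interrupt then reads back one received byte and pushes the next one, switching to the TXE event for the final byte. A worked trace for a 3-byte transfer, following the handler above (that the status writes select which event interrupts next is inferred from how the driver uses them):

/*
 * txrx_bufs: write tx[0], write tx[1] (txc = 2), arm TXR, wait
 * irq #1 (TXR): rxc + 1 != len -> rx[0] = TXDATA, rxc = 1;
 *               txc < len      -> write tx[2], txc = 3, re-arm TXR
 * irq #2 (TXR): rx[1] = TXDATA, rxc = 2; txc == len -> arm TXE
 * irq #3 (TXE): rxc + 1 == len -> rx[2] = RXDATA, rxc = 3, complete()
 */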
242 | #ifdef CONFIG_OF | ||
243 | #include <linux/of_gpio.h> | ||
244 | |||
245 | static int __devinit tiny_spi_of_probe(struct platform_device *pdev) | ||
246 | { | ||
247 | struct tiny_spi *hw = platform_get_drvdata(pdev); | ||
248 | struct device_node *np = pdev->dev.of_node; | ||
249 | unsigned int i; | ||
250 | const __be32 *val; | ||
251 | int len; | ||
252 | |||
253 | if (!np) | ||
254 | return 0; | ||
255 | hw->gpio_cs_count = of_gpio_count(np); | ||
256 | if (hw->gpio_cs_count) { | ||
257 | hw->gpio_cs = devm_kzalloc(&pdev->dev, | ||
258 | hw->gpio_cs_count * sizeof(unsigned int), | ||
259 | GFP_KERNEL); | ||
260 | if (!hw->gpio_cs) | ||
261 | return -ENOMEM; | ||
262 | } | ||
263 | for (i = 0; i < hw->gpio_cs_count; i++) { | ||
264 | hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL); | ||
265 | if (hw->gpio_cs[i] < 0) | ||
266 | return -ENODEV; | ||
267 | } | ||
268 | hw->bitbang.master->dev.of_node = pdev->dev.of_node; | ||
269 | val = of_get_property(pdev->dev.of_node, | ||
270 | "clock-frequency", &len); | ||
271 | if (val && len >= sizeof(__be32)) | ||
272 | hw->freq = be32_to_cpup(val); | ||
273 | val = of_get_property(pdev->dev.of_node, "baud-width", &len); | ||
274 | if (val && len >= sizeof(__be32)) | ||
275 | hw->baudwidth = be32_to_cpup(val); | ||
276 | return 0; | ||
277 | } | ||
278 | #else /* !CONFIG_OF */ | ||
279 | static int __devinit tiny_spi_of_probe(struct platform_device *pdev) | ||
280 | { | ||
281 | return 0; | ||
282 | } | ||
283 | #endif /* CONFIG_OF */ | ||
284 | |||
285 | static int __devinit tiny_spi_probe(struct platform_device *pdev) | ||
286 | { | ||
287 | struct tiny_spi_platform_data *platp = pdev->dev.platform_data; | ||
288 | struct tiny_spi *hw; | ||
289 | struct spi_master *master; | ||
290 | struct resource *res; | ||
291 | unsigned int i; | ||
292 | int err = -ENODEV; | ||
293 | |||
294 | master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi)); | ||
295 | if (!master) | ||
296 | return err; | ||
297 | |||
298 | /* setup the master state. */ | ||
299 | master->bus_num = pdev->id; | ||
300 | master->num_chipselect = 255; | ||
301 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | ||
302 | master->setup = tiny_spi_setup; | ||
303 | |||
304 | hw = spi_master_get_devdata(master); | ||
305 | platform_set_drvdata(pdev, hw); | ||
306 | |||
307 | /* setup the state for the bitbang driver */ | ||
308 | hw->bitbang.master = spi_master_get(master); | ||
309 | if (!hw->bitbang.master) | ||
310 | return err; | ||
311 | hw->bitbang.setup_transfer = tiny_spi_setup_transfer; | ||
312 | hw->bitbang.chipselect = tiny_spi_chipselect; | ||
313 | hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs; | ||
314 | |||
315 | /* find and map our resources */ | ||
316 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
317 | if (!res) | ||
318 | goto exit_busy; | ||
319 | if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), | ||
320 | pdev->name)) | ||
321 | goto exit_busy; | ||
322 | hw->base = devm_ioremap_nocache(&pdev->dev, res->start, | ||
323 | resource_size(res)); | ||
324 | if (!hw->base) | ||
325 | goto exit_busy; | ||
326 | /* irq is optional */ | ||
327 | hw->irq = platform_get_irq(pdev, 0); | ||
328 | if (hw->irq >= 0) { | ||
329 | init_completion(&hw->done); | ||
330 | err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0, | ||
331 | pdev->name, hw); | ||
332 | if (err) | ||
333 | goto exit; | ||
334 | } | ||
335 | /* find platform data */ | ||
336 | if (platp) { | ||
337 | hw->gpio_cs_count = platp->gpio_cs_count; | ||
338 | hw->gpio_cs = platp->gpio_cs; | ||
339 | if (platp->gpio_cs_count && !platp->gpio_cs) | ||
340 | goto exit_busy; | ||
341 | hw->freq = platp->freq; | ||
342 | hw->baudwidth = platp->baudwidth; | ||
343 | } else { | ||
344 | err = tiny_spi_of_probe(pdev); | ||
345 | if (err) | ||
346 | goto exit; | ||
347 | } | ||
348 | for (i = 0; i < hw->gpio_cs_count; i++) { | ||
349 | err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev)); | ||
350 | if (err) | ||
351 | goto exit_gpio; | ||
352 | gpio_direction_output(hw->gpio_cs[i], 1); | ||
353 | } | ||
354 | hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count); | ||
355 | |||
356 | /* register our spi controller */ | ||
357 | err = spi_bitbang_start(&hw->bitbang); | ||
358 | if (err) | ||
359 | goto exit; | ||
360 | dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); | ||
361 | |||
362 | return 0; | ||
363 | |||
364 | exit_gpio: | ||
365 | while (i-- > 0) | ||
366 | gpio_free(hw->gpio_cs[i]); | ||
367 | exit_busy: | ||
368 | err = -EBUSY; | ||
369 | exit: | ||
370 | platform_set_drvdata(pdev, NULL); | ||
371 | spi_master_put(master); | ||
372 | return err; | ||
373 | } | ||
374 | |||
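The probe above takes its configuration either from platform data or, failing that, from the device tree. A minimal sketch of the platform-data path as a board file might use it; the field names follow how platp is consumed above (the exact layout lives in the spi_oc_tiny.h header this file includes), and the GPIO numbers and clock values are placeholders:

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/spi/spi_oc_tiny.h>

static int example_cs_gpios[] = { 10, 11 };	/* placeholder GPIO numbers */

static struct tiny_spi_platform_data example_tiny_spi_pdata = {
	.gpio_cs_count	= ARRAY_SIZE(example_cs_gpios),
	.gpio_cs	= example_cs_gpios,
	.freq		= 50000000,	/* clock feeding the tiny SPI core */
	.baudwidth	= 8,		/* width of the baud divider register */
};

static struct platform_device example_tiny_spi_device = {
	.name	= "spi_oc_tiny",	/* matches DRV_NAME above */
	.id	= 0,			/* becomes master->bus_num */
	.dev	= {
		.platform_data = &example_tiny_spi_pdata,
	},
	/* .resource/.num_resources for the MMIO window and IRQ go here */
};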
375 | static int __devexit tiny_spi_remove(struct platform_device *pdev) | ||
376 | { | ||
377 | struct tiny_spi *hw = platform_get_drvdata(pdev); | ||
378 | struct spi_master *master = hw->bitbang.master; | ||
379 | unsigned int i; | ||
380 | |||
381 | spi_bitbang_stop(&hw->bitbang); | ||
382 | for (i = 0; i < hw->gpio_cs_count; i++) | ||
383 | gpio_free(hw->gpio_cs[i]); | ||
384 | platform_set_drvdata(pdev, NULL); | ||
385 | spi_master_put(master); | ||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | #ifdef CONFIG_OF | ||
390 | static const struct of_device_id tiny_spi_match[] = { | ||
391 | { .compatible = "opencores,tiny-spi-rtlsvn2", }, | ||
392 | {}, | ||
393 | }; | ||
394 | MODULE_DEVICE_TABLE(of, tiny_spi_match); | ||
395 | #else /* CONFIG_OF */ | ||
396 | #define tiny_spi_match NULL | ||
397 | #endif /* CONFIG_OF */ | ||
398 | |||
399 | static struct platform_driver tiny_spi_driver = { | ||
400 | .probe = tiny_spi_probe, | ||
401 | .remove = __devexit_p(tiny_spi_remove), | ||
402 | .driver = { | ||
403 | .name = DRV_NAME, | ||
404 | .owner = THIS_MODULE, | ||
405 | .pm = NULL, | ||
406 | .of_match_table = tiny_spi_match, | ||
407 | }, | ||
408 | }; | ||
409 | |||
410 | static int __init tiny_spi_init(void) | ||
411 | { | ||
412 | return platform_driver_register(&tiny_spi_driver); | ||
413 | } | ||
414 | module_init(tiny_spi_init); | ||
415 | |||
416 | static void __exit tiny_spi_exit(void) | ||
417 | { | ||
418 | platform_driver_unregister(&tiny_spi_driver); | ||
419 | } | ||
420 | module_exit(tiny_spi_exit); | ||
421 | |||
422 | MODULE_DESCRIPTION("OpenCores tiny SPI driver"); | ||
423 | MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); | ||
424 | MODULE_LICENSE("GPL"); | ||
425 | MODULE_ALIAS("platform:" DRV_NAME); | ||
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index 80e172d3e72a..2a298c029194 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c | |||
@@ -390,8 +390,7 @@ static void free_gpios(struct ppc4xx_spi *hw) | |||
390 | /* | 390 | /* |
391 | * platform_device layer stuff... | 391 | * platform_device layer stuff... |
392 | */ | 392 | */ |
393 | static int __init spi_ppc4xx_of_probe(struct platform_device *op, | 393 | static int __init spi_ppc4xx_of_probe(struct platform_device *op) |
394 | const struct of_device_id *match) | ||
395 | { | 394 | { |
396 | struct ppc4xx_spi *hw; | 395 | struct ppc4xx_spi *hw; |
397 | struct spi_master *master; | 396 | struct spi_master *master; |
@@ -586,7 +585,7 @@ static const struct of_device_id spi_ppc4xx_of_match[] = { | |||
586 | 585 | ||
587 | MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); | 586 | MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match); |
588 | 587 | ||
589 | static struct of_platform_driver spi_ppc4xx_of_driver = { | 588 | static struct platform_driver spi_ppc4xx_of_driver = { |
590 | .probe = spi_ppc4xx_of_probe, | 589 | .probe = spi_ppc4xx_of_probe, |
591 | .remove = __exit_p(spi_ppc4xx_of_remove), | 590 | .remove = __exit_p(spi_ppc4xx_of_remove), |
592 | .driver = { | 591 | .driver = { |
@@ -598,13 +597,13 @@ static struct of_platform_driver spi_ppc4xx_of_driver = { | |||
598 | 597 | ||
599 | static int __init spi_ppc4xx_init(void) | 598 | static int __init spi_ppc4xx_init(void) |
600 | { | 599 | { |
601 | return of_register_platform_driver(&spi_ppc4xx_of_driver); | 600 | return platform_driver_register(&spi_ppc4xx_of_driver); |
602 | } | 601 | } |
603 | module_init(spi_ppc4xx_init); | 602 | module_init(spi_ppc4xx_init); |
604 | 603 | ||
605 | static void __exit spi_ppc4xx_exit(void) | 604 | static void __exit spi_ppc4xx_exit(void) |
606 | { | 605 | { |
607 | of_unregister_platform_driver(&spi_ppc4xx_of_driver); | 606 | platform_driver_unregister(&spi_ppc4xx_of_driver); |
608 | } | 607 | } |
609 | module_exit(spi_ppc4xx_exit); | 608 | module_exit(spi_ppc4xx_exit); |
610 | 609 | ||
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index 151a95e40653..1a5fcabfd565 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c | |||
@@ -668,7 +668,7 @@ static int __exit s3c24xx_spi_remove(struct platform_device *dev) | |||
668 | 668 | ||
669 | platform_set_drvdata(dev, NULL); | 669 | platform_set_drvdata(dev, NULL); |
670 | 670 | ||
671 | spi_unregister_master(hw->master); | 671 | spi_bitbang_stop(&hw->bitbang); |
672 | 672 | ||
673 | clk_disable(hw->clk); | 673 | clk_disable(hw->clk); |
674 | clk_put(hw->clk); | 674 | clk_put(hw->clk); |
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index c3038da2648a..8945e201e42e 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c | |||
@@ -116,9 +116,7 @@ | |||
116 | (((i)->fifo_lvl_mask + 1))) \ | 116 | (((i)->fifo_lvl_mask + 1))) \ |
117 | ? 1 : 0) | 117 | ? 1 : 0) |
118 | 118 | ||
119 | #define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \ | 119 | #define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0) |
120 | (((i)->fifo_lvl_mask + 1) << 1)) \ | ||
121 | ? 1 : 0) | ||
122 | #define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask) | 120 | #define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask) |
123 | #define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask) | 121 | #define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask) |
124 | 122 | ||
@@ -261,15 +259,25 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, | |||
261 | chcfg |= S3C64XX_SPI_CH_TXCH_ON; | 259 | chcfg |= S3C64XX_SPI_CH_TXCH_ON; |
262 | if (dma_mode) { | 260 | if (dma_mode) { |
263 | modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; | 261 | modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; |
264 | s3c2410_dma_config(sdd->tx_dmach, 1); | 262 | s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); |
265 | s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, | 263 | s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, |
266 | xfer->tx_dma, xfer->len); | 264 | xfer->tx_dma, xfer->len); |
267 | s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); | 265 | s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); |
268 | } else { | 266 | } else { |
269 | unsigned char *buf = (unsigned char *) xfer->tx_buf; | 267 | switch (sdd->cur_bpw) { |
270 | int i = 0; | 268 | case 32: |
271 | while (i < xfer->len) | 269 | iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, |
272 | writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA); | 270 | xfer->tx_buf, xfer->len / 4); |
271 | break; | ||
272 | case 16: | ||
273 | iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, | ||
274 | xfer->tx_buf, xfer->len / 2); | ||
275 | break; | ||
276 | default: | ||
277 | iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, | ||
278 | xfer->tx_buf, xfer->len); | ||
279 | break; | ||
280 | } | ||
273 | } | 281 | } |
274 | } | 282 | } |
275 | 283 | ||
@@ -286,7 +294,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, | |||
286 | writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | 294 | writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) |
287 | | S3C64XX_SPI_PACKET_CNT_EN, | 295 | | S3C64XX_SPI_PACKET_CNT_EN, |
288 | regs + S3C64XX_SPI_PACKET_CNT); | 296 | regs + S3C64XX_SPI_PACKET_CNT); |
289 | s3c2410_dma_config(sdd->rx_dmach, 1); | 297 | s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); |
290 | s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, | 298 | s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, |
291 | xfer->rx_dma, xfer->len); | 299 | xfer->rx_dma, xfer->len); |
292 | s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); | 300 | s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); |
@@ -366,20 +374,26 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, | |||
366 | return -EIO; | 374 | return -EIO; |
367 | } | 375 | } |
368 | } else { | 376 | } else { |
369 | unsigned char *buf; | ||
370 | int i; | ||
371 | |||
372 | /* If it was only Tx */ | 377 | /* If it was only Tx */ |
373 | if (xfer->rx_buf == NULL) { | 378 | if (xfer->rx_buf == NULL) { |
374 | sdd->state &= ~TXBUSY; | 379 | sdd->state &= ~TXBUSY; |
375 | return 0; | 380 | return 0; |
376 | } | 381 | } |
377 | 382 | ||
378 | i = 0; | 383 | switch (sdd->cur_bpw) { |
379 | buf = xfer->rx_buf; | 384 | case 32: |
380 | while (i < xfer->len) | 385 | ioread32_rep(regs + S3C64XX_SPI_RX_DATA, |
381 | buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA); | 386 | xfer->rx_buf, xfer->len / 4); |
382 | 387 | break; | |
388 | case 16: | ||
389 | ioread16_rep(regs + S3C64XX_SPI_RX_DATA, | ||
390 | xfer->rx_buf, xfer->len / 2); | ||
391 | break; | ||
392 | default: | ||
393 | ioread8_rep(regs + S3C64XX_SPI_RX_DATA, | ||
394 | xfer->rx_buf, xfer->len); | ||
395 | break; | ||
396 | } | ||
383 | sdd->state &= ~RXBUSY; | 397 | sdd->state &= ~RXBUSY; |
384 | } | 398 | } |
385 | 399 | ||
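Both PIO paths now choose an access width from the current bits_per_word instead of byte-at-a-time readb()/writeb(), which is also why handle_msg() further down rejects transfer lengths that are not a multiple of the word size. A condensed, illustrative helper pairing those two checks; only the _rep accessors used above are assumed:

#include <linux/errno.h>
#include <linux/io.h>

static int example_pio_read(void __iomem *fifo, void *buf,
		unsigned int len, unsigned int bpw)
{
	if (len % (bpw / 8))
		return -EIO;	/* length must be a whole number of words */

	switch (bpw) {
	case 32:
		ioread32_rep(fifo, buf, len / 4);
		break;
	case 16:
		ioread16_rep(fifo, buf, len / 2);
		break;
	default:
		ioread8_rep(fifo, buf, len);
		break;
	}
	return 0;
}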
@@ -399,13 +413,18 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, | |||
399 | 413 | ||
400 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | 414 | static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) |
401 | { | 415 | { |
416 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; | ||
402 | void __iomem *regs = sdd->regs; | 417 | void __iomem *regs = sdd->regs; |
403 | u32 val; | 418 | u32 val; |
404 | 419 | ||
405 | /* Disable Clock */ | 420 | /* Disable Clock */ |
406 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | 421 | if (sci->clk_from_cmu) { |
407 | val &= ~S3C64XX_SPI_ENCLK_ENABLE; | 422 | clk_disable(sdd->src_clk); |
408 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | 423 | } else { |
424 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | ||
425 | val &= ~S3C64XX_SPI_ENCLK_ENABLE; | ||
426 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | ||
427 | } | ||
409 | 428 | ||
410 | /* Set Polarity and Phase */ | 429 | /* Set Polarity and Phase */ |
411 | val = readl(regs + S3C64XX_SPI_CH_CFG); | 430 | val = readl(regs + S3C64XX_SPI_CH_CFG); |
@@ -429,29 +448,39 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) | |||
429 | switch (sdd->cur_bpw) { | 448 | switch (sdd->cur_bpw) { |
430 | case 32: | 449 | case 32: |
431 | val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; | 450 | val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; |
451 | val |= S3C64XX_SPI_MODE_CH_TSZ_WORD; | ||
432 | break; | 452 | break; |
433 | case 16: | 453 | case 16: |
434 | val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; | 454 | val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; |
455 | val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD; | ||
435 | break; | 456 | break; |
436 | default: | 457 | default: |
437 | val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; | 458 | val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; |
459 | val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; | ||
438 | break; | 460 | break; |
439 | } | 461 | } |
440 | val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */ | ||
441 | 462 | ||
442 | writel(val, regs + S3C64XX_SPI_MODE_CFG); | 463 | writel(val, regs + S3C64XX_SPI_MODE_CFG); |
443 | 464 | ||
444 | /* Configure Clock */ | 465 | if (sci->clk_from_cmu) { |
445 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | 466 | /* Configure Clock */ |
446 | val &= ~S3C64XX_SPI_PSR_MASK; | 467 | /* There is half-multiplier before the SPI */ |
447 | val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) | 468 | clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); |
448 | & S3C64XX_SPI_PSR_MASK); | 469 | /* Enable Clock */ |
449 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | 470 | clk_enable(sdd->src_clk); |
450 | 471 | } else { | |
451 | /* Enable Clock */ | 472 | /* Configure Clock */ |
452 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | 473 | val = readl(regs + S3C64XX_SPI_CLK_CFG); |
453 | val |= S3C64XX_SPI_ENCLK_ENABLE; | 474 | val &= ~S3C64XX_SPI_PSR_MASK; |
454 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | 475 | val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) |
476 | & S3C64XX_SPI_PSR_MASK); | ||
477 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | ||
478 | |||
479 | /* Enable Clock */ | ||
480 | val = readl(regs + S3C64XX_SPI_CLK_CFG); | ||
481 | val |= S3C64XX_SPI_ENCLK_ENABLE; | ||
482 | writel(val, regs + S3C64XX_SPI_CLK_CFG); | ||
483 | } | ||
455 | } | 484 | } |
456 | 485 | ||
457 | static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, | 486 | static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, |
@@ -499,6 +528,7 @@ static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, | |||
499 | static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, | 528 | static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, |
500 | struct spi_message *msg) | 529 | struct spi_message *msg) |
501 | { | 530 | { |
531 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; | ||
502 | struct device *dev = &sdd->pdev->dev; | 532 | struct device *dev = &sdd->pdev->dev; |
503 | struct spi_transfer *xfer; | 533 | struct spi_transfer *xfer; |
504 | 534 | ||
@@ -514,6 +544,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, | |||
514 | /* Map until end or first fail */ | 544 | /* Map until end or first fail */ |
515 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 545 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
516 | 546 | ||
547 | if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) | ||
548 | continue; | ||
549 | |||
517 | if (xfer->tx_buf != NULL) { | 550 | if (xfer->tx_buf != NULL) { |
518 | xfer->tx_dma = dma_map_single(dev, | 551 | xfer->tx_dma = dma_map_single(dev, |
519 | (void *)xfer->tx_buf, xfer->len, | 552 | (void *)xfer->tx_buf, xfer->len, |
@@ -545,6 +578,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, | |||
545 | static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, | 578 | static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, |
546 | struct spi_message *msg) | 579 | struct spi_message *msg) |
547 | { | 580 | { |
581 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; | ||
548 | struct device *dev = &sdd->pdev->dev; | 582 | struct device *dev = &sdd->pdev->dev; |
549 | struct spi_transfer *xfer; | 583 | struct spi_transfer *xfer; |
550 | 584 | ||
@@ -553,6 +587,9 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, | |||
553 | 587 | ||
554 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 588 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
555 | 589 | ||
590 | if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) | ||
591 | continue; | ||
592 | |||
556 | if (xfer->rx_buf != NULL | 593 | if (xfer->rx_buf != NULL |
557 | && xfer->rx_dma != XFER_DMAADDR_INVALID) | 594 | && xfer->rx_dma != XFER_DMAADDR_INVALID) |
558 | dma_unmap_single(dev, xfer->rx_dma, | 595 | dma_unmap_single(dev, xfer->rx_dma, |
@@ -608,6 +645,14 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, | |||
608 | bpw = xfer->bits_per_word ? : spi->bits_per_word; | 645 | bpw = xfer->bits_per_word ? : spi->bits_per_word; |
609 | speed = xfer->speed_hz ? : spi->max_speed_hz; | 646 | speed = xfer->speed_hz ? : spi->max_speed_hz; |
610 | 647 | ||
648 | if (xfer->len % (bpw / 8)) { | ||
649 | dev_err(&spi->dev, | ||
650 | "Xfer length(%u) not a multiple of word size(%u)\n", | ||
651 | xfer->len, bpw / 8); | ||
652 | status = -EIO; | ||
653 | goto out; | ||
654 | } | ||
655 | |||
611 | if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { | 656 | if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { |
612 | sdd->cur_bpw = bpw; | 657 | sdd->cur_bpw = bpw; |
613 | sdd->cur_speed = speed; | 658 | sdd->cur_speed = speed; |
@@ -798,7 +843,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
798 | struct s3c64xx_spi_driver_data *sdd; | 843 | struct s3c64xx_spi_driver_data *sdd; |
799 | struct s3c64xx_spi_info *sci; | 844 | struct s3c64xx_spi_info *sci; |
800 | struct spi_message *msg; | 845 | struct spi_message *msg; |
801 | u32 psr, speed; | ||
802 | unsigned long flags; | 846 | unsigned long flags; |
803 | int err = 0; | 847 | int err = 0; |
804 | 848 | ||
@@ -841,32 +885,37 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
841 | } | 885 | } |
842 | 886 | ||
843 | /* Check if we can provide the requested rate */ | 887 | /* Check if we can provide the requested rate */ |
844 | speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ | 888 | if (!sci->clk_from_cmu) { |
845 | 889 | u32 psr, speed; | |
846 | if (spi->max_speed_hz > speed) | 890 | |
847 | spi->max_speed_hz = speed; | 891 | /* Max possible */ |
848 | 892 | speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); | |
849 | psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; | 893 | |
850 | psr &= S3C64XX_SPI_PSR_MASK; | 894 | if (spi->max_speed_hz > speed) |
851 | if (psr == S3C64XX_SPI_PSR_MASK) | 895 | spi->max_speed_hz = speed; |
852 | psr--; | 896 | |
897 | psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; | ||
898 | psr &= S3C64XX_SPI_PSR_MASK; | ||
899 | if (psr == S3C64XX_SPI_PSR_MASK) | ||
900 | psr--; | ||
901 | |||
902 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); | ||
903 | if (spi->max_speed_hz < speed) { | ||
904 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { | ||
905 | psr++; | ||
906 | } else { | ||
907 | err = -EINVAL; | ||
908 | goto setup_exit; | ||
909 | } | ||
910 | } | ||
853 | 911 | ||
854 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); | 912 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); |
855 | if (spi->max_speed_hz < speed) { | 913 | if (spi->max_speed_hz >= speed) |
856 | if (psr+1 < S3C64XX_SPI_PSR_MASK) { | 914 | spi->max_speed_hz = speed; |
857 | psr++; | 915 | else |
858 | } else { | ||
859 | err = -EINVAL; | 916 | err = -EINVAL; |
860 | goto setup_exit; | ||
861 | } | ||
862 | } | 917 | } |
863 | 918 | ||
864 | speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); | ||
865 | if (spi->max_speed_hz >= speed) | ||
866 | spi->max_speed_hz = speed; | ||
867 | else | ||
868 | err = -EINVAL; | ||
869 | |||
870 | setup_exit: | 919 | setup_exit: |
871 | 920 | ||
872 | /* setup() returns with device de-selected */ | 921 | /* setup() returns with device de-selected */ |
@@ -888,7 +937,8 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | |||
888 | /* Disable Interrupts - we use Polling if not DMA mode */ | 937 | /* Disable Interrupts - we use Polling if not DMA mode */ |
889 | writel(0, regs + S3C64XX_SPI_INT_EN); | 938 | writel(0, regs + S3C64XX_SPI_INT_EN); |
890 | 939 | ||
891 | writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, | 940 | if (!sci->clk_from_cmu) |
941 | writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, | ||
892 | regs + S3C64XX_SPI_CLK_CFG); | 942 | regs + S3C64XX_SPI_CLK_CFG); |
893 | writel(0, regs + S3C64XX_SPI_MODE_CFG); | 943 | writel(0, regs + S3C64XX_SPI_MODE_CFG); |
894 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); | 944 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); |
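The s3c64xx hunks above add a CMU-driven clock path, a DMA threshold, and a word-size check: transfers no longer than half the FIFO depth (plus one word) are serviced by polling, so map_mssg()/unmap_mssg() now skip them, and handle_msg() rejects any transfer whose length is not a multiple of the word size. A minimal sketch of the threshold predicate, assuming the fifo_lvl_mask field used above (the helper name itself is hypothetical):

	/* True when a transfer is long enough to be worth DMA-mapping;
	 * shorter transfers stay on the polling (PIO) path, mirroring the
	 * "xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)" checks above. */
	static inline bool s3c64xx_xfer_uses_dma(unsigned int fifo_lvl_mask,
						 unsigned int len)
	{
		return len > ((fifo_lvl_mask >> 1) + 1);
	}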
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi_sh.c new file mode 100644 index 000000000000..9eedd71ad898 --- /dev/null +++ b/drivers/spi/spi_sh.c | |||
@@ -0,0 +1,543 @@ | |||
1 | /* | ||
2 | * SH SPI bus driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Renesas Solutions Corp. | ||
5 | * | ||
6 | * Based on pxa2xx_spi.c: | ||
7 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; version 2 of the License. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/timer.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/list.h> | ||
31 | #include <linux/workqueue.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/io.h> | ||
35 | #include <linux/spi/spi.h> | ||
36 | |||
37 | #define SPI_SH_TBR 0x00 | ||
38 | #define SPI_SH_RBR 0x00 | ||
39 | #define SPI_SH_CR1 0x08 | ||
40 | #define SPI_SH_CR2 0x10 | ||
41 | #define SPI_SH_CR3 0x18 | ||
42 | #define SPI_SH_CR4 0x20 | ||
43 | #define SPI_SH_CR5 0x28 | ||
44 | |||
45 | /* CR1 */ | ||
46 | #define SPI_SH_TBE 0x80 | ||
47 | #define SPI_SH_TBF 0x40 | ||
48 | #define SPI_SH_RBE 0x20 | ||
49 | #define SPI_SH_RBF 0x10 | ||
50 | #define SPI_SH_PFONRD 0x08 | ||
51 | #define SPI_SH_SSDB 0x04 | ||
52 | #define SPI_SH_SSD 0x02 | ||
53 | #define SPI_SH_SSA 0x01 | ||
54 | |||
55 | /* CR2 */ | ||
56 | #define SPI_SH_RSTF 0x80 | ||
57 | #define SPI_SH_LOOPBK 0x40 | ||
58 | #define SPI_SH_CPOL 0x20 | ||
59 | #define SPI_SH_CPHA 0x10 | ||
60 | #define SPI_SH_L1M0 0x08 | ||
61 | |||
62 | /* CR3 */ | ||
63 | #define SPI_SH_MAX_BYTE 0xFF | ||
64 | |||
65 | /* CR4 */ | ||
66 | #define SPI_SH_TBEI 0x80 | ||
67 | #define SPI_SH_TBFI 0x40 | ||
68 | #define SPI_SH_RBEI 0x20 | ||
69 | #define SPI_SH_RBFI 0x10 | ||
70 | #define SPI_SH_WPABRT 0x04 | ||
71 | #define SPI_SH_SSS 0x01 | ||
72 | |||
73 | /* CR8 */ | ||
74 | #define SPI_SH_P1L0 0x80 | ||
75 | #define SPI_SH_PP1L0 0x40 | ||
76 | #define SPI_SH_MUXI 0x20 | ||
77 | #define SPI_SH_MUXIRQ 0x10 | ||
78 | |||
79 | #define SPI_SH_FIFO_SIZE 32 | ||
80 | #define SPI_SH_SEND_TIMEOUT (3 * HZ) | ||
81 | #define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3) | ||
82 | |||
83 | #undef DEBUG | ||
84 | |||
85 | struct spi_sh_data { | ||
86 | void __iomem *addr; | ||
87 | int irq; | ||
88 | struct spi_master *master; | ||
89 | struct list_head queue; | ||
90 | struct workqueue_struct *workqueue; | ||
91 | struct work_struct ws; | ||
92 | unsigned long cr1; | ||
93 | wait_queue_head_t wait; | ||
94 | spinlock_t lock; | ||
95 | }; | ||
96 | |||
97 | static void spi_sh_write(struct spi_sh_data *ss, unsigned long data, | ||
98 | unsigned long offset) | ||
99 | { | ||
100 | writel(data, ss->addr + offset); | ||
101 | } | ||
102 | |||
103 | static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset) | ||
104 | { | ||
105 | return readl(ss->addr + offset); | ||
106 | } | ||
107 | |||
108 | static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val, | ||
109 | unsigned long offset) | ||
110 | { | ||
111 | unsigned long tmp; | ||
112 | |||
113 | tmp = spi_sh_read(ss, offset); | ||
114 | tmp |= val; | ||
115 | spi_sh_write(ss, tmp, offset); | ||
116 | } | ||
117 | |||
118 | static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val, | ||
119 | unsigned long offset) | ||
120 | { | ||
121 | unsigned long tmp; | ||
122 | |||
123 | tmp = spi_sh_read(ss, offset); | ||
124 | tmp &= ~val; | ||
125 | spi_sh_write(ss, tmp, offset); | ||
126 | } | ||
127 | |||
128 | static void clear_fifo(struct spi_sh_data *ss) | ||
129 | { | ||
130 | spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); | ||
131 | spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2); | ||
132 | } | ||
133 | |||
134 | static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss) | ||
135 | { | ||
136 | int timeout = 100000; | ||
137 | |||
138 | while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { | ||
139 | udelay(10); | ||
140 | if (timeout-- < 0) | ||
141 | return -ETIMEDOUT; | ||
142 | } | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss) | ||
147 | { | ||
148 | int timeout = 100000; | ||
149 | |||
150 | while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) { | ||
151 | udelay(10); | ||
152 | if (timeout-- < 0) | ||
153 | return -ETIMEDOUT; | ||
154 | } | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg, | ||
159 | struct spi_transfer *t) | ||
160 | { | ||
161 | int i, retval = 0; | ||
162 | int remain = t->len; | ||
163 | int cur_len; | ||
164 | unsigned char *data; | ||
165 | unsigned long tmp; | ||
166 | long ret; | ||
167 | |||
168 | if (t->len) | ||
169 | spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); | ||
170 | |||
171 | data = (unsigned char *)t->tx_buf; | ||
172 | while (remain > 0) { | ||
173 | cur_len = min(SPI_SH_FIFO_SIZE, remain); | ||
174 | for (i = 0; i < cur_len && | ||
175 | !(spi_sh_read(ss, SPI_SH_CR4) & | ||
176 | SPI_SH_WPABRT) && | ||
177 | !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF); | ||
178 | i++) | ||
179 | spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR); | ||
180 | |||
181 | if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) { | ||
182 | /* Abort SPI operation */ | ||
183 | spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4); | ||
184 | retval = -EIO; | ||
185 | break; | ||
186 | } | ||
187 | |||
188 | cur_len = i; | ||
189 | |||
190 | remain -= cur_len; | ||
191 | data += cur_len; | ||
192 | |||
193 | if (remain > 0) { | ||
194 | ss->cr1 &= ~SPI_SH_TBE; | ||
195 | spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); | ||
196 | ret = wait_event_interruptible_timeout(ss->wait, | ||
197 | ss->cr1 & SPI_SH_TBE, | ||
198 | SPI_SH_SEND_TIMEOUT); | ||
199 | if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) { | ||
200 | printk(KERN_ERR "%s: timeout\n", __func__); | ||
201 | return -ETIMEDOUT; | ||
202 | } | ||
203 | } | ||
204 | } | ||
205 | |||
206 | if (list_is_last(&t->transfer_list, &mesg->transfers)) { | ||
207 | tmp = spi_sh_read(ss, SPI_SH_CR1); | ||
208 | tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB); | ||
209 | spi_sh_write(ss, tmp, SPI_SH_CR1); | ||
210 | spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); | ||
211 | |||
212 | ss->cr1 &= ~SPI_SH_TBE; | ||
213 | spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4); | ||
214 | ret = wait_event_interruptible_timeout(ss->wait, | ||
215 | ss->cr1 & SPI_SH_TBE, | ||
216 | SPI_SH_SEND_TIMEOUT); | ||
217 | if (ret == 0 && (ss->cr1 & SPI_SH_TBE)) { | ||
218 | printk(KERN_ERR "%s: timeout\n", __func__); | ||
219 | return -ETIMEDOUT; | ||
220 | } | ||
221 | } | ||
222 | |||
223 | return retval; | ||
224 | } | ||
225 | |||
226 | static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg, | ||
227 | struct spi_transfer *t) | ||
228 | { | ||
229 | int i; | ||
230 | int remain = t->len; | ||
231 | int cur_len; | ||
232 | unsigned char *data; | ||
233 | unsigned long tmp; | ||
234 | long ret; | ||
235 | |||
236 | if (t->len > SPI_SH_MAX_BYTE) | ||
237 | spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3); | ||
238 | else | ||
239 | spi_sh_write(ss, t->len, SPI_SH_CR3); | ||
240 | |||
241 | tmp = spi_sh_read(ss, SPI_SH_CR1); | ||
242 | tmp = tmp & ~(SPI_SH_SSD | SPI_SH_SSDB); | ||
243 | spi_sh_write(ss, tmp, SPI_SH_CR1); | ||
244 | spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1); | ||
245 | |||
246 | spi_sh_wait_write_buffer_empty(ss); | ||
247 | |||
248 | data = (unsigned char *)t->rx_buf; | ||
249 | while (remain > 0) { | ||
250 | if (remain >= SPI_SH_FIFO_SIZE) { | ||
251 | ss->cr1 &= ~SPI_SH_RBF; | ||
252 | spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4); | ||
253 | ret = wait_event_interruptible_timeout(ss->wait, | ||
254 | ss->cr1 & SPI_SH_RBF, | ||
255 | SPI_SH_RECEIVE_TIMEOUT); | ||
256 | if (ret == 0 && | ||
257 | spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) { | ||
258 | printk(KERN_ERR "%s: timeout\n", __func__); | ||
259 | return -ETIMEDOUT; | ||
260 | } | ||
261 | } | ||
262 | |||
263 | cur_len = min(SPI_SH_FIFO_SIZE, remain); | ||
264 | for (i = 0; i < cur_len; i++) { | ||
265 | if (spi_sh_wait_receive_buffer(ss)) | ||
266 | break; | ||
267 | data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR); | ||
268 | } | ||
269 | |||
270 | remain -= cur_len; | ||
271 | data += cur_len; | ||
272 | } | ||
273 | |||
274 | /* deassert CS when SPI is receiving. */ | ||
275 | if (t->len > SPI_SH_MAX_BYTE) { | ||
276 | clear_fifo(ss); | ||
277 | spi_sh_write(ss, 1, SPI_SH_CR3); | ||
278 | } else { | ||
279 | spi_sh_write(ss, 0, SPI_SH_CR3); | ||
280 | } | ||
281 | |||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static void spi_sh_work(struct work_struct *work) | ||
286 | { | ||
287 | struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws); | ||
288 | struct spi_message *mesg; | ||
289 | struct spi_transfer *t; | ||
290 | unsigned long flags; | ||
291 | int ret; | ||
292 | |||
293 | pr_debug("%s: enter\n", __func__); | ||
294 | |||
295 | spin_lock_irqsave(&ss->lock, flags); | ||
296 | while (!list_empty(&ss->queue)) { | ||
297 | mesg = list_entry(ss->queue.next, struct spi_message, queue); | ||
298 | list_del_init(&mesg->queue); | ||
299 | |||
300 | spin_unlock_irqrestore(&ss->lock, flags); | ||
301 | list_for_each_entry(t, &mesg->transfers, transfer_list) { | ||
302 | pr_debug("tx_buf = %p, rx_buf = %p\n", | ||
303 | t->tx_buf, t->rx_buf); | ||
304 | pr_debug("len = %d, delay_usecs = %d\n", | ||
305 | t->len, t->delay_usecs); | ||
306 | |||
307 | if (t->tx_buf) { | ||
308 | ret = spi_sh_send(ss, mesg, t); | ||
309 | if (ret < 0) | ||
310 | goto error; | ||
311 | } | ||
312 | if (t->rx_buf) { | ||
313 | ret = spi_sh_receive(ss, mesg, t); | ||
314 | if (ret < 0) | ||
315 | goto error; | ||
316 | } | ||
317 | mesg->actual_length += t->len; | ||
318 | } | ||
319 | spin_lock_irqsave(&ss->lock, flags); | ||
320 | |||
321 | mesg->status = 0; | ||
322 | mesg->complete(mesg->context); | ||
323 | } | ||
324 | |||
325 | clear_fifo(ss); | ||
326 | spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1); | ||
327 | udelay(100); | ||
328 | |||
329 | spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, | ||
330 | SPI_SH_CR1); | ||
331 | |||
332 | clear_fifo(ss); | ||
333 | |||
334 | spin_unlock_irqrestore(&ss->lock, flags); | ||
335 | |||
336 | return; | ||
337 | |||
338 | error: | ||
339 | mesg->status = ret; | ||
340 | mesg->complete(mesg->context); | ||
341 | |||
342 | spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, | ||
343 | SPI_SH_CR1); | ||
344 | clear_fifo(ss); | ||
345 | |||
346 | } | ||
347 | |||
348 | static int spi_sh_setup(struct spi_device *spi) | ||
349 | { | ||
350 | struct spi_sh_data *ss = spi_master_get_devdata(spi->master); | ||
351 | |||
352 | if (!spi->bits_per_word) | ||
353 | spi->bits_per_word = 8; | ||
354 | |||
355 | pr_debug("%s: enter\n", __func__); | ||
356 | |||
357 | spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI cycle stop */ | ||
358 | spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */ | ||
359 | spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */ | ||
360 | |||
361 | clear_fifo(ss); | ||
362 | |||
363 | /* 1/8 clock */ | ||
364 | spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2); | ||
365 | udelay(10); | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg) | ||
371 | { | ||
372 | struct spi_sh_data *ss = spi_master_get_devdata(spi->master); | ||
373 | unsigned long flags; | ||
374 | |||
375 | pr_debug("%s: enter\n", __func__); | ||
376 | pr_debug("\tmode = %02x\n", spi->mode); | ||
377 | |||
378 | spin_lock_irqsave(&ss->lock, flags); | ||
379 | |||
380 | mesg->actual_length = 0; | ||
381 | mesg->status = -EINPROGRESS; | ||
382 | |||
383 | spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1); | ||
384 | |||
385 | list_add_tail(&mesg->queue, &ss->queue); | ||
386 | queue_work(ss->workqueue, &ss->ws); | ||
387 | |||
388 | spin_unlock_irqrestore(&ss->lock, flags); | ||
389 | |||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | static void spi_sh_cleanup(struct spi_device *spi) | ||
394 | { | ||
395 | struct spi_sh_data *ss = spi_master_get_devdata(spi->master); | ||
396 | |||
397 | pr_debug("%s: enter\n", __func__); | ||
398 | |||
399 | spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD, | ||
400 | SPI_SH_CR1); | ||
401 | } | ||
402 | |||
403 | static irqreturn_t spi_sh_irq(int irq, void *_ss) | ||
404 | { | ||
405 | struct spi_sh_data *ss = (struct spi_sh_data *)_ss; | ||
406 | unsigned long cr1; | ||
407 | |||
408 | cr1 = spi_sh_read(ss, SPI_SH_CR1); | ||
409 | if (cr1 & SPI_SH_TBE) | ||
410 | ss->cr1 |= SPI_SH_TBE; | ||
411 | if (cr1 & SPI_SH_TBF) | ||
412 | ss->cr1 |= SPI_SH_TBF; | ||
413 | if (cr1 & SPI_SH_RBE) | ||
414 | ss->cr1 |= SPI_SH_RBE; | ||
415 | if (cr1 & SPI_SH_RBF) | ||
416 | ss->cr1 |= SPI_SH_RBF; | ||
417 | |||
418 | if (ss->cr1) { | ||
419 | spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4); | ||
420 | wake_up(&ss->wait); | ||
421 | } | ||
422 | |||
423 | return IRQ_HANDLED; | ||
424 | } | ||
425 | |||
426 | static int __devexit spi_sh_remove(struct platform_device *pdev) | ||
427 | { | ||
428 | struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev); | ||
429 | |||
430 | spi_unregister_master(ss->master); | ||
431 | destroy_workqueue(ss->workqueue); | ||
432 | free_irq(ss->irq, ss); | ||
433 | iounmap(ss->addr); | ||
434 | |||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int __devinit spi_sh_probe(struct platform_device *pdev) | ||
439 | { | ||
440 | struct resource *res; | ||
441 | struct spi_master *master; | ||
442 | struct spi_sh_data *ss; | ||
443 | int ret, irq; | ||
444 | |||
445 | /* get base addr */ | ||
446 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
447 | if (unlikely(res == NULL)) { | ||
448 | dev_err(&pdev->dev, "invalid resource\n"); | ||
449 | return -EINVAL; | ||
450 | } | ||
451 | |||
452 | irq = platform_get_irq(pdev, 0); | ||
453 | if (irq < 0) { | ||
454 | dev_err(&pdev->dev, "platform_get_irq error\n"); | ||
455 | return -ENODEV; | ||
456 | } | ||
457 | |||
458 | master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data)); | ||
459 | if (master == NULL) { | ||
460 | dev_err(&pdev->dev, "spi_alloc_master error.\n"); | ||
461 | return -ENOMEM; | ||
462 | } | ||
463 | |||
464 | ss = spi_master_get_devdata(master); | ||
465 | dev_set_drvdata(&pdev->dev, ss); | ||
466 | |||
467 | ss->irq = irq; | ||
468 | ss->master = master; | ||
469 | ss->addr = ioremap(res->start, resource_size(res)); | ||
470 | if (ss->addr == NULL) { | ||
471 | dev_err(&pdev->dev, "ioremap error.\n"); | ||
472 | ret = -ENOMEM; | ||
473 | goto error1; | ||
474 | } | ||
475 | INIT_LIST_HEAD(&ss->queue); | ||
476 | spin_lock_init(&ss->lock); | ||
477 | INIT_WORK(&ss->ws, spi_sh_work); | ||
478 | init_waitqueue_head(&ss->wait); | ||
479 | ss->workqueue = create_singlethread_workqueue( | ||
480 | dev_name(master->dev.parent)); | ||
481 | if (ss->workqueue == NULL) { | ||
482 | dev_err(&pdev->dev, "create workqueue error\n"); | ||
483 | ret = -EBUSY; | ||
484 | goto error2; | ||
485 | } | ||
486 | |||
487 | ret = request_irq(irq, spi_sh_irq, IRQF_DISABLED, "spi_sh", ss); | ||
488 | if (ret < 0) { | ||
489 | dev_err(&pdev->dev, "request_irq error\n"); | ||
490 | goto error3; | ||
491 | } | ||
492 | |||
493 | master->num_chipselect = 2; | ||
494 | master->bus_num = pdev->id; | ||
495 | master->setup = spi_sh_setup; | ||
496 | master->transfer = spi_sh_transfer; | ||
497 | master->cleanup = spi_sh_cleanup; | ||
498 | |||
499 | ret = spi_register_master(master); | ||
500 | if (ret < 0) { | ||
501 | printk(KERN_ERR "spi_register_master error.\n"); | ||
502 | goto error4; | ||
503 | } | ||
504 | |||
505 | return 0; | ||
506 | |||
507 | error4: | ||
508 | free_irq(irq, ss); | ||
509 | error3: | ||
510 | destroy_workqueue(ss->workqueue); | ||
511 | error2: | ||
512 | iounmap(ss->addr); | ||
513 | error1: | ||
514 | spi_master_put(master); | ||
515 | |||
516 | return ret; | ||
517 | } | ||
518 | |||
519 | static struct platform_driver spi_sh_driver = { | ||
520 | .probe = spi_sh_probe, | ||
521 | .remove = __devexit_p(spi_sh_remove), | ||
522 | .driver = { | ||
523 | .name = "sh_spi", | ||
524 | .owner = THIS_MODULE, | ||
525 | }, | ||
526 | }; | ||
527 | |||
528 | static int __init spi_sh_init(void) | ||
529 | { | ||
530 | return platform_driver_register(&spi_sh_driver); | ||
531 | } | ||
532 | module_init(spi_sh_init); | ||
533 | |||
534 | static void __exit spi_sh_exit(void) | ||
535 | { | ||
536 | platform_driver_unregister(&spi_sh_driver); | ||
537 | } | ||
538 | module_exit(spi_sh_exit); | ||
539 | |||
540 | MODULE_DESCRIPTION("SH SPI bus driver"); | ||
541 | MODULE_LICENSE("GPL"); | ||
542 | MODULE_AUTHOR("Yoshihiro Shimoda"); | ||
543 | MODULE_ALIAS("platform:sh_spi"); | ||
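The new spi_sh driver is queue-driven: spi_sh_transfer() only appends the message to ss->queue and kicks a single-threaded workqueue, and spi_sh_work() walks each transfer, calling spi_sh_send()/spi_sh_receive() and finally mesg->complete(). Protocol drivers keep using the ordinary SPI core API; a hedged sketch of such a caller (the function, command byte and buffers are hypothetical, only the spi_message/spi_sync calls are the standard kernel API):

	#include <linux/spi/spi.h>

	static int example_read_reg(struct spi_device *spi, u8 *buf, size_t len)
	{
		u8 cmd = 0x9f;				/* hypothetical command byte */
		struct spi_transfer t[2] = {
			{ .tx_buf = &cmd, .len = 1 },	/* handled by spi_sh_send() */
			{ .rx_buf = buf,  .len = len },	/* handled by spi_sh_receive() */
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t[0], &m);
		spi_message_add_tail(&t[1], &m);
		return spi_sync(spi, &m);		/* ends up in spi_sh_transfer() */
	}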
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c index d93b66743ba7..e00d94b22250 100644 --- a/drivers/spi/spi_sh_msiof.c +++ b/drivers/spi/spi_sh_msiof.c | |||
@@ -9,22 +9,22 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kernel.h> | 12 | #include <linux/bitmap.h> |
13 | #include <linux/init.h> | 13 | #include <linux/clk.h> |
14 | #include <linux/completion.h> | ||
14 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/err.h> | ||
17 | #include <linux/gpio.h> | ||
18 | #include <linux/init.h> | ||
15 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/io.h> | ||
21 | #include <linux/kernel.h> | ||
16 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
17 | #include <linux/completion.h> | ||
18 | #include <linux/pm_runtime.h> | 23 | #include <linux/pm_runtime.h> |
19 | #include <linux/gpio.h> | ||
20 | #include <linux/bitmap.h> | ||
21 | #include <linux/clk.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/err.h> | ||
24 | 24 | ||
25 | #include <linux/spi/sh_msiof.h> | ||
25 | #include <linux/spi/spi.h> | 26 | #include <linux/spi/spi.h> |
26 | #include <linux/spi/spi_bitbang.h> | 27 | #include <linux/spi/spi_bitbang.h> |
27 | #include <linux/spi/sh_msiof.h> | ||
28 | 28 | ||
29 | #include <asm/unaligned.h> | 29 | #include <asm/unaligned.h> |
30 | 30 | ||
@@ -67,7 +67,7 @@ struct sh_msiof_spi_priv { | |||
67 | #define STR_TEOF (1 << 23) | 67 | #define STR_TEOF (1 << 23) |
68 | #define STR_REOF (1 << 7) | 68 | #define STR_REOF (1 << 7) |
69 | 69 | ||
70 | static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) | 70 | static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) |
71 | { | 71 | { |
72 | switch (reg_offs) { | 72 | switch (reg_offs) { |
73 | case TSCR: | 73 | case TSCR: |
@@ -79,7 +79,7 @@ static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, | 81 | static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, |
82 | unsigned long value) | 82 | u32 value) |
83 | { | 83 | { |
84 | switch (reg_offs) { | 84 | switch (reg_offs) { |
85 | case TSCR: | 85 | case TSCR: |
@@ -93,10 +93,10 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, | |||
93 | } | 93 | } |
94 | 94 | ||
95 | static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, | 95 | static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, |
96 | unsigned long clr, unsigned long set) | 96 | u32 clr, u32 set) |
97 | { | 97 | { |
98 | unsigned long mask = clr | set; | 98 | u32 mask = clr | set; |
99 | unsigned long data; | 99 | u32 data; |
100 | int k; | 100 | int k; |
101 | 101 | ||
102 | data = sh_msiof_read(p, CTR); | 102 | data = sh_msiof_read(p, CTR); |
@@ -166,10 +166,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, | |||
166 | } | 166 | } |
167 | 167 | ||
168 | static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, | 168 | static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, |
169 | int cpol, int cpha, | 169 | u32 cpol, u32 cpha, |
170 | int tx_hi_z, int lsb_first) | 170 | u32 tx_hi_z, u32 lsb_first) |
171 | { | 171 | { |
172 | unsigned long tmp; | 172 | u32 tmp; |
173 | int edge; | 173 | int edge; |
174 | 174 | ||
175 | /* | 175 | /* |
@@ -187,7 +187,7 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, | |||
187 | tmp |= cpol << 30; /* TSCKIZ */ | 187 | tmp |= cpol << 30; /* TSCKIZ */ |
188 | tmp |= cpol << 28; /* RSCKIZ */ | 188 | tmp |= cpol << 28; /* RSCKIZ */ |
189 | 189 | ||
190 | edge = cpol ? cpha : !cpha; | 190 | edge = cpol ^ !cpha; |
191 | 191 | ||
192 | tmp |= edge << 27; /* TEDG */ | 192 | tmp |= edge << 27; /* TEDG */ |
193 | tmp |= edge << 26; /* REDG */ | 193 | tmp |= edge << 26; /* REDG */ |
@@ -197,11 +197,9 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, | |||
197 | 197 | ||
198 | static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, | 198 | static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, |
199 | const void *tx_buf, void *rx_buf, | 199 | const void *tx_buf, void *rx_buf, |
200 | int bits, int words) | 200 | u32 bits, u32 words) |
201 | { | 201 | { |
202 | unsigned long dr2; | 202 | u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16); |
203 | |||
204 | dr2 = ((bits - 1) << 24) | ((words - 1) << 16); | ||
205 | 203 | ||
206 | if (tx_buf) | 204 | if (tx_buf) |
207 | sh_msiof_write(p, TMDR2, dr2); | 205 | sh_msiof_write(p, TMDR2, dr2); |
@@ -222,7 +220,7 @@ static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) | |||
222 | static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, | 220 | static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, |
223 | const void *tx_buf, int words, int fs) | 221 | const void *tx_buf, int words, int fs) |
224 | { | 222 | { |
225 | const unsigned char *buf_8 = tx_buf; | 223 | const u8 *buf_8 = tx_buf; |
226 | int k; | 224 | int k; |
227 | 225 | ||
228 | for (k = 0; k < words; k++) | 226 | for (k = 0; k < words; k++) |
@@ -232,7 +230,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, | |||
232 | static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, | 230 | static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, |
233 | const void *tx_buf, int words, int fs) | 231 | const void *tx_buf, int words, int fs) |
234 | { | 232 | { |
235 | const unsigned short *buf_16 = tx_buf; | 233 | const u16 *buf_16 = tx_buf; |
236 | int k; | 234 | int k; |
237 | 235 | ||
238 | for (k = 0; k < words; k++) | 236 | for (k = 0; k < words; k++) |
@@ -242,7 +240,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, | |||
242 | static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, | 240 | static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, |
243 | const void *tx_buf, int words, int fs) | 241 | const void *tx_buf, int words, int fs) |
244 | { | 242 | { |
245 | const unsigned short *buf_16 = tx_buf; | 243 | const u16 *buf_16 = tx_buf; |
246 | int k; | 244 | int k; |
247 | 245 | ||
248 | for (k = 0; k < words; k++) | 246 | for (k = 0; k < words; k++) |
@@ -252,7 +250,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, | |||
252 | static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, | 250 | static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, |
253 | const void *tx_buf, int words, int fs) | 251 | const void *tx_buf, int words, int fs) |
254 | { | 252 | { |
255 | const unsigned int *buf_32 = tx_buf; | 253 | const u32 *buf_32 = tx_buf; |
256 | int k; | 254 | int k; |
257 | 255 | ||
258 | for (k = 0; k < words; k++) | 256 | for (k = 0; k < words; k++) |
@@ -262,17 +260,37 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, | |||
262 | static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, | 260 | static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, |
263 | const void *tx_buf, int words, int fs) | 261 | const void *tx_buf, int words, int fs) |
264 | { | 262 | { |
265 | const unsigned int *buf_32 = tx_buf; | 263 | const u32 *buf_32 = tx_buf; |
266 | int k; | 264 | int k; |
267 | 265 | ||
268 | for (k = 0; k < words; k++) | 266 | for (k = 0; k < words; k++) |
269 | sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); | 267 | sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); |
270 | } | 268 | } |
271 | 269 | ||
270 | static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, | ||
271 | const void *tx_buf, int words, int fs) | ||
272 | { | ||
273 | const u32 *buf_32 = tx_buf; | ||
274 | int k; | ||
275 | |||
276 | for (k = 0; k < words; k++) | ||
277 | sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs)); | ||
278 | } | ||
279 | |||
280 | static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, | ||
281 | const void *tx_buf, int words, int fs) | ||
282 | { | ||
283 | const u32 *buf_32 = tx_buf; | ||
284 | int k; | ||
285 | |||
286 | for (k = 0; k < words; k++) | ||
287 | sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs)); | ||
288 | } | ||
289 | |||
272 | static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, | 290 | static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, |
273 | void *rx_buf, int words, int fs) | 291 | void *rx_buf, int words, int fs) |
274 | { | 292 | { |
275 | unsigned char *buf_8 = rx_buf; | 293 | u8 *buf_8 = rx_buf; |
276 | int k; | 294 | int k; |
277 | 295 | ||
278 | for (k = 0; k < words; k++) | 296 | for (k = 0; k < words; k++) |
@@ -282,7 +300,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, | |||
282 | static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, | 300 | static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, |
283 | void *rx_buf, int words, int fs) | 301 | void *rx_buf, int words, int fs) |
284 | { | 302 | { |
285 | unsigned short *buf_16 = rx_buf; | 303 | u16 *buf_16 = rx_buf; |
286 | int k; | 304 | int k; |
287 | 305 | ||
288 | for (k = 0; k < words; k++) | 306 | for (k = 0; k < words; k++) |
@@ -292,7 +310,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, | |||
292 | static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, | 310 | static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, |
293 | void *rx_buf, int words, int fs) | 311 | void *rx_buf, int words, int fs) |
294 | { | 312 | { |
295 | unsigned short *buf_16 = rx_buf; | 313 | u16 *buf_16 = rx_buf; |
296 | int k; | 314 | int k; |
297 | 315 | ||
298 | for (k = 0; k < words; k++) | 316 | for (k = 0; k < words; k++) |
@@ -302,7 +320,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, | |||
302 | static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, | 320 | static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, |
303 | void *rx_buf, int words, int fs) | 321 | void *rx_buf, int words, int fs) |
304 | { | 322 | { |
305 | unsigned int *buf_32 = rx_buf; | 323 | u32 *buf_32 = rx_buf; |
306 | int k; | 324 | int k; |
307 | 325 | ||
308 | for (k = 0; k < words; k++) | 326 | for (k = 0; k < words; k++) |
@@ -312,19 +330,40 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, | |||
312 | static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, | 330 | static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, |
313 | void *rx_buf, int words, int fs) | 331 | void *rx_buf, int words, int fs) |
314 | { | 332 | { |
315 | unsigned int *buf_32 = rx_buf; | 333 | u32 *buf_32 = rx_buf; |
316 | int k; | 334 | int k; |
317 | 335 | ||
318 | for (k = 0; k < words; k++) | 336 | for (k = 0; k < words; k++) |
319 | put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); | 337 | put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); |
320 | } | 338 | } |
321 | 339 | ||
340 | static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, | ||
341 | void *rx_buf, int words, int fs) | ||
342 | { | ||
343 | u32 *buf_32 = rx_buf; | ||
344 | int k; | ||
345 | |||
346 | for (k = 0; k < words; k++) | ||
347 | buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs); | ||
348 | } | ||
349 | |||
350 | static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, | ||
351 | void *rx_buf, int words, int fs) | ||
352 | { | ||
353 | u32 *buf_32 = rx_buf; | ||
354 | int k; | ||
355 | |||
356 | for (k = 0; k < words; k++) | ||
357 | put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]); | ||
358 | } | ||
359 | |||
322 | static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t) | 360 | static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t) |
323 | { | 361 | { |
324 | int bits; | 362 | int bits; |
325 | 363 | ||
326 | bits = t ? t->bits_per_word : 0; | 364 | bits = t ? t->bits_per_word : 0; |
327 | bits = bits ? bits : spi->bits_per_word; | 365 | if (!bits) |
366 | bits = spi->bits_per_word; | ||
328 | return bits; | 367 | return bits; |
329 | } | 368 | } |
330 | 369 | ||
@@ -334,7 +373,8 @@ static unsigned long sh_msiof_spi_hz(struct spi_device *spi, | |||
334 | unsigned long hz; | 373 | unsigned long hz; |
335 | 374 | ||
336 | hz = t ? t->speed_hz : 0; | 375 | hz = t ? t->speed_hz : 0; |
337 | hz = hz ? hz : spi->max_speed_hz; | 376 | if (!hz) |
377 | hz = spi->max_speed_hz; | ||
338 | return hz; | 378 | return hz; |
339 | } | 379 | } |
340 | 380 | ||
@@ -468,9 +508,17 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | |||
468 | int bytes_done; | 508 | int bytes_done; |
469 | int words; | 509 | int words; |
470 | int n; | 510 | int n; |
511 | bool swab; | ||
471 | 512 | ||
472 | bits = sh_msiof_spi_bits(spi, t); | 513 | bits = sh_msiof_spi_bits(spi, t); |
473 | 514 | ||
515 | if (bits <= 8 && t->len > 15 && !(t->len & 3)) { | ||
516 | bits = 32; | ||
517 | swab = true; | ||
518 | } else { | ||
519 | swab = false; | ||
520 | } | ||
521 | |||
474 | /* setup bytes per word and fifo read/write functions */ | 522 | /* setup bytes per word and fifo read/write functions */ |
475 | if (bits <= 8) { | 523 | if (bits <= 8) { |
476 | bytes_per_word = 1; | 524 | bytes_per_word = 1; |
@@ -487,6 +535,17 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | |||
487 | rx_fifo = sh_msiof_spi_read_fifo_16u; | 535 | rx_fifo = sh_msiof_spi_read_fifo_16u; |
488 | else | 536 | else |
489 | rx_fifo = sh_msiof_spi_read_fifo_16; | 537 | rx_fifo = sh_msiof_spi_read_fifo_16; |
538 | } else if (swab) { | ||
539 | bytes_per_word = 4; | ||
540 | if ((unsigned long)t->tx_buf & 0x03) | ||
541 | tx_fifo = sh_msiof_spi_write_fifo_s32u; | ||
542 | else | ||
543 | tx_fifo = sh_msiof_spi_write_fifo_s32; | ||
544 | |||
545 | if ((unsigned long)t->rx_buf & 0x03) | ||
546 | rx_fifo = sh_msiof_spi_read_fifo_s32u; | ||
547 | else | ||
548 | rx_fifo = sh_msiof_spi_read_fifo_s32; | ||
490 | } else { | 549 | } else { |
491 | bytes_per_word = 4; | 550 | bytes_per_word = 4; |
492 | if ((unsigned long)t->tx_buf & 0x03) | 551 | if ((unsigned long)t->tx_buf & 0x03) |
@@ -509,9 +568,11 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | |||
509 | bytes_done = 0; | 568 | bytes_done = 0; |
510 | 569 | ||
511 | while (bytes_done < t->len) { | 570 | while (bytes_done < t->len) { |
571 | void *rx_buf = t->rx_buf ? t->rx_buf + bytes_done : NULL; | ||
572 | const void *tx_buf = t->tx_buf ? t->tx_buf + bytes_done : NULL; | ||
512 | n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, | 573 | n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, |
513 | t->tx_buf + bytes_done, | 574 | tx_buf, |
514 | t->rx_buf + bytes_done, | 575 | rx_buf, |
515 | words, bits); | 576 | words, bits); |
516 | if (n < 0) | 577 | if (n < 0) |
517 | break; | 578 | break; |
@@ -635,7 +696,7 @@ static int sh_msiof_spi_remove(struct platform_device *pdev) | |||
635 | ret = spi_bitbang_stop(&p->bitbang); | 696 | ret = spi_bitbang_stop(&p->bitbang); |
636 | if (!ret) { | 697 | if (!ret) { |
637 | pm_runtime_disable(&pdev->dev); | 698 | pm_runtime_disable(&pdev->dev); |
638 | free_irq(platform_get_irq(pdev, 0), sh_msiof_spi_irq); | 699 | free_irq(platform_get_irq(pdev, 0), p); |
639 | iounmap(p->mapbase); | 700 | iounmap(p->mapbase); |
640 | clk_put(p->clk); | 701 | clk_put(p->clk); |
641 | spi_master_put(p->bitbang.master); | 702 | spi_master_put(p->bitbang.master); |
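The spi_sh_msiof changes narrow the register accessors to u32, fix the free_irq() cookie in remove(), and add a byte-swapped 32-bit FIFO fast path: an 8-bit transfer longer than 15 bytes whose length is a multiple of 4 is sent four bytes per FIFO word via the new *_s32/*_s32u helpers, cutting the number of FIFO accesses to a quarter. A sketch of the selection predicate, matching the condition added in sh_msiof_spi_txrx() (the helper name is hypothetical):

	/* Mirrors "bits <= 8 && t->len > 15 && !(t->len & 3)" above:
	 * small word size, enough data, and a 4-byte-aligned length. */
	static bool msiof_use_swab_fast_path(int bits_per_word, int len)
	{
		return bits_per_word <= 8 && len > 15 && !(len & 3);
	}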
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c new file mode 100644 index 000000000000..6c3aa6ecaade --- /dev/null +++ b/drivers/spi/spi_tegra.c | |||
@@ -0,0 +1,618 @@ | |||
1 | /* | ||
2 | * Driver for Nvidia TEGRA spi controller. | ||
3 | * | ||
4 | * Copyright (C) 2010 Google, Inc. | ||
5 | * | ||
6 | * Author: | ||
7 | * Erik Gilling <konkers@android.com> | ||
8 | * | ||
9 | * This software is licensed under the terms of the GNU General Public | ||
10 | * License version 2, as published by the Free Software Foundation, and | ||
11 | * may be copied, distributed, and modified under those terms. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/io.h> | ||
25 | #include <linux/dma-mapping.h> | ||
26 | #include <linux/dmapool.h> | ||
27 | #include <linux/clk.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/delay.h> | ||
30 | |||
31 | #include <linux/spi/spi.h> | ||
32 | |||
33 | #include <mach/dma.h> | ||
34 | |||
35 | #define SLINK_COMMAND 0x000 | ||
36 | #define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0) | ||
37 | #define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5) | ||
38 | #define SLINK_BOTH_EN (1 << 10) | ||
39 | #define SLINK_CS_SW (1 << 11) | ||
40 | #define SLINK_CS_VALUE (1 << 12) | ||
41 | #define SLINK_CS_POLARITY (1 << 13) | ||
42 | #define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16) | ||
43 | #define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16) | ||
44 | #define SLINK_IDLE_SDA_PULL_LOW (2 << 16) | ||
45 | #define SLINK_IDLE_SDA_PULL_HIGH (3 << 16) | ||
46 | #define SLINK_IDLE_SDA_MASK (3 << 16) | ||
47 | #define SLINK_CS_POLARITY1 (1 << 20) | ||
48 | #define SLINK_CK_SDA (1 << 21) | ||
49 | #define SLINK_CS_POLARITY2 (1 << 22) | ||
50 | #define SLINK_CS_POLARITY3 (1 << 23) | ||
51 | #define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24) | ||
52 | #define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24) | ||
53 | #define SLINK_IDLE_SCLK_PULL_LOW (2 << 24) | ||
54 | #define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24) | ||
55 | #define SLINK_IDLE_SCLK_MASK (3 << 24) | ||
56 | #define SLINK_M_S (1 << 28) | ||
57 | #define SLINK_WAIT (1 << 29) | ||
58 | #define SLINK_GO (1 << 30) | ||
59 | #define SLINK_ENB (1 << 31) | ||
60 | |||
61 | #define SLINK_COMMAND2 0x004 | ||
62 | #define SLINK_LSBFE (1 << 0) | ||
63 | #define SLINK_SSOE (1 << 1) | ||
64 | #define SLINK_SPIE (1 << 4) | ||
65 | #define SLINK_BIDIROE (1 << 6) | ||
66 | #define SLINK_MODFEN (1 << 7) | ||
67 | #define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8) | ||
68 | #define SLINK_CS_ACTIVE_BETWEEN (1 << 17) | ||
69 | #define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18) | ||
70 | #define SLINK_SS_SETUP(x) (((x) & 0x3) << 20) | ||
71 | #define SLINK_FIFO_REFILLS_0 (0 << 22) | ||
72 | #define SLINK_FIFO_REFILLS_1 (1 << 22) | ||
73 | #define SLINK_FIFO_REFILLS_2 (2 << 22) | ||
74 | #define SLINK_FIFO_REFILLS_3 (3 << 22) | ||
75 | #define SLINK_FIFO_REFILLS_MASK (3 << 22) | ||
76 | #define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26) | ||
77 | #define SLINK_SPC0 (1 << 29) | ||
78 | #define SLINK_TXEN (1 << 30) | ||
79 | #define SLINK_RXEN (1 << 31) | ||
80 | |||
81 | #define SLINK_STATUS 0x008 | ||
82 | #define SLINK_COUNT(val) (((val) >> 0) & 0x1f) | ||
83 | #define SLINK_WORD(val) (((val) >> 5) & 0x1f) | ||
84 | #define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff) | ||
85 | #define SLINK_MODF (1 << 16) | ||
86 | #define SLINK_RX_UNF (1 << 18) | ||
87 | #define SLINK_TX_OVF (1 << 19) | ||
88 | #define SLINK_TX_FULL (1 << 20) | ||
89 | #define SLINK_TX_EMPTY (1 << 21) | ||
90 | #define SLINK_RX_FULL (1 << 22) | ||
91 | #define SLINK_RX_EMPTY (1 << 23) | ||
92 | #define SLINK_TX_UNF (1 << 24) | ||
93 | #define SLINK_RX_OVF (1 << 25) | ||
94 | #define SLINK_TX_FLUSH (1 << 26) | ||
95 | #define SLINK_RX_FLUSH (1 << 27) | ||
96 | #define SLINK_SCLK (1 << 28) | ||
97 | #define SLINK_ERR (1 << 29) | ||
98 | #define SLINK_RDY (1 << 30) | ||
99 | #define SLINK_BSY (1 << 31) | ||
100 | |||
101 | #define SLINK_MAS_DATA 0x010 | ||
102 | #define SLINK_SLAVE_DATA 0x014 | ||
103 | |||
104 | #define SLINK_DMA_CTL 0x018 | ||
105 | #define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0) | ||
106 | #define SLINK_TX_TRIG_1 (0 << 16) | ||
107 | #define SLINK_TX_TRIG_4 (1 << 16) | ||
108 | #define SLINK_TX_TRIG_8 (2 << 16) | ||
109 | #define SLINK_TX_TRIG_16 (3 << 16) | ||
110 | #define SLINK_TX_TRIG_MASK (3 << 16) | ||
111 | #define SLINK_RX_TRIG_1 (0 << 18) | ||
112 | #define SLINK_RX_TRIG_4 (1 << 18) | ||
113 | #define SLINK_RX_TRIG_8 (2 << 18) | ||
114 | #define SLINK_RX_TRIG_16 (3 << 18) | ||
115 | #define SLINK_RX_TRIG_MASK (3 << 18) | ||
116 | #define SLINK_PACKED (1 << 20) | ||
117 | #define SLINK_PACK_SIZE_4 (0 << 21) | ||
118 | #define SLINK_PACK_SIZE_8 (1 << 21) | ||
119 | #define SLINK_PACK_SIZE_16 (2 << 21) | ||
120 | #define SLINK_PACK_SIZE_32 (3 << 21) | ||
121 | #define SLINK_PACK_SIZE_MASK (3 << 21) | ||
122 | #define SLINK_IE_TXC (1 << 26) | ||
123 | #define SLINK_IE_RXC (1 << 27) | ||
124 | #define SLINK_DMA_EN (1 << 31) | ||
125 | |||
126 | #define SLINK_STATUS2 0x01c | ||
127 | #define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0) | ||
128 | #define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f) >> 16) | ||
129 | |||
130 | #define SLINK_TX_FIFO 0x100 | ||
131 | #define SLINK_RX_FIFO 0x180 | ||
132 | |||
133 | static const unsigned long spi_tegra_req_sels[] = { | ||
134 | TEGRA_DMA_REQ_SEL_SL2B1, | ||
135 | TEGRA_DMA_REQ_SEL_SL2B2, | ||
136 | TEGRA_DMA_REQ_SEL_SL2B3, | ||
137 | TEGRA_DMA_REQ_SEL_SL2B4, | ||
138 | }; | ||
139 | |||
140 | #define BB_LEN 32 | ||
141 | |||
142 | struct spi_tegra_data { | ||
143 | struct spi_master *master; | ||
144 | struct platform_device *pdev; | ||
145 | spinlock_t lock; | ||
146 | |||
147 | struct clk *clk; | ||
148 | void __iomem *base; | ||
149 | unsigned long phys; | ||
150 | |||
151 | u32 cur_speed; | ||
152 | |||
153 | struct list_head queue; | ||
154 | struct spi_transfer *cur; | ||
155 | unsigned cur_pos; | ||
156 | unsigned cur_len; | ||
157 | unsigned cur_bytes_per_word; | ||
158 | |||
159 | /* The tegra spi controller has a bug which causes the first word | ||
160 | * in PIO transactions to be garbage. Since packed DMA transactions | ||
161 | * require transfers to be 4 byte aligned we need a bounce buffer | ||
162 | * for the generic case. | ||
163 | */ | ||
164 | struct tegra_dma_req rx_dma_req; | ||
165 | struct tegra_dma_channel *rx_dma; | ||
166 | u32 *rx_bb; | ||
167 | dma_addr_t rx_bb_phys; | ||
168 | }; | ||
169 | |||
170 | |||
171 | static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi, | ||
172 | unsigned long reg) | ||
173 | { | ||
174 | return readl(tspi->base + reg); | ||
175 | } | ||
176 | |||
177 | static inline void spi_tegra_writel(struct spi_tegra_data *tspi, | ||
178 | unsigned long val, | ||
179 | unsigned long reg) | ||
180 | { | ||
181 | writel(val, tspi->base + reg); | ||
182 | } | ||
183 | |||
184 | static void spi_tegra_go(struct spi_tegra_data *tspi) | ||
185 | { | ||
186 | unsigned long val; | ||
187 | |||
188 | wmb(); | ||
189 | |||
190 | val = spi_tegra_readl(tspi, SLINK_DMA_CTL); | ||
191 | val &= ~SLINK_DMA_BLOCK_SIZE(~0) & ~SLINK_DMA_EN; | ||
192 | val |= SLINK_DMA_BLOCK_SIZE(tspi->rx_dma_req.size / 4 - 1); | ||
193 | spi_tegra_writel(tspi, val, SLINK_DMA_CTL); | ||
194 | |||
195 | tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req); | ||
196 | |||
197 | val |= SLINK_DMA_EN; | ||
198 | spi_tegra_writel(tspi, val, SLINK_DMA_CTL); | ||
199 | } | ||
200 | |||
201 | static unsigned spi_tegra_fill_tx_fifo(struct spi_tegra_data *tspi, | ||
202 | struct spi_transfer *t) | ||
203 | { | ||
204 | unsigned len = min(t->len - tspi->cur_pos, BB_LEN * | ||
205 | tspi->cur_bytes_per_word); | ||
206 | u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_pos; | ||
207 | int i, j; | ||
208 | unsigned long val; | ||
209 | |||
210 | val = spi_tegra_readl(tspi, SLINK_COMMAND); | ||
211 | val &= ~SLINK_WORD_SIZE(~0); | ||
212 | val |= SLINK_WORD_SIZE(len / tspi->cur_bytes_per_word - 1); | ||
213 | spi_tegra_writel(tspi, val, SLINK_COMMAND); | ||
214 | |||
215 | for (i = 0; i < len; i += tspi->cur_bytes_per_word) { | ||
216 | val = 0; | ||
217 | for (j = 0; j < tspi->cur_bytes_per_word; j++) | ||
218 | val |= tx_buf[i + j] << j * 8; | ||
219 | |||
220 | spi_tegra_writel(tspi, val, SLINK_TX_FIFO); | ||
221 | } | ||
222 | |||
223 | tspi->rx_dma_req.size = len / tspi->cur_bytes_per_word * 4; | ||
224 | |||
225 | return len; | ||
226 | } | ||
227 | |||
228 | static unsigned spi_tegra_drain_rx_fifo(struct spi_tegra_data *tspi, | ||
229 | struct spi_transfer *t) | ||
230 | { | ||
231 | unsigned len = tspi->cur_len; | ||
232 | u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_pos; | ||
233 | int i, j; | ||
234 | unsigned long val; | ||
235 | |||
236 | for (i = 0; i < len; i += tspi->cur_bytes_per_word) { | ||
237 | val = tspi->rx_bb[i / tspi->cur_bytes_per_word]; | ||
238 | for (j = 0; j < tspi->cur_bytes_per_word; j++) | ||
239 | rx_buf[i + j] = (val >> (j * 8)) & 0xff; | ||
240 | } | ||
241 | |||
242 | return len; | ||
243 | } | ||
244 | |||
245 | static void spi_tegra_start_transfer(struct spi_device *spi, | ||
246 | struct spi_transfer *t) | ||
247 | { | ||
248 | struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); | ||
249 | u32 speed; | ||
250 | u8 bits_per_word; | ||
251 | unsigned long val; | ||
252 | |||
253 | speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz; | ||
254 | bits_per_word = t->bits_per_word ? t->bits_per_word : | ||
255 | spi->bits_per_word; | ||
256 | |||
257 | tspi->cur_bytes_per_word = (bits_per_word - 1) / 8 + 1; | ||
258 | |||
259 | if (speed != tspi->cur_speed) | ||
260 | clk_set_rate(tspi->clk, speed); | ||
261 | |||
262 | if (tspi->cur_speed == 0) | ||
263 | clk_enable(tspi->clk); | ||
264 | |||
265 | tspi->cur_speed = speed; | ||
266 | |||
267 | val = spi_tegra_readl(tspi, SLINK_COMMAND2); | ||
268 | val &= ~SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN; | ||
269 | if (t->rx_buf) | ||
270 | val |= SLINK_RXEN; | ||
271 | if (t->tx_buf) | ||
272 | val |= SLINK_TXEN; | ||
273 | val |= SLINK_SS_EN_CS(spi->chip_select); | ||
274 | val |= SLINK_SPIE; | ||
275 | spi_tegra_writel(tspi, val, SLINK_COMMAND2); | ||
276 | |||
277 | val = spi_tegra_readl(tspi, SLINK_COMMAND); | ||
278 | val &= ~SLINK_BIT_LENGTH(~0); | ||
279 | val |= SLINK_BIT_LENGTH(bits_per_word - 1); | ||
280 | |||
281 | /* FIXME: should probably control CS manually so that we can be sure | ||
282 | * it does not go low between transfer and to support delay_usecs | ||
283 | * correctly. | ||
284 | */ | ||
285 | val &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA & ~SLINK_CS_SW; | ||
286 | |||
287 | if (spi->mode & SPI_CPHA) | ||
288 | val |= SLINK_CK_SDA; | ||
289 | |||
290 | if (spi->mode & SPI_CPOL) | ||
291 | val |= SLINK_IDLE_SCLK_DRIVE_HIGH; | ||
292 | else | ||
293 | val |= SLINK_IDLE_SCLK_DRIVE_LOW; | ||
294 | |||
295 | val |= SLINK_M_S; | ||
296 | |||
297 | spi_tegra_writel(tspi, val, SLINK_COMMAND); | ||
298 | |||
299 | spi_tegra_writel(tspi, SLINK_RX_FLUSH | SLINK_TX_FLUSH, SLINK_STATUS); | ||
300 | |||
301 | tspi->cur = t; | ||
302 | tspi->cur_pos = 0; | ||
303 | tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, t); | ||
304 | |||
305 | spi_tegra_go(tspi); | ||
306 | } | ||
307 | |||
308 | static void spi_tegra_start_message(struct spi_device *spi, | ||
309 | struct spi_message *m) | ||
310 | { | ||
311 | struct spi_transfer *t; | ||
312 | |||
313 | m->actual_length = 0; | ||
314 | m->status = 0; | ||
315 | |||
316 | t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); | ||
317 | spi_tegra_start_transfer(spi, t); | ||
318 | } | ||
319 | |||
320 | static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req) | ||
321 | { | ||
322 | struct spi_tegra_data *tspi = req->dev; | ||
323 | unsigned long flags; | ||
324 | struct spi_message *m; | ||
325 | struct spi_device *spi; | ||
326 | int timeout = 0; | ||
327 | unsigned long val; | ||
328 | |||
329 | /* the SPI controller may come back with both the BSY and RDY bits | ||
330 | * set. In this case we need to wait for the BSY bit to clear so | ||
331 | * that we are sure the DMA is finished. 1000 reads was empirically | ||
332 | * determined to be long enough. | ||
333 | */ | ||
334 | while (timeout++ < 1000) { | ||
335 | if (!(spi_tegra_readl(tspi, SLINK_STATUS) & SLINK_BSY)) | ||
336 | break; | ||
337 | } | ||
338 | |||
339 | spin_lock_irqsave(&tspi->lock, flags); | ||
340 | |||
341 | val = spi_tegra_readl(tspi, SLINK_STATUS); | ||
342 | val |= SLINK_RDY; | ||
343 | spi_tegra_writel(tspi, val, SLINK_STATUS); | ||
344 | |||
345 | m = list_first_entry(&tspi->queue, struct spi_message, queue); | ||
346 | |||
347 | if (timeout >= 1000) | ||
348 | m->status = -EIO; | ||
349 | |||
350 | spi = m->state; | ||
351 | |||
352 | tspi->cur_pos += spi_tegra_drain_rx_fifo(tspi, tspi->cur); | ||
353 | m->actual_length += tspi->cur_pos; | ||
354 | |||
355 | if (tspi->cur_pos < tspi->cur->len) { | ||
356 | tspi->cur_len = spi_tegra_fill_tx_fifo(tspi, tspi->cur); | ||
357 | spi_tegra_go(tspi); | ||
358 | } else if (!list_is_last(&tspi->cur->transfer_list, | ||
359 | &m->transfers)) { | ||
360 | tspi->cur = list_first_entry(&tspi->cur->transfer_list, | ||
361 | struct spi_transfer, | ||
362 | transfer_list); | ||
363 | spi_tegra_start_transfer(spi, tspi->cur); | ||
364 | } else { | ||
365 | list_del(&m->queue); | ||
366 | |||
367 | m->complete(m->context); | ||
368 | |||
369 | if (!list_empty(&tspi->queue)) { | ||
370 | m = list_first_entry(&tspi->queue, struct spi_message, | ||
371 | queue); | ||
372 | spi = m->state; | ||
373 | spi_tegra_start_message(spi, m); | ||
374 | } else { | ||
375 | clk_disable(tspi->clk); | ||
376 | tspi->cur_speed = 0; | ||
377 | } | ||
378 | } | ||
379 | |||
380 | spin_unlock_irqrestore(&tspi->lock, flags); | ||
381 | } | ||
382 | |||
383 | static int spi_tegra_setup(struct spi_device *spi) | ||
384 | { | ||
385 | struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); | ||
386 | unsigned long cs_bit; | ||
387 | unsigned long val; | ||
388 | unsigned long flags; | ||
389 | |||
390 | dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", | ||
391 | spi->bits_per_word, | ||
392 | spi->mode & SPI_CPOL ? "" : "~", | ||
393 | spi->mode & SPI_CPHA ? "" : "~", | ||
394 | spi->max_speed_hz); | ||
395 | |||
396 | |||
397 | switch (spi->chip_select) { | ||
398 | case 0: | ||
399 | cs_bit = SLINK_CS_POLARITY; | ||
400 | break; | ||
401 | |||
402 | case 1: | ||
403 | cs_bit = SLINK_CS_POLARITY1; | ||
404 | break; | ||
405 | |||
406 | case 2: | ||
407 | cs_bit = SLINK_CS_POLARITY2; | ||
408 | break; | ||
409 | |||
410 | case 4: | ||
411 | cs_bit = SLINK_CS_POLARITY3; | ||
412 | break; | ||
413 | |||
414 | default: | ||
415 | return -EINVAL; | ||
416 | } | ||
417 | |||
418 | spin_lock_irqsave(&tspi->lock, flags); | ||
419 | |||
420 | val = spi_tegra_readl(tspi, SLINK_COMMAND); | ||
421 | if (spi->mode & SPI_CS_HIGH) | ||
422 | val |= cs_bit; | ||
423 | else | ||
424 | val &= ~cs_bit; | ||
425 | spi_tegra_writel(tspi, val, SLINK_COMMAND); | ||
426 | |||
427 | spin_unlock_irqrestore(&tspi->lock, flags); | ||
428 | |||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m) | ||
433 | { | ||
434 | struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master); | ||
435 | struct spi_transfer *t; | ||
436 | unsigned long flags; | ||
437 | int was_empty; | ||
438 | |||
439 | if (list_empty(&m->transfers) || !m->complete) | ||
440 | return -EINVAL; | ||
441 | |||
442 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
443 | if (t->bits_per_word < 0 || t->bits_per_word > 32) | ||
444 | return -EINVAL; | ||
445 | |||
446 | if (t->len == 0) | ||
447 | return -EINVAL; | ||
448 | |||
449 | if (!t->rx_buf && !t->tx_buf) | ||
450 | return -EINVAL; | ||
451 | } | ||
452 | |||
453 | m->state = spi; | ||
454 | |||
455 | spin_lock_irqsave(&tspi->lock, flags); | ||
456 | was_empty = list_empty(&tspi->queue); | ||
457 | list_add_tail(&m->queue, &tspi->queue); | ||
458 | |||
459 | if (was_empty) | ||
460 | spi_tegra_start_message(spi, m); | ||
461 | |||
462 | spin_unlock_irqrestore(&tspi->lock, flags); | ||
463 | |||
464 | return 0; | ||
465 | } | ||
466 | |||
467 | static int __init spi_tegra_probe(struct platform_device *pdev) | ||
468 | { | ||
469 | struct spi_master *master; | ||
470 | struct spi_tegra_data *tspi; | ||
471 | struct resource *r; | ||
472 | int ret; | ||
473 | |||
474 | master = spi_alloc_master(&pdev->dev, sizeof *tspi); | ||
475 | if (master == NULL) { | ||
476 | dev_err(&pdev->dev, "master allocation failed\n"); | ||
477 | return -ENOMEM; | ||
478 | } | ||
479 | |||
480 | /* the spi->mode bits understood by this driver: */ | ||
481 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | ||
482 | |||
483 | master->bus_num = pdev->id; | ||
484 | |||
485 | master->setup = spi_tegra_setup; | ||
486 | master->transfer = spi_tegra_transfer; | ||
487 | master->num_chipselect = 4; | ||
488 | |||
489 | dev_set_drvdata(&pdev->dev, master); | ||
490 | tspi = spi_master_get_devdata(master); | ||
491 | tspi->master = master; | ||
492 | tspi->pdev = pdev; | ||
493 | spin_lock_init(&tspi->lock); | ||
494 | |||
495 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
496 | if (r == NULL) { | ||
497 | ret = -ENODEV; | ||
498 | goto err0; | ||
499 | } | ||
500 | |||
501 | if (!request_mem_region(r->start, (r->end - r->start) + 1, | ||
502 | dev_name(&pdev->dev))) { | ||
503 | ret = -EBUSY; | ||
504 | goto err0; | ||
505 | } | ||
506 | |||
507 | tspi->phys = r->start; | ||
508 | tspi->base = ioremap(r->start, r->end - r->start + 1); | ||
509 | if (!tspi->base) { | ||
510 | dev_err(&pdev->dev, "can't ioremap iomem\n"); | ||
511 | ret = -ENOMEM; | ||
512 | goto err1; | ||
513 | } | ||
514 | |||
515 | tspi->clk = clk_get(&pdev->dev, NULL); | ||
516 | if (IS_ERR(tspi->clk)) { | ||
517 | dev_err(&pdev->dev, "can not get clock\n"); | ||
518 | ret = PTR_ERR(tspi->clk); | ||
519 | goto err2; | ||
520 | } | ||
521 | |||
522 | INIT_LIST_HEAD(&tspi->queue); | ||
523 | |||
524 | tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT); | ||
525 | if (!tspi->rx_dma) { | ||
526 | dev_err(&pdev->dev, "can not allocate rx dma channel\n"); | ||
527 | ret = -ENODEV; | ||
528 | goto err3; | ||
529 | } | ||
530 | |||
531 | tspi->rx_bb = dma_alloc_coherent(&pdev->dev, sizeof(u32) * BB_LEN, | ||
532 | &tspi->rx_bb_phys, GFP_KERNEL); | ||
533 | if (!tspi->rx_bb) { | ||
534 | dev_err(&pdev->dev, "can not allocate rx bounce buffer\n"); | ||
535 | ret = -ENOMEM; | ||
536 | goto err4; | ||
537 | } | ||
538 | |||
539 | tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete; | ||
540 | tspi->rx_dma_req.to_memory = 1; | ||
541 | tspi->rx_dma_req.dest_addr = tspi->rx_bb_phys; | ||
542 | tspi->rx_dma_req.dest_bus_width = 32; | ||
543 | tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO; | ||
544 | tspi->rx_dma_req.source_bus_width = 32; | ||
545 | tspi->rx_dma_req.source_wrap = 4; | ||
546 | tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id]; | ||
547 | tspi->rx_dma_req.dev = tspi; | ||
548 | |||
549 | ret = spi_register_master(master); | ||
550 | |||
551 | if (ret < 0) | ||
552 | goto err5; | ||
553 | |||
554 | return ret; | ||
555 | |||
556 | err5: | ||
557 | dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, | ||
558 | tspi->rx_bb, tspi->rx_bb_phys); | ||
559 | err4: | ||
560 | tegra_dma_free_channel(tspi->rx_dma); | ||
561 | err3: | ||
562 | clk_put(tspi->clk); | ||
563 | err2: | ||
564 | iounmap(tspi->base); | ||
565 | err1: | ||
566 | release_mem_region(r->start, (r->end - r->start) + 1); | ||
567 | err0: | ||
568 | spi_master_put(master); | ||
569 | return ret; | ||
570 | } | ||
571 | |||
572 | static int __devexit spi_tegra_remove(struct platform_device *pdev) | ||
573 | { | ||
574 | struct spi_master *master; | ||
575 | struct spi_tegra_data *tspi; | ||
576 | struct resource *r; | ||
577 | |||
578 | master = dev_get_drvdata(&pdev->dev); | ||
579 | tspi = spi_master_get_devdata(master); | ||
580 | |||
581 | spi_unregister_master(master); | ||
582 | tegra_dma_free_channel(tspi->rx_dma); | ||
583 | |||
584 | dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, | ||
585 | tspi->rx_bb, tspi->rx_bb_phys); | ||
586 | |||
587 | clk_put(tspi->clk); | ||
588 | iounmap(tspi->base); | ||
589 | |||
590 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
591 | release_mem_region(r->start, (r->end - r->start) + 1); | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | MODULE_ALIAS("platform:spi_tegra"); | ||
597 | |||
598 | static struct platform_driver spi_tegra_driver = { | ||
599 | .driver = { | ||
600 | .name = "spi_tegra", | ||
601 | .owner = THIS_MODULE, | ||
602 | }, | ||
603 | .remove = __devexit_p(spi_tegra_remove), | ||
604 | }; | ||
605 | |||
606 | static int __init spi_tegra_init(void) | ||
607 | { | ||
608 | return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe); | ||
609 | } | ||
610 | module_init(spi_tegra_init); | ||
611 | |||
612 | static void __exit spi_tegra_exit(void) | ||
613 | { | ||
614 | platform_driver_unregister(&spi_tegra_driver); | ||
615 | } | ||
616 | module_exit(spi_tegra_exit); | ||
617 | |||
618 | MODULE_LICENSE("GPL"); | ||
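For context on how the probe above finds its hardware: spi_tegra_probe() looks up an IORESOURCE_MEM resource and uses the platform device id both as the SPI bus number and as the index into spi_tegra_req_sels[]. A board file would therefore register the controller roughly as in the hedged sketch below; the base address, size, and id are illustrative placeholders, not values taken from this patch.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical values: the real base address and id come from the SoC/board code. */
static struct resource tegra_spi_resources[] = {
	{
		.start = 0x7000d400,
		.end   = 0x7000d400 + 0x200 - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct platform_device tegra_spi_device = {
	.name          = "spi_tegra",	/* must match spi_tegra_driver.driver.name */
	.id            = 0,		/* becomes master->bus_num and selects the DMA req line */
	.resource      = tegra_spi_resources,
	.num_resources = ARRAY_SIZE(tegra_spi_resources),
};

/* called from the board's init code */
static int __init board_register_spi(void)
{
	return platform_device_register(&tegra_spi_device);
}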
diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c new file mode 100644 index 000000000000..79e48d451137 --- /dev/null +++ b/drivers/spi/spi_topcliff_pch.c | |||
@@ -0,0 +1,1303 @@ | |||
1 | /* | ||
2 | * SPI bus driver for the Topcliff PCH used by Intel SoCs | ||
3 | * | ||
4 | * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/delay.h> | ||
21 | #include <linux/pci.h> | ||
22 | #include <linux/wait.h> | ||
23 | #include <linux/spi/spi.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/spi/spidev.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/device.h> | ||
29 | |||
30 | /* Register offsets */ | ||
31 | #define PCH_SPCR 0x00 /* SPI control register */ | ||
32 | #define PCH_SPBRR 0x04 /* SPI baud rate register */ | ||
33 | #define PCH_SPSR 0x08 /* SPI status register */ | ||
34 | #define PCH_SPDWR 0x0C /* SPI write data register */ | ||
35 | #define PCH_SPDRR 0x10 /* SPI read data register */ | ||
36 | #define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ | ||
37 | #define PCH_SRST 0x1C /* SPI reset register */ | ||
38 | |||
39 | #define PCH_SPSR_TFD 0x000007C0 | ||
40 | #define PCH_SPSR_RFD 0x0000F800 | ||
41 | |||
42 | #define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11) | ||
43 | #define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6) | ||
44 | |||
45 | #define PCH_RX_THOLD 7 | ||
46 | #define PCH_RX_THOLD_MAX 15 | ||
47 | |||
48 | #define PCH_MAX_BAUDRATE 5000000 | ||
49 | #define PCH_MAX_FIFO_DEPTH 16 | ||
50 | |||
51 | #define STATUS_RUNNING 1 | ||
52 | #define STATUS_EXITING 2 | ||
53 | #define PCH_SLEEP_TIME 10 | ||
54 | |||
55 | #define PCH_ADDRESS_SIZE 0x20 | ||
56 | |||
57 | #define SSN_LOW 0x02U | ||
58 | #define SSN_NO_CONTROL 0x00U | ||
59 | #define PCH_MAX_CS 0xFF | ||
60 | #define PCI_DEVICE_ID_GE_SPI 0x8816 | ||
61 | |||
62 | #define SPCR_SPE_BIT (1 << 0) | ||
63 | #define SPCR_MSTR_BIT (1 << 1) | ||
64 | #define SPCR_LSBF_BIT (1 << 4) | ||
65 | #define SPCR_CPHA_BIT (1 << 5) | ||
66 | #define SPCR_CPOL_BIT (1 << 6) | ||
67 | #define SPCR_TFIE_BIT (1 << 8) | ||
68 | #define SPCR_RFIE_BIT (1 << 9) | ||
69 | #define SPCR_FIE_BIT (1 << 10) | ||
70 | #define SPCR_ORIE_BIT (1 << 11) | ||
71 | #define SPCR_MDFIE_BIT (1 << 12) | ||
72 | #define SPCR_FICLR_BIT (1 << 24) | ||
73 | #define SPSR_TFI_BIT (1 << 0) | ||
74 | #define SPSR_RFI_BIT (1 << 1) | ||
75 | #define SPSR_FI_BIT (1 << 2) | ||
76 | #define SPBRR_SIZE_BIT (1 << 10) | ||
77 | |||
78 | #define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) | ||
79 | |||
80 | #define SPCR_RFIC_FIELD 20 | ||
81 | #define SPCR_TFIC_FIELD 16 | ||
82 | |||
83 | #define SPSR_INT_BITS 0x1F | ||
84 | #define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) | ||
85 | #define MASK_RFIC_SPCR_BITS (~(0xf << 20)) | ||
86 | #define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) | ||
87 | |||
88 | #define PCH_CLOCK_HZ 50000000 | ||
89 | #define PCH_MAX_SPBR 1023 | ||
90 | |||
91 | |||
92 | /** | ||
93 | * struct pch_spi_data - Holds the SPI channel specific details | ||
94 | * @io_remap_addr: The remapped PCI base address | ||
95 | * @master: Pointer to the SPI master structure | ||
96 | * @work: Reference to work queue handler | ||
97 | * @wk: Workqueue for carrying out execution of the | ||
98 | * requests | ||
99 | * @wait: Wait queue for waking up upon receiving an | ||
100 | * interrupt. | ||
101 | * @transfer_complete: Status of SPI Transfer | ||
102 | * @bcurrent_msg_processing: Status flag for message processing | ||
103 | * @lock: Lock for protecting this structure | ||
104 | * @queue: SPI Message queue | ||
105 | * @status: Status of the SPI driver | ||
106 | * @bpw_len: Length of data to be transferred in bits per | ||
107 | * word | ||
108 | * @transfer_active: Flag showing active transfer | ||
109 | * @tx_index: Transmit data count; for bookkeeping during | ||
110 | * transfer | ||
111 | * @rx_index: Receive data count; for bookkeeping during | ||
112 | * transfer | ||
113 | * @pkt_tx_buff: Buffer for data to be transmitted | ||
114 | * @pkt_rx_buff: Buffer for received data | ||
115 | * @n_curnt_chip: The chip number that this SPI driver currently | ||
116 | * operates on | ||
117 | * @current_chip: Reference to the current chip that this SPI | ||
118 | * driver currently operates on | ||
119 | * @current_msg: The current message that this SPI driver is | ||
120 | * handling | ||
121 | * @cur_trans: The current transfer that this SPI driver is | ||
122 | * handling | ||
123 | * @board_dat: Reference to the SPI device data structure | ||
124 | */ | ||
125 | struct pch_spi_data { | ||
126 | void __iomem *io_remap_addr; | ||
127 | struct spi_master *master; | ||
128 | struct work_struct work; | ||
129 | struct workqueue_struct *wk; | ||
130 | wait_queue_head_t wait; | ||
131 | u8 transfer_complete; | ||
132 | u8 bcurrent_msg_processing; | ||
133 | spinlock_t lock; | ||
134 | struct list_head queue; | ||
135 | u8 status; | ||
136 | u32 bpw_len; | ||
137 | u8 transfer_active; | ||
138 | u32 tx_index; | ||
139 | u32 rx_index; | ||
140 | u16 *pkt_tx_buff; | ||
141 | u16 *pkt_rx_buff; | ||
142 | u8 n_curnt_chip; | ||
143 | struct spi_device *current_chip; | ||
144 | struct spi_message *current_msg; | ||
145 | struct spi_transfer *cur_trans; | ||
146 | struct pch_spi_board_data *board_dat; | ||
147 | }; | ||
148 | |||
149 | /** | ||
150 | * struct pch_spi_board_data - Holds the SPI device specific details | ||
151 | * @pdev: Pointer to the PCI device | ||
152 | * @irq_reg_sts: Status of IRQ registration | ||
153 | * @pci_req_sts: Status of pci_request_regions | ||
154 | * @suspend_sts: Status of suspend | ||
155 | * @data: Pointer to SPI channel data structure | ||
156 | */ | ||
157 | struct pch_spi_board_data { | ||
158 | struct pci_dev *pdev; | ||
159 | u8 irq_reg_sts; | ||
160 | u8 pci_req_sts; | ||
161 | u8 suspend_sts; | ||
162 | struct pch_spi_data *data; | ||
163 | }; | ||
164 | |||
165 | static struct pci_device_id pch_spi_pcidev_id[] = { | ||
166 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, | ||
167 | {0,} | ||
168 | }; | ||
169 | |||
170 | /** | ||
171 | * pch_spi_writereg() - Performs register writes | ||
172 | * @master: Pointer to struct spi_master. | ||
173 | * @idx: Register offset. | ||
174 | * @val: Value to be written to register. | ||
175 | */ | ||
176 | static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) | ||
177 | { | ||
178 | struct pch_spi_data *data = spi_master_get_devdata(master); | ||
179 | iowrite32(val, (data->io_remap_addr + idx)); | ||
180 | } | ||
181 | |||
182 | /** | ||
183 | * pch_spi_readreg() - Performs register reads | ||
184 | * @master: Pointer to struct spi_master. | ||
185 | * @idx: Register offset. | ||
186 | */ | ||
187 | static inline u32 pch_spi_readreg(struct spi_master *master, int idx) | ||
188 | { | ||
189 | struct pch_spi_data *data = spi_master_get_devdata(master); | ||
190 | return ioread32(data->io_remap_addr + idx); | ||
191 | } | ||
192 | |||
193 | static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, | ||
194 | u32 set, u32 clr) | ||
195 | { | ||
196 | u32 tmp = pch_spi_readreg(master, idx); | ||
197 | tmp = (tmp & ~clr) | set; | ||
198 | pch_spi_writereg(master, idx, tmp); | ||
199 | } | ||
200 | |||
201 | static void pch_spi_set_master_mode(struct spi_master *master) | ||
202 | { | ||
203 | pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs | ||
208 | * @master: Pointer to struct spi_master. | ||
209 | */ | ||
210 | static void pch_spi_clear_fifo(struct spi_master *master) | ||
211 | { | ||
212 | pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); | ||
213 | pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); | ||
214 | } | ||
215 | |||
216 | static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, | ||
217 | void __iomem *io_remap_addr) | ||
218 | { | ||
219 | u32 n_read, tx_index, rx_index, bpw_len; | ||
220 | u16 *pkt_rx_buffer, *pkt_tx_buff; | ||
221 | int read_cnt; | ||
222 | u32 reg_spcr_val; | ||
223 | void __iomem *spsr; | ||
224 | void __iomem *spdrr; | ||
225 | void __iomem *spdwr; | ||
226 | |||
227 | spsr = io_remap_addr + PCH_SPSR; | ||
228 | iowrite32(reg_spsr_val, spsr); | ||
229 | |||
230 | if (data->transfer_active) { | ||
231 | rx_index = data->rx_index; | ||
232 | tx_index = data->tx_index; | ||
233 | bpw_len = data->bpw_len; | ||
234 | pkt_rx_buffer = data->pkt_rx_buff; | ||
235 | pkt_tx_buff = data->pkt_tx_buff; | ||
236 | |||
237 | spdrr = io_remap_addr + PCH_SPDRR; | ||
238 | spdwr = io_remap_addr + PCH_SPDWR; | ||
239 | |||
240 | n_read = PCH_READABLE(reg_spsr_val); | ||
241 | |||
242 | for (read_cnt = 0; (read_cnt < n_read); read_cnt++) { | ||
243 | pkt_rx_buffer[rx_index++] = ioread32(spdrr); | ||
244 | if (tx_index < bpw_len) | ||
245 | iowrite32(pkt_tx_buff[tx_index++], spdwr); | ||
246 | } | ||
247 | |||
248 | /* disable RFI if not needed */ | ||
249 | if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) { | ||
250 | reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR); | ||
251 | reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ | ||
252 | |||
253 | /* reset rx threshold */ | ||
254 | reg_spcr_val &= MASK_RFIC_SPCR_BITS; | ||
255 | reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); | ||
256 | iowrite32(reg_spcr_val, | ||
257 | (io_remap_addr + PCH_SPCR)); | ||
258 | } | ||
259 | |||
260 | /* update counts */ | ||
261 | data->tx_index = tx_index; | ||
262 | data->rx_index = rx_index; | ||
263 | |||
264 | } | ||
265 | |||
266 | /* if transfer complete interrupt */ | ||
267 | if (reg_spsr_val & SPSR_FI_BIT) { | ||
268 | /* disable FI & RFI interrupts */ | ||
269 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, | ||
270 | SPCR_FIE_BIT | SPCR_RFIE_BIT); | ||
271 | |||
272 | /* transfer is completed; inform pch_spi_process_messages */ | ||
273 | data->transfer_complete = true; | ||
274 | wake_up(&data->wait); | ||
275 | } | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * pch_spi_handler() - Interrupt handler | ||
280 | * @irq: The interrupt number. | ||
281 | * @dev_id: Pointer to struct pch_spi_board_data. | ||
282 | */ | ||
283 | static irqreturn_t pch_spi_handler(int irq, void *dev_id) | ||
284 | { | ||
285 | u32 reg_spsr_val; | ||
286 | struct pch_spi_data *data; | ||
287 | void __iomem *spsr; | ||
288 | void __iomem *io_remap_addr; | ||
289 | irqreturn_t ret = IRQ_NONE; | ||
290 | struct pch_spi_board_data *board_dat = dev_id; | ||
291 | |||
292 | if (board_dat->suspend_sts) { | ||
293 | dev_dbg(&board_dat->pdev->dev, | ||
294 | "%s returning due to suspend\n", __func__); | ||
295 | return IRQ_NONE; | ||
296 | } | ||
297 | |||
298 | data = board_dat->data; | ||
299 | io_remap_addr = data->io_remap_addr; | ||
300 | spsr = io_remap_addr + PCH_SPSR; | ||
301 | |||
302 | reg_spsr_val = ioread32(spsr); | ||
303 | |||
304 | /* Check if the interrupt is for SPI device */ | ||
305 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { | ||
306 | pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); | ||
307 | ret = IRQ_HANDLED; | ||
308 | } | ||
309 | |||
310 | dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n", | ||
311 | __func__, ret); | ||
312 | |||
313 | return ret; | ||
314 | } | ||
315 | |||
316 | /** | ||
317 | * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR | ||
318 | * @master: Pointer to struct spi_master. | ||
319 | * @speed_hz: Baud rate. | ||
320 | */ | ||
321 | static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) | ||
322 | { | ||
323 | u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); | ||
324 | |||
325 | /* if the requested rate is lower than we can reach (below ~24.4 kHz), clamp the divider */ | ||
326 | if (n_spbr > PCH_MAX_SPBR) | ||
327 | n_spbr = PCH_MAX_SPBR; | ||
328 | |||
329 | pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); | ||
330 | } | ||
331 | |||
332 | /** | ||
333 | * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR | ||
334 | * @master: Pointer to struct spi_master. | ||
335 | * @bits_per_word: Bits per word for SPI transfer. | ||
336 | */ | ||
337 | static void pch_spi_set_bits_per_word(struct spi_master *master, | ||
338 | u8 bits_per_word) | ||
339 | { | ||
340 | if (bits_per_word == 8) | ||
341 | pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); | ||
342 | else | ||
343 | pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer | ||
348 | * @spi: Pointer to struct spi_device. | ||
349 | */ | ||
350 | static void pch_spi_setup_transfer(struct spi_device *spi) | ||
351 | { | ||
352 | u32 flags = 0; | ||
353 | |||
354 | dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", | ||
355 | __func__, pch_spi_readreg(spi->master, PCH_SPBRR), | ||
356 | spi->max_speed_hz); | ||
357 | pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); | ||
358 | |||
359 | /* set bits per word */ | ||
360 | pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); | ||
361 | |||
362 | if (!(spi->mode & SPI_LSB_FIRST)) | ||
363 | flags |= SPCR_LSBF_BIT; | ||
364 | if (spi->mode & SPI_CPOL) | ||
365 | flags |= SPCR_CPOL_BIT; | ||
366 | if (spi->mode & SPI_CPHA) | ||
367 | flags |= SPCR_CPHA_BIT; | ||
368 | pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, | ||
369 | (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); | ||
370 | |||
371 | /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ | ||
372 | pch_spi_clear_fifo(spi->master); | ||
373 | } | ||
374 | |||
375 | /** | ||
376 | * pch_spi_reset() - Clears SPI registers | ||
377 | * @master: Pointer to struct spi_master. | ||
378 | */ | ||
379 | static void pch_spi_reset(struct spi_master *master) | ||
380 | { | ||
381 | /* write 1 to reset SPI */ | ||
382 | pch_spi_writereg(master, PCH_SRST, 0x1); | ||
383 | |||
384 | /* clear reset */ | ||
385 | pch_spi_writereg(master, PCH_SRST, 0x0); | ||
386 | } | ||
387 | |||
388 | static int pch_spi_setup(struct spi_device *pspi) | ||
389 | { | ||
390 | /* check bits per word */ | ||
391 | if (pspi->bits_per_word == 0) { | ||
392 | pspi->bits_per_word = 8; | ||
393 | dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__); | ||
394 | } | ||
395 | |||
396 | if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) { | ||
397 | dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); | ||
398 | return -EINVAL; | ||
399 | } | ||
400 | |||
401 | /* Check baud rate setting */ | ||
402 | /* if the requested baud rate is greater than | ||
403 | the maximum we can support, clamp it */ | ||
404 | if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE) | ||
405 | pspi->max_speed_hz = PCH_MAX_BAUDRATE; | ||
406 | |||
407 | dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__, | ||
408 | (pspi->mode) & (SPI_CPOL | SPI_CPHA)); | ||
409 | |||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) | ||
414 | { | ||
415 | |||
416 | struct spi_transfer *transfer; | ||
417 | struct pch_spi_data *data = spi_master_get_devdata(pspi->master); | ||
418 | int retval; | ||
419 | unsigned long flags; | ||
420 | |||
421 | /* validate spi message and baud rate */ | ||
422 | if (unlikely(list_empty(&pmsg->transfers) == 1)) { | ||
423 | dev_err(&pspi->dev, "%s list empty\n", __func__); | ||
424 | retval = -EINVAL; | ||
425 | goto err_out; | ||
426 | } | ||
427 | |||
428 | if (unlikely(pspi->max_speed_hz == 0)) { | ||
429 | dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n", | ||
430 | __func__, pspi->max_speed_hz); | ||
431 | retval = -EINVAL; | ||
432 | goto err_out; | ||
433 | } | ||
434 | |||
435 | dev_dbg(&pspi->dev, "%s Transfer List not empty. " | ||
436 | "Transfer Speed is set.\n", __func__); | ||
437 | |||
438 | /* validate Tx/Rx buffers and Transfer length */ | ||
439 | list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { | ||
440 | if (!transfer->tx_buf && !transfer->rx_buf) { | ||
441 | dev_err(&pspi->dev, | ||
442 | "%s Tx and Rx buffer NULL\n", __func__); | ||
443 | retval = -EINVAL; | ||
444 | goto err_out; | ||
445 | } | ||
446 | |||
447 | if (!transfer->len) { | ||
448 | dev_err(&pspi->dev, "%s Transfer length invalid\n", | ||
449 | __func__); | ||
450 | retval = -EINVAL; | ||
451 | goto err_out; | ||
452 | } | ||
453 | |||
454 | dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length" | ||
455 | " valid\n", __func__); | ||
456 | |||
457 | /* if a baud rate has been specified, clamp it to the maximum */ | ||
458 | if (transfer->speed_hz > PCH_MAX_BAUDRATE) | ||
459 | transfer->speed_hz = PCH_MAX_BAUDRATE; | ||
460 | |||
461 | /* if bits per word has been specified validate the same */ | ||
462 | if (transfer->bits_per_word) { | ||
463 | if ((transfer->bits_per_word != 8) | ||
464 | && (transfer->bits_per_word != 16)) { | ||
465 | retval = -EINVAL; | ||
466 | dev_err(&pspi->dev, | ||
467 | "%s Invalid bits per word\n", __func__); | ||
468 | goto err_out; | ||
469 | } | ||
470 | } | ||
471 | } | ||
472 | |||
473 | spin_lock_irqsave(&data->lock, flags); | ||
474 | |||
475 | /* We won't process any messages if we have been asked to terminate */ | ||
476 | if (data->status == STATUS_EXITING) { | ||
477 | dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); | ||
478 | retval = -ESHUTDOWN; | ||
479 | goto err_return_spinlock; | ||
480 | } | ||
481 | |||
482 | /* If suspended, return -EINVAL */ | ||
483 | if (data->board_dat->suspend_sts) { | ||
484 | dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); | ||
485 | retval = -EINVAL; | ||
486 | goto err_return_spinlock; | ||
487 | } | ||
488 | |||
489 | /* set status of message */ | ||
490 | pmsg->actual_length = 0; | ||
491 | dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); | ||
492 | |||
493 | pmsg->status = -EINPROGRESS; | ||
494 | |||
495 | /* add message to queue */ | ||
496 | list_add_tail(&pmsg->queue, &data->queue); | ||
497 | dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); | ||
498 | |||
499 | /* schedule work queue to run */ | ||
500 | queue_work(data->wk, &data->work); | ||
501 | dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__); | ||
502 | |||
503 | retval = 0; | ||
504 | |||
505 | err_return_spinlock: | ||
506 | spin_unlock_irqrestore(&data->lock, flags); | ||
507 | err_out: | ||
508 | dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); | ||
509 | return retval; | ||
510 | } | ||
511 | |||
512 | static inline void pch_spi_select_chip(struct pch_spi_data *data, | ||
513 | struct spi_device *pspi) | ||
514 | { | ||
515 | if (data->current_chip != NULL) { | ||
516 | if (pspi->chip_select != data->n_curnt_chip) { | ||
517 | dev_dbg(&pspi->dev, "%s : different slave\n", __func__); | ||
518 | data->current_chip = NULL; | ||
519 | } | ||
520 | } | ||
521 | |||
522 | data->current_chip = pspi; | ||
523 | |||
524 | data->n_curnt_chip = data->current_chip->chip_select; | ||
525 | |||
526 | dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__); | ||
527 | pch_spi_setup_transfer(pspi); | ||
528 | } | ||
529 | |||
530 | static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, | ||
531 | struct spi_message **ppmsg) | ||
532 | { | ||
533 | int size; | ||
534 | u32 n_writes; | ||
535 | int j; | ||
536 | struct spi_message *pmsg; | ||
537 | const u8 *tx_buf; | ||
538 | const u16 *tx_sbuf; | ||
539 | |||
540 | pmsg = *ppmsg; | ||
541 | |||
542 | /* set baud rate if needed */ | ||
543 | if (data->cur_trans->speed_hz) { | ||
544 | dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); | ||
545 | pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); | ||
546 | } | ||
547 | |||
548 | /* set bits per word if needed */ | ||
549 | if (data->cur_trans->bits_per_word && | ||
550 | (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { | ||
551 | dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); | ||
552 | pch_spi_set_bits_per_word(data->master, | ||
553 | data->cur_trans->bits_per_word); | ||
554 | *bpw = data->cur_trans->bits_per_word; | ||
555 | } else { | ||
556 | *bpw = data->current_msg->spi->bits_per_word; | ||
557 | } | ||
558 | |||
559 | /* reset Tx/Rx index */ | ||
560 | data->tx_index = 0; | ||
561 | data->rx_index = 0; | ||
562 | |||
563 | data->bpw_len = data->cur_trans->len / (*bpw / 8); | ||
564 | |||
565 | /* find alloc size */ | ||
566 | size = data->cur_trans->len * sizeof(*data->pkt_tx_buff); | ||
567 | |||
568 | /* allocate memory for pkt_tx_buff & pkt_rx_buffer */ | ||
569 | data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); | ||
570 | if (data->pkt_tx_buff != NULL) { | ||
571 | data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); | ||
572 | if (!data->pkt_rx_buff) | ||
573 | kfree(data->pkt_tx_buff); | ||
574 | } | ||
575 | |||
576 | if (!data->pkt_rx_buff) { | ||
577 | /* flush queue and set status of all transfers to -ENOMEM */ | ||
578 | dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); | ||
579 | list_for_each_entry(pmsg, data->queue.next, queue) { | ||
580 | pmsg->status = -ENOMEM; | ||
581 | |||
582 | if (pmsg->complete != 0) | ||
583 | pmsg->complete(pmsg->context); | ||
584 | |||
585 | /* delete from queue */ | ||
586 | list_del_init(&pmsg->queue); | ||
587 | } | ||
588 | return; | ||
589 | } | ||
590 | |||
591 | /* copy Tx Data */ | ||
592 | if (data->cur_trans->tx_buf != NULL) { | ||
593 | if (*bpw == 8) { | ||
594 | tx_buf = data->cur_trans->tx_buf; | ||
595 | for (j = 0; j < data->bpw_len; j++) | ||
596 | data->pkt_tx_buff[j] = *tx_buf++; | ||
597 | } else { | ||
598 | tx_sbuf = data->cur_trans->tx_buf; | ||
599 | for (j = 0; j < data->bpw_len; j++) | ||
600 | data->pkt_tx_buff[j] = *tx_sbuf++; | ||
601 | } | ||
602 | } | ||
603 | |||
604 | /* if len greater than PCH_MAX_FIFO_DEPTH, write 16,else len bytes */ | ||
605 | n_writes = data->bpw_len; | ||
606 | if (n_writes > PCH_MAX_FIFO_DEPTH) | ||
607 | n_writes = PCH_MAX_FIFO_DEPTH; | ||
608 | |||
609 | dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing " | ||
610 | "0x2 to SSNXCR\n", __func__); | ||
611 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); | ||
612 | |||
613 | for (j = 0; j < n_writes; j++) | ||
614 | pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]); | ||
615 | |||
616 | /* update tx_index */ | ||
617 | data->tx_index = j; | ||
618 | |||
619 | /* reset transfer complete flag */ | ||
620 | data->transfer_complete = false; | ||
621 | data->transfer_active = true; | ||
622 | } | ||
623 | |||
624 | |||
625 | static void pch_spi_nomore_transfer(struct pch_spi_data *data, | ||
626 | struct spi_message *pmsg) | ||
627 | { | ||
628 | dev_dbg(&data->master->dev, "%s called\n", __func__); | ||
629 | /* Invoke the complete callback | ||
630 | * (to the SPI core, indicating end of transfer) */ | ||
631 | data->current_msg->status = 0; | ||
632 | |||
633 | if (data->current_msg->complete != 0) { | ||
634 | dev_dbg(&data->master->dev, | ||
635 | "%s:Invoking callback of SPI core\n", __func__); | ||
636 | data->current_msg->complete(data->current_msg->context); | ||
637 | } | ||
638 | |||
639 | /* update status in global variable */ | ||
640 | data->bcurrent_msg_processing = false; | ||
641 | |||
642 | dev_dbg(&data->master->dev, | ||
643 | "%s:data->bcurrent_msg_processing = false\n", __func__); | ||
644 | |||
645 | data->current_msg = NULL; | ||
646 | data->cur_trans = NULL; | ||
647 | |||
648 | /* check if we have items in the list and are not suspending; | ||
649 | * list_empty() returns 1 if the list is empty */ | ||
650 | if ((list_empty(&data->queue) == 0) && | ||
651 | (!data->board_dat->suspend_sts) && | ||
652 | (data->status != STATUS_EXITING)) { | ||
653 | /* We have some more work to do (either there are more | ||
654 | * transfer requests in the current message or there are | ||
655 | * more messages) | ||
656 | */ | ||
657 | dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__); | ||
658 | queue_work(data->wk, &data->work); | ||
659 | } else if (data->board_dat->suspend_sts || | ||
660 | data->status == STATUS_EXITING) { | ||
661 | dev_dbg(&data->master->dev, | ||
662 | "%s suspend/remove initiated, flushing queue\n", | ||
663 | __func__); | ||
664 | list_for_each_entry(pmsg, data->queue.next, queue) { | ||
665 | pmsg->status = -EIO; | ||
666 | |||
667 | if (pmsg->complete) | ||
668 | pmsg->complete(pmsg->context); | ||
669 | |||
670 | /* delete from queue */ | ||
671 | list_del_init(&pmsg->queue); | ||
672 | } | ||
673 | } | ||
674 | } | ||
675 | |||
676 | static void pch_spi_set_ir(struct pch_spi_data *data) | ||
677 | { | ||
678 | /* enable interrupts */ | ||
679 | if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { | ||
680 | /* set receive threshold to PCH_RX_THOLD */ | ||
681 | pch_spi_setclr_reg(data->master, PCH_SPCR, | ||
682 | PCH_RX_THOLD << SPCR_RFIC_FIELD, | ||
683 | ~MASK_RFIC_SPCR_BITS); | ||
684 | /* enable FI and RFI interrupts */ | ||
685 | pch_spi_setclr_reg(data->master, PCH_SPCR, | ||
686 | SPCR_RFIE_BIT | SPCR_FIE_BIT, 0); | ||
687 | } else { | ||
688 | /* set receive threshold to maximum */ | ||
689 | pch_spi_setclr_reg(data->master, PCH_SPCR, | ||
690 | PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, | ||
691 | ~MASK_TFIC_SPCR_BITS); | ||
692 | /* enable FI interrupt */ | ||
693 | pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); | ||
694 | } | ||
695 | |||
696 | dev_dbg(&data->master->dev, | ||
697 | "%s:invoking pch_spi_set_enable to enable SPI\n", __func__); | ||
698 | |||
699 | /* SPI set enable */ | ||
700 | pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0); | ||
701 | |||
702 | /* Wait until the transfer completes; go to sleep after | ||
703 | initiating the transfer. */ | ||
704 | dev_dbg(&data->master->dev, | ||
705 | "%s:waiting for transfer to get over\n", __func__); | ||
706 | |||
707 | wait_event_interruptible(data->wait, data->transfer_complete); | ||
708 | |||
709 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
710 | dev_dbg(&data->master->dev, | ||
711 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
712 | |||
713 | data->transfer_active = false; | ||
714 | dev_dbg(&data->master->dev, | ||
715 | "%s set data->transfer_active = false\n", __func__); | ||
716 | |||
717 | /* clear all interrupts */ | ||
718 | pch_spi_writereg(data->master, PCH_SPSR, | ||
719 | pch_spi_readreg(data->master, PCH_SPSR)); | ||
720 | /* disable interrupts */ | ||
721 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
722 | } | ||
723 | |||
724 | static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) | ||
725 | { | ||
726 | int j; | ||
727 | u8 *rx_buf; | ||
728 | u16 *rx_sbuf; | ||
729 | |||
730 | /* copy Rx Data */ | ||
731 | if (!data->cur_trans->rx_buf) | ||
732 | return; | ||
733 | |||
734 | if (bpw == 8) { | ||
735 | rx_buf = data->cur_trans->rx_buf; | ||
736 | for (j = 0; j < data->bpw_len; j++) | ||
737 | *rx_buf++ = data->pkt_rx_buff[j] & 0xFF; | ||
738 | } else { | ||
739 | rx_sbuf = data->cur_trans->rx_buf; | ||
740 | for (j = 0; j < data->bpw_len; j++) | ||
741 | *rx_sbuf++ = data->pkt_rx_buff[j]; | ||
742 | } | ||
743 | } | ||
744 | |||
745 | |||
746 | static void pch_spi_process_messages(struct work_struct *pwork) | ||
747 | { | ||
748 | struct spi_message *pmsg; | ||
749 | struct pch_spi_data *data; | ||
750 | int bpw; | ||
751 | |||
752 | data = container_of(pwork, struct pch_spi_data, work); | ||
753 | dev_dbg(&data->master->dev, "%s data initialized\n", __func__); | ||
754 | |||
755 | spin_lock(&data->lock); | ||
756 | |||
757 | /* check if suspend has been initiated; if yes, flush the queue */ | ||
758 | if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { | ||
759 | dev_dbg(&data->master->dev, | ||
760 | "%s suspend/remove initiated,flushing queue\n", | ||
761 | __func__); | ||
762 | |||
763 | list_for_each_entry(pmsg, data->queue.next, queue) { | ||
764 | pmsg->status = -EIO; | ||
765 | |||
766 | if (pmsg->complete != 0) { | ||
767 | spin_unlock(&data->lock); | ||
768 | pmsg->complete(pmsg->context); | ||
769 | spin_lock(&data->lock); | ||
770 | } | ||
771 | |||
772 | /* delete from queue */ | ||
773 | list_del_init(&pmsg->queue); | ||
774 | } | ||
775 | |||
776 | spin_unlock(&data->lock); | ||
777 | return; | ||
778 | } | ||
779 | |||
780 | data->bcurrent_msg_processing = true; | ||
781 | dev_dbg(&data->master->dev, | ||
782 | "%s Set data->bcurrent_msg_processing= true\n", __func__); | ||
783 | |||
784 | /* Get the message from the queue and delete it from there. */ | ||
785 | data->current_msg = list_entry(data->queue.next, struct spi_message, | ||
786 | queue); | ||
787 | |||
788 | list_del_init(&data->current_msg->queue); | ||
789 | |||
790 | data->current_msg->status = 0; | ||
791 | |||
792 | pch_spi_select_chip(data, data->current_msg->spi); | ||
793 | |||
794 | spin_unlock(&data->lock); | ||
795 | |||
796 | do { | ||
797 | /* If we are already processing a message get the next | ||
798 | transfer structure from the message otherwise retrieve | ||
799 | the 1st transfer request from the message. */ | ||
800 | spin_lock(&data->lock); | ||
801 | |||
802 | if (data->cur_trans == NULL) { | ||
803 | data->cur_trans = | ||
804 | list_entry(data->current_msg->transfers. | ||
805 | next, struct spi_transfer, | ||
806 | transfer_list); | ||
807 | dev_dbg(&data->master->dev, | ||
808 | "%s :Getting 1st transfer message\n", __func__); | ||
809 | } else { | ||
810 | data->cur_trans = | ||
811 | list_entry(data->cur_trans->transfer_list.next, | ||
812 | struct spi_transfer, | ||
813 | transfer_list); | ||
814 | dev_dbg(&data->master->dev, | ||
815 | "%s :Getting next transfer message\n", | ||
816 | __func__); | ||
817 | } | ||
818 | |||
819 | spin_unlock(&data->lock); | ||
820 | |||
821 | pch_spi_set_tx(data, &bpw, &pmsg); | ||
822 | |||
823 | /* Control interrupt*/ | ||
824 | pch_spi_set_ir(data); | ||
825 | |||
826 | /* Disable SPI transfer */ | ||
827 | pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, | ||
828 | SPCR_SPE_BIT); | ||
829 | |||
830 | /* clear FIFO */ | ||
831 | pch_spi_clear_fifo(data->master); | ||
832 | |||
833 | /* copy Rx Data */ | ||
834 | pch_spi_copy_rx_data(data, bpw); | ||
835 | |||
836 | /* free memory */ | ||
837 | kfree(data->pkt_rx_buff); | ||
838 | data->pkt_rx_buff = NULL; | ||
839 | |||
840 | kfree(data->pkt_tx_buff); | ||
841 | data->pkt_tx_buff = NULL; | ||
842 | |||
843 | /* increment message count */ | ||
844 | data->current_msg->actual_length += data->cur_trans->len; | ||
845 | |||
846 | dev_dbg(&data->master->dev, | ||
847 | "%s:data->current_msg->actual_length=%d\n", | ||
848 | __func__, data->current_msg->actual_length); | ||
849 | |||
850 | /* check for delay */ | ||
851 | if (data->cur_trans->delay_usecs) { | ||
852 | dev_dbg(&data->master->dev, "%s:" | ||
853 | "delay in usec=%d\n", __func__, | ||
854 | data->cur_trans->delay_usecs); | ||
855 | udelay(data->cur_trans->delay_usecs); | ||
856 | } | ||
857 | |||
858 | spin_lock(&data->lock); | ||
859 | |||
860 | /* No more transfer in this message. */ | ||
861 | if ((data->cur_trans->transfer_list.next) == | ||
862 | &(data->current_msg->transfers)) { | ||
863 | pch_spi_nomore_transfer(data, pmsg); | ||
864 | } | ||
865 | |||
866 | spin_unlock(&data->lock); | ||
867 | |||
868 | } while (data->cur_trans != NULL); | ||
869 | } | ||
870 | |||
871 | static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) | ||
872 | { | ||
873 | dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); | ||
874 | |||
875 | /* free workqueue */ | ||
876 | if (board_dat->data->wk != NULL) { | ||
877 | destroy_workqueue(board_dat->data->wk); | ||
878 | board_dat->data->wk = NULL; | ||
879 | dev_dbg(&board_dat->pdev->dev, | ||
880 | "%s destroy_workqueue invoked successfully\n", | ||
881 | __func__); | ||
882 | } | ||
883 | |||
884 | /* disable interrupts & free IRQ */ | ||
885 | if (board_dat->irq_reg_sts) { | ||
886 | /* disable interrupts */ | ||
887 | pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, | ||
888 | PCH_ALL); | ||
889 | |||
890 | /* free IRQ */ | ||
891 | free_irq(board_dat->pdev->irq, board_dat); | ||
892 | |||
893 | dev_dbg(&board_dat->pdev->dev, | ||
894 | "%s free_irq invoked successfully\n", __func__); | ||
895 | |||
896 | board_dat->irq_reg_sts = false; | ||
897 | } | ||
898 | |||
899 | /* unmap PCI base address */ | ||
900 | if (board_dat->data->io_remap_addr != 0) { | ||
901 | pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); | ||
902 | |||
903 | board_dat->data->io_remap_addr = 0; | ||
904 | |||
905 | dev_dbg(&board_dat->pdev->dev, | ||
906 | "%s pci_iounmap invoked successfully\n", __func__); | ||
907 | } | ||
908 | |||
909 | /* release PCI region */ | ||
910 | if (board_dat->pci_req_sts) { | ||
911 | pci_release_regions(board_dat->pdev); | ||
912 | dev_dbg(&board_dat->pdev->dev, | ||
913 | "%s pci_release_regions invoked successfully\n", | ||
914 | __func__); | ||
915 | board_dat->pci_req_sts = false; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) | ||
920 | { | ||
921 | void __iomem *io_remap_addr; | ||
922 | int retval; | ||
923 | dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); | ||
924 | |||
925 | /* create workqueue */ | ||
926 | board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); | ||
927 | if (!board_dat->data->wk) { | ||
928 | dev_err(&board_dat->pdev->dev, | ||
929 | "%s create_singlet hread_workqueue failed\n", __func__); | ||
930 | retval = -EBUSY; | ||
931 | goto err_return; | ||
932 | } | ||
933 | |||
934 | dev_dbg(&board_dat->pdev->dev, | ||
935 | "%s create_singlethread_workqueue success\n", __func__); | ||
936 | |||
937 | retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); | ||
938 | if (retval != 0) { | ||
939 | dev_err(&board_dat->pdev->dev, | ||
940 | "%s request_region failed\n", __func__); | ||
941 | goto err_return; | ||
942 | } | ||
943 | |||
944 | board_dat->pci_req_sts = true; | ||
945 | |||
946 | io_remap_addr = pci_iomap(board_dat->pdev, 1, 0); | ||
947 | if (io_remap_addr == 0) { | ||
948 | dev_err(&board_dat->pdev->dev, | ||
949 | "%s pci_iomap failed\n", __func__); | ||
950 | retval = -ENOMEM; | ||
951 | goto err_return; | ||
952 | } | ||
953 | |||
954 | /* calculate base address for all channels */ | ||
955 | board_dat->data->io_remap_addr = io_remap_addr; | ||
956 | |||
957 | /* reset PCH SPI h/w */ | ||
958 | pch_spi_reset(board_dat->data->master); | ||
959 | dev_dbg(&board_dat->pdev->dev, | ||
960 | "%s pch_spi_reset invoked successfully\n", __func__); | ||
961 | |||
962 | /* register IRQ */ | ||
963 | retval = request_irq(board_dat->pdev->irq, pch_spi_handler, | ||
964 | IRQF_SHARED, KBUILD_MODNAME, board_dat); | ||
965 | if (retval != 0) { | ||
966 | dev_err(&board_dat->pdev->dev, | ||
967 | "%s request_irq failed\n", __func__); | ||
968 | goto err_return; | ||
969 | } | ||
970 | |||
971 | dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n", | ||
972 | __func__, retval); | ||
973 | |||
974 | board_dat->irq_reg_sts = true; | ||
975 | dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); | ||
976 | |||
977 | err_return: | ||
978 | if (retval != 0) { | ||
979 | dev_err(&board_dat->pdev->dev, | ||
980 | "%s FAIL:invoking pch_spi_free_resources\n", __func__); | ||
981 | pch_spi_free_resources(board_dat); | ||
982 | } | ||
983 | |||
984 | dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); | ||
985 | |||
986 | return retval; | ||
987 | } | ||
988 | |||
989 | static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
990 | { | ||
991 | |||
992 | struct spi_master *master; | ||
993 | |||
994 | struct pch_spi_board_data *board_dat; | ||
995 | int retval; | ||
996 | |||
997 | dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); | ||
998 | |||
999 | /* allocate memory for private data */ | ||
1000 | board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); | ||
1001 | if (board_dat == NULL) { | ||
1002 | dev_err(&pdev->dev, | ||
1003 | " %s memory allocation for private data failed\n", | ||
1004 | __func__); | ||
1005 | retval = -ENOMEM; | ||
1006 | goto err_kmalloc; | ||
1007 | } | ||
1008 | |||
1009 | dev_dbg(&pdev->dev, | ||
1010 | "%s memory allocation for private data success\n", __func__); | ||
1011 | |||
1012 | /* enable PCI device */ | ||
1013 | retval = pci_enable_device(pdev); | ||
1014 | if (retval != 0) { | ||
1015 | dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__); | ||
1016 | |||
1017 | goto err_pci_en_device; | ||
1018 | } | ||
1019 | |||
1020 | dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", | ||
1021 | __func__, retval); | ||
1022 | |||
1023 | board_dat->pdev = pdev; | ||
1024 | |||
1025 | /* allocate memory for SPI master */ | ||
1026 | master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); | ||
1027 | if (master == NULL) { | ||
1028 | retval = -ENOMEM; | ||
1029 | dev_err(&pdev->dev, "%s Fail.\n", __func__); | ||
1030 | goto err_spi_alloc_master; | ||
1031 | } | ||
1032 | |||
1033 | dev_dbg(&pdev->dev, | ||
1034 | "%s spi_alloc_master returned non NULL\n", __func__); | ||
1035 | |||
1036 | /* initialize members of SPI master */ | ||
1037 | master->bus_num = -1; | ||
1038 | master->num_chipselect = PCH_MAX_CS; | ||
1039 | master->setup = pch_spi_setup; | ||
1040 | master->transfer = pch_spi_transfer; | ||
1041 | dev_dbg(&pdev->dev, | ||
1042 | "%s transfer member of SPI master initialized\n", __func__); | ||
1043 | |||
1044 | board_dat->data = spi_master_get_devdata(master); | ||
1045 | |||
1046 | board_dat->data->master = master; | ||
1047 | board_dat->data->n_curnt_chip = 255; | ||
1048 | board_dat->data->board_dat = board_dat; | ||
1049 | board_dat->data->status = STATUS_RUNNING; | ||
1050 | |||
1051 | INIT_LIST_HEAD(&board_dat->data->queue); | ||
1052 | spin_lock_init(&board_dat->data->lock); | ||
1053 | INIT_WORK(&board_dat->data->work, pch_spi_process_messages); | ||
1054 | init_waitqueue_head(&board_dat->data->wait); | ||
1055 | |||
1056 | /* allocate resources for PCH SPI */ | ||
1057 | retval = pch_spi_get_resources(board_dat); | ||
1058 | if (retval) { | ||
1059 | dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); | ||
1060 | goto err_spi_get_resources; | ||
1061 | } | ||
1062 | |||
1063 | dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", | ||
1064 | __func__, retval); | ||
1065 | |||
1066 | /* save private data in dev */ | ||
1067 | pci_set_drvdata(pdev, board_dat); | ||
1068 | dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); | ||
1069 | |||
1070 | /* set master mode */ | ||
1071 | pch_spi_set_master_mode(master); | ||
1072 | dev_dbg(&pdev->dev, | ||
1073 | "%s invoked pch_spi_set_master_mode\n", __func__); | ||
1074 | |||
1075 | /* Register the controller with the SPI core. */ | ||
1076 | retval = spi_register_master(master); | ||
1077 | if (retval != 0) { | ||
1078 | dev_err(&pdev->dev, | ||
1079 | "%s spi_register_master FAILED\n", __func__); | ||
1080 | goto err_spi_reg_master; | ||
1081 | } | ||
1082 | |||
1083 | dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", | ||
1084 | __func__, retval); | ||
1085 | |||
1086 | |||
1087 | return 0; | ||
1088 | |||
1089 | err_spi_reg_master: | ||
1090 | spi_unregister_master(master); | ||
1091 | err_spi_get_resources: | ||
1092 | err_spi_alloc_master: | ||
1093 | spi_master_put(master); | ||
1094 | pci_disable_device(pdev); | ||
1095 | err_pci_en_device: | ||
1096 | kfree(board_dat); | ||
1097 | err_kmalloc: | ||
1098 | return retval; | ||
1099 | } | ||
1100 | |||
1101 | static void pch_spi_remove(struct pci_dev *pdev) | ||
1102 | { | ||
1103 | struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); | ||
1104 | int count; | ||
1105 | |||
1106 | dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); | ||
1107 | |||
1108 | if (!board_dat) { | ||
1109 | dev_err(&pdev->dev, | ||
1110 | "%s pci_get_drvdata returned NULL\n", __func__); | ||
1111 | return; | ||
1112 | } | ||
1113 | |||
1114 | /* check for any pending messages; give up if the queue has | ||
1115 | * still not drained, but at least we tried. Unload anyway */ | ||
1116 | count = 500; | ||
1117 | spin_lock(&board_dat->data->lock); | ||
1118 | board_dat->data->status = STATUS_EXITING; | ||
1119 | while ((list_empty(&board_dat->data->queue) == 0) && --count) { | ||
1120 | dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", | ||
1121 | __func__); | ||
1122 | spin_unlock(&board_dat->data->lock); | ||
1123 | msleep(PCH_SLEEP_TIME); | ||
1124 | spin_lock(&board_dat->data->lock); | ||
1125 | } | ||
1126 | spin_unlock(&board_dat->data->lock); | ||
1127 | |||
1128 | /* Free resources allocated for PCH SPI */ | ||
1129 | pch_spi_free_resources(board_dat); | ||
1130 | |||
1131 | spi_unregister_master(board_dat->data->master); | ||
1132 | |||
1133 | /* free memory for private data */ | ||
1134 | kfree(board_dat); | ||
1135 | |||
1136 | pci_set_drvdata(pdev, NULL); | ||
1137 | |||
1138 | /* disable PCI device */ | ||
1139 | pci_disable_device(pdev); | ||
1140 | |||
1141 | dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); | ||
1142 | } | ||
1143 | |||
1144 | #ifdef CONFIG_PM | ||
1145 | static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1146 | { | ||
1147 | u8 count; | ||
1148 | int retval; | ||
1149 | |||
1150 | struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); | ||
1151 | |||
1152 | dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); | ||
1153 | |||
1154 | if (!board_dat) { | ||
1155 | dev_err(&pdev->dev, | ||
1156 | "%s pci_get_drvdata returned NULL\n", __func__); | ||
1157 | return -EFAULT; | ||
1158 | } | ||
1159 | |||
1160 | retval = 0; | ||
1161 | board_dat->suspend_sts = true; | ||
1162 | |||
1163 | /* check if the current message has been processed: | ||
1164 | only after that is done will the transfer be suspended */ | ||
1165 | count = 255; | ||
1166 | while ((--count) > 0) { | ||
1167 | if (!(board_dat->data->bcurrent_msg_processing)) { | ||
1168 | dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_" | ||
1169 | "msg_processing = false\n", __func__); | ||
1170 | break; | ||
1171 | } else { | ||
1172 | dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_" | ||
1173 | "processing = true\n", __func__); | ||
1174 | } | ||
1175 | msleep(PCH_SLEEP_TIME); | ||
1176 | } | ||
1177 | |||
1178 | /* Free IRQ */ | ||
1179 | if (board_dat->irq_reg_sts) { | ||
1180 | /* disable all interrupts */ | ||
1181 | pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, | ||
1182 | PCH_ALL); | ||
1183 | pch_spi_reset(board_dat->data->master); | ||
1184 | |||
1185 | free_irq(board_dat->pdev->irq, board_dat); | ||
1186 | |||
1187 | board_dat->irq_reg_sts = false; | ||
1188 | dev_dbg(&pdev->dev, | ||
1189 | "%s free_irq invoked successfully.\n", __func__); | ||
1190 | } | ||
1191 | |||
1192 | /* save config space */ | ||
1193 | retval = pci_save_state(pdev); | ||
1194 | |||
1195 | if (retval == 0) { | ||
1196 | dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n", | ||
1197 | __func__, retval); | ||
1198 | /* disable PM notifications */ | ||
1199 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
1200 | dev_dbg(&pdev->dev, | ||
1201 | "%s pci_enable_wake invoked successfully\n", __func__); | ||
1202 | /* disable PCI device */ | ||
1203 | pci_disable_device(pdev); | ||
1204 | dev_dbg(&pdev->dev, | ||
1205 | "%s pci_disable_device invoked successfully\n", | ||
1206 | __func__); | ||
1207 | /* move device to D3hot state */ | ||
1208 | pci_set_power_state(pdev, PCI_D3hot); | ||
1209 | dev_dbg(&pdev->dev, | ||
1210 | "%s pci_set_power_state invoked successfully\n", | ||
1211 | __func__); | ||
1212 | } else { | ||
1213 | dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); | ||
1214 | } | ||
1215 | |||
1216 | dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval); | ||
1217 | |||
1218 | return retval; | ||
1219 | } | ||
1220 | |||
1221 | static int pch_spi_resume(struct pci_dev *pdev) | ||
1222 | { | ||
1223 | int retval; | ||
1224 | |||
1225 | struct pch_spi_board_data *board = pci_get_drvdata(pdev); | ||
1226 | dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); | ||
1227 | |||
1228 | if (!board) { | ||
1229 | dev_err(&pdev->dev, | ||
1230 | "%s pci_get_drvdata returned NULL\n", __func__); | ||
1231 | return -EFAULT; | ||
1232 | } | ||
1233 | |||
1234 | /* move device to D0 power state */ | ||
1235 | pci_set_power_state(pdev, PCI_D0); | ||
1236 | |||
1237 | /* restore state */ | ||
1238 | pci_restore_state(pdev); | ||
1239 | |||
1240 | retval = pci_enable_device(pdev); | ||
1241 | if (retval < 0) { | ||
1242 | dev_err(&pdev->dev, | ||
1243 | "%s pci_enable_device failed\n", __func__); | ||
1244 | } else { | ||
1245 | /* disable PM notifications */ | ||
1246 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
1247 | |||
1248 | /* register IRQ handler */ | ||
1249 | if (!board->irq_reg_sts) { | ||
1250 | /* register IRQ */ | ||
1251 | retval = request_irq(board->pdev->irq, pch_spi_handler, | ||
1252 | IRQF_SHARED, KBUILD_MODNAME, | ||
1253 | board); | ||
1254 | if (retval < 0) { | ||
1255 | dev_err(&pdev->dev, | ||
1256 | "%s request_irq failed\n", __func__); | ||
1257 | return retval; | ||
1258 | } | ||
1259 | board->irq_reg_sts = true; | ||
1260 | |||
1261 | /* reset PCH SPI h/w */ | ||
1262 | pch_spi_reset(board->data->master); | ||
1263 | pch_spi_set_master_mode(board->data->master); | ||
1264 | |||
1265 | /* set suspend status to false */ | ||
1266 | board->suspend_sts = false; | ||
1267 | |||
1268 | } | ||
1269 | } | ||
1270 | |||
1271 | dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval); | ||
1272 | |||
1273 | return retval; | ||
1274 | } | ||
1275 | #else | ||
1276 | #define pch_spi_suspend NULL | ||
1277 | #define pch_spi_resume NULL | ||
1278 | |||
1279 | #endif | ||
1280 | |||
1281 | static struct pci_driver pch_spi_pcidev = { | ||
1282 | .name = "pch_spi", | ||
1283 | .id_table = pch_spi_pcidev_id, | ||
1284 | .probe = pch_spi_probe, | ||
1285 | .remove = pch_spi_remove, | ||
1286 | .suspend = pch_spi_suspend, | ||
1287 | .resume = pch_spi_resume, | ||
1288 | }; | ||
1289 | |||
1290 | static int __init pch_spi_init(void) | ||
1291 | { | ||
1292 | return pci_register_driver(&pch_spi_pcidev); | ||
1293 | } | ||
1294 | module_init(pch_spi_init); | ||
1295 | |||
1296 | static void __exit pch_spi_exit(void) | ||
1297 | { | ||
1298 | pci_unregister_driver(&pch_spi_pcidev); | ||
1299 | } | ||
1300 | module_exit(pch_spi_exit); | ||
1301 | |||
1302 | MODULE_LICENSE("GPL"); | ||
1303 | MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver"); | ||
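The transfer() hook above only validates and queues the message; the actual FIFO work happens later in pch_spi_process_messages() on the driver's workqueue. A protocol driver never calls transfer() directly; it goes through the SPI core, roughly as in the hedged sketch below (the opcode and function name are made up for illustration).

#include <linux/spi/spi.h>

/* Illustrative only: sends a hypothetical 0x9f opcode and reads one byte back.
 * spi_sync() queues the message (which lands in pch_spi_transfer() above) and
 * sleeps until the work function marks it complete. Real callers should use
 * DMA-safe (heap) buffers rather than the stack. */
static int example_read_id(struct spi_device *spi, u8 *id)
{
	u8 cmd = 0x9f;
	struct spi_transfer t[2] = {
		{ .tx_buf = &cmd, .len = 1, },
		{ .rx_buf = id,   .len = 1, },
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);
}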
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index ea1bec3c9a13..d9fd86211365 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/errno.h> | 30 | #include <linux/errno.h> |
31 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/compat.h> | ||
33 | 34 | ||
34 | #include <linux/spi/spi.h> | 35 | #include <linux/spi/spi.h> |
35 | #include <linux/spi/spidev.h> | 36 | #include <linux/spi/spidev.h> |
@@ -38,7 +39,7 @@ | |||
38 | 39 | ||
39 | 40 | ||
40 | /* | 41 | /* |
41 | * This supports acccess to SPI devices using normal userspace I/O calls. | 42 | * This supports access to SPI devices using normal userspace I/O calls. |
42 | * Note that while traditional UNIX/POSIX I/O semantics are half duplex, | 43 | * Note that while traditional UNIX/POSIX I/O semantics are half duplex, |
43 | * and often mask message boundaries, full SPI support requires full duplex | 44 | * and often mask message boundaries, full SPI support requires full duplex |
44 | * transfers. There are several kinds of internal message boundaries to | 45 | * transfers. There are several kinds of internal message boundaries to |
@@ -471,6 +472,16 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
471 | return retval; | 472 | return retval; |
472 | } | 473 | } |
473 | 474 | ||
475 | #ifdef CONFIG_COMPAT | ||
476 | static long | ||
477 | spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
478 | { | ||
479 | return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | ||
480 | } | ||
481 | #else | ||
482 | #define spidev_compat_ioctl NULL | ||
483 | #endif /* CONFIG_COMPAT */ | ||
484 | |||
474 | static int spidev_open(struct inode *inode, struct file *filp) | 485 | static int spidev_open(struct inode *inode, struct file *filp) |
475 | { | 486 | { |
476 | struct spidev_data *spidev; | 487 | struct spidev_data *spidev; |
@@ -543,8 +554,10 @@ static const struct file_operations spidev_fops = { | |||
543 | .write = spidev_write, | 554 | .write = spidev_write, |
544 | .read = spidev_read, | 555 | .read = spidev_read, |
545 | .unlocked_ioctl = spidev_ioctl, | 556 | .unlocked_ioctl = spidev_ioctl, |
557 | .compat_ioctl = spidev_compat_ioctl, | ||
546 | .open = spidev_open, | 558 | .open = spidev_open, |
547 | .release = spidev_release, | 559 | .release = spidev_release, |
560 | .llseek = no_llseek, | ||
548 | }; | 561 | }; |
549 | 562 | ||
550 | /*-------------------------------------------------------------------------*/ | 563 | /*-------------------------------------------------------------------------*/ |
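The spidev hunk above adds a compat_ioctl handler so 32-bit user programs work on a 64-bit kernel: compat_ptr() converts the 32-bit argument into a proper user pointer before the normal spidev_ioctl() path runs. From userspace the call looks the same either way; a minimal sketch follows (the device node name is just an example).

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int main(void)
{
	struct spi_ioc_transfer xfer;
	uint8_t tx[4] = { 0x9f, 0, 0, 0 }, rx[4] = { 0 };
	int fd = open("/dev/spidev0.0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&xfer, 0, sizeof(xfer));
	xfer.tx_buf = (unsigned long)tx;	/* the kernel sees a __u64 either way */
	xfer.rx_buf = (unsigned long)rx;
	xfer.len = sizeof(tx);

	/* full-duplex transfer; works identically for 32-bit and 64-bit callers */
	return ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0 ? 1 : 0;
}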
diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/ti-ssp-spi.c new file mode 100644 index 000000000000..ee22795c7973 --- /dev/null +++ b/drivers/spi/ti-ssp-spi.c | |||
@@ -0,0 +1,402 @@ | |||
1 | /* | ||
2 | * Sequencer Serial Port (SSP) based SPI master driver | ||
3 | * | ||
4 | * Copyright (C) 2010 Texas Instruments Inc | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/completion.h> | ||
24 | #include <linux/delay.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/spi/spi.h> | ||
27 | #include <linux/mfd/ti_ssp.h> | ||
28 | |||
29 | #define MODE_BITS (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH) | ||
30 | |||
31 | struct ti_ssp_spi { | ||
32 | struct spi_master *master; | ||
33 | struct device *dev; | ||
34 | spinlock_t lock; | ||
35 | struct list_head msg_queue; | ||
36 | struct completion complete; | ||
37 | bool shutdown; | ||
38 | struct workqueue_struct *workqueue; | ||
39 | struct work_struct work; | ||
40 | u8 mode, bpw; | ||
41 | int cs_active; | ||
42 | u32 pc_en, pc_dis, pc_wr, pc_rd; | ||
43 | void (*select)(int cs); | ||
44 | }; | ||
45 | |||
46 | static u32 ti_ssp_spi_rx(struct ti_ssp_spi *hw) | ||
47 | { | ||
48 | u32 ret; | ||
49 | |||
50 | ti_ssp_run(hw->dev, hw->pc_rd, 0, &ret); | ||
51 | return ret; | ||
52 | } | ||
53 | |||
54 | static void ti_ssp_spi_tx(struct ti_ssp_spi *hw, u32 data) | ||
55 | { | ||
56 | ti_ssp_run(hw->dev, hw->pc_wr, data << (32 - hw->bpw), NULL); | ||
57 | } | ||
58 | |||
59 | static int ti_ssp_spi_txrx(struct ti_ssp_spi *hw, struct spi_message *msg, | ||
60 | struct spi_transfer *t) | ||
61 | { | ||
62 | int count; | ||
63 | |||
64 | if (hw->bpw <= 8) { | ||
65 | u8 *rx = t->rx_buf; | ||
66 | const u8 *tx = t->tx_buf; | ||
67 | |||
68 | for (count = 0; count < t->len; count += 1) { | ||
69 | if (t->tx_buf) | ||
70 | ti_ssp_spi_tx(hw, *tx++); | ||
71 | if (t->rx_buf) | ||
72 | *rx++ = ti_ssp_spi_rx(hw); | ||
73 | } | ||
74 | } else if (hw->bpw <= 16) { | ||
75 | u16 *rx = t->rx_buf; | ||
76 | const u16 *tx = t->tx_buf; | ||
77 | |||
78 | for (count = 0; count < t->len; count += 2) { | ||
79 | if (t->tx_buf) | ||
80 | ti_ssp_spi_tx(hw, *tx++); | ||
81 | if (t->rx_buf) | ||
82 | *rx++ = ti_ssp_spi_rx(hw); | ||
83 | } | ||
84 | } else { | ||
85 | u32 *rx = t->rx_buf; | ||
86 | const u32 *tx = t->tx_buf; | ||
87 | |||
88 | for (count = 0; count < t->len; count += 4) { | ||
89 | if (t->tx_buf) | ||
90 | ti_ssp_spi_tx(hw, *tx++); | ||
91 | if (t->rx_buf) | ||
92 | *rx++ = ti_ssp_spi_rx(hw); | ||
93 | } | ||
94 | } | ||
95 | |||
96 | msg->actual_length += count; /* bytes transferred */ | ||
97 | |||
98 | dev_dbg(&msg->spi->dev, "xfer %s%s, %d bytes, %d bpw, count %d%s\n", | ||
99 | t->tx_buf ? "tx" : "", t->rx_buf ? "rx" : "", t->len, | ||
100 | hw->bpw, count, (count < t->len) ? " (under)" : ""); | ||
101 | |||
102 | return (count < t->len) ? -EIO : 0; /* left over data */ | ||
103 | } | ||
104 | |||
105 | static void ti_ssp_spi_chip_select(struct ti_ssp_spi *hw, int cs_active) | ||
106 | { | ||
107 | cs_active = !!cs_active; | ||
108 | if (cs_active == hw->cs_active) | ||
109 | return; | ||
110 | ti_ssp_run(hw->dev, cs_active ? hw->pc_en : hw->pc_dis, 0, NULL); | ||
111 | hw->cs_active = cs_active; | ||
112 | } | ||
113 | |||
114 | #define __SHIFT_OUT(bits) (SSP_OPCODE_SHIFT | SSP_OUT_MODE | \ | ||
115 | cs_en | clk | SSP_COUNT((bits) * 2 - 1)) | ||
116 | #define __SHIFT_IN(bits) (SSP_OPCODE_SHIFT | SSP_IN_MODE | \ | ||
117 | cs_en | clk | SSP_COUNT((bits) * 2 - 1)) | ||
118 | |||
119 | static int ti_ssp_spi_setup_transfer(struct ti_ssp_spi *hw, u8 bpw, u8 mode) | ||
120 | { | ||
121 | int error, idx = 0; | ||
122 | u32 seqram[16]; | ||
123 | u32 cs_en, cs_dis, clk; | ||
124 | u32 topbits, botbits; | ||
125 | |||
126 | mode &= MODE_BITS; | ||
127 | if (mode == hw->mode && bpw == hw->bpw) | ||
128 | return 0; | ||
129 | |||
130 | cs_en = (mode & SPI_CS_HIGH) ? SSP_CS_HIGH : SSP_CS_LOW; | ||
131 | cs_dis = (mode & SPI_CS_HIGH) ? SSP_CS_LOW : SSP_CS_HIGH; | ||
132 | clk = (mode & SPI_CPOL) ? SSP_CLK_HIGH : SSP_CLK_LOW; | ||
133 | |||
134 | /* Construct instructions */ | ||
135 | |||
136 | /* Disable Chip Select */ | ||
137 | hw->pc_dis = idx; | ||
138 | seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_dis | clk; | ||
139 | seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_dis | clk; | ||
140 | |||
141 | /* Enable Chip Select */ | ||
142 | hw->pc_en = idx; | ||
143 | seqram[idx++] = SSP_OPCODE_DIRECT | SSP_OUT_MODE | cs_en | clk; | ||
144 | seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; | ||
145 | |||
146 | /* Reads and writes need to be split for bpw > 16 */ | ||
147 | topbits = (bpw > 16) ? 16 : bpw; | ||
148 | botbits = bpw - topbits; | ||
149 | |||
150 | /* Write */ | ||
151 | hw->pc_wr = idx; | ||
152 | seqram[idx++] = __SHIFT_OUT(topbits) | SSP_ADDR_REG; | ||
153 | if (botbits) | ||
154 | seqram[idx++] = __SHIFT_OUT(botbits) | SSP_DATA_REG; | ||
155 | seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; | ||
156 | |||
157 | /* Read */ | ||
158 | hw->pc_rd = idx; | ||
159 | if (botbits) | ||
160 | seqram[idx++] = __SHIFT_IN(botbits) | SSP_ADDR_REG; | ||
161 | seqram[idx++] = __SHIFT_IN(topbits) | SSP_DATA_REG; | ||
162 | seqram[idx++] = SSP_OPCODE_STOP | SSP_OUT_MODE | cs_en | clk; | ||
163 | |||
164 | error = ti_ssp_load(hw->dev, 0, seqram, idx); | ||
165 | if (error < 0) | ||
166 | return error; | ||
167 | |||
168 | error = ti_ssp_set_mode(hw->dev, ((mode & SPI_CPHA) ? | ||
169 | 0 : SSP_EARLY_DIN)); | ||
170 | if (error < 0) | ||
171 | return error; | ||
172 | |||
173 | hw->bpw = bpw; | ||
174 | hw->mode = mode; | ||
175 | |||
176 | return error; | ||
177 | } | ||
178 | |||
179 | static void ti_ssp_spi_work(struct work_struct *work) | ||
180 | { | ||
181 | struct ti_ssp_spi *hw = container_of(work, struct ti_ssp_spi, work); | ||
182 | |||
183 | spin_lock(&hw->lock); | ||
184 | |||
185 | while (!list_empty(&hw->msg_queue)) { | ||
186 | struct spi_message *m; | ||
187 | struct spi_device *spi; | ||
188 | struct spi_transfer *t = NULL; | ||
189 | int status = 0; | ||
190 | |||
191 | m = container_of(hw->msg_queue.next, struct spi_message, | ||
192 | queue); | ||
193 | |||
194 | list_del_init(&m->queue); | ||
195 | |||
196 | spin_unlock(&hw->lock); | ||
197 | |||
198 | spi = m->spi; | ||
199 | |||
200 | if (hw->select) | ||
201 | hw->select(spi->chip_select); | ||
202 | |||
203 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
204 | int bpw = spi->bits_per_word; | ||
205 | int xfer_status; | ||
206 | |||
207 | if (t->bits_per_word) | ||
208 | bpw = t->bits_per_word; | ||
209 | |||
210 | if (ti_ssp_spi_setup_transfer(hw, bpw, spi->mode) < 0) | ||
211 | break; | ||
212 | |||
213 | ti_ssp_spi_chip_select(hw, 1); | ||
214 | |||
215 | xfer_status = ti_ssp_spi_txrx(hw, m, t); | ||
216 | if (xfer_status < 0) | ||
217 | status = xfer_status; | ||
218 | |||
219 | if (t->delay_usecs) | ||
220 | udelay(t->delay_usecs); | ||
221 | |||
222 | if (t->cs_change) | ||
223 | ti_ssp_spi_chip_select(hw, 0); | ||
224 | } | ||
225 | |||
226 | ti_ssp_spi_chip_select(hw, 0); | ||
227 | m->status = status; | ||
228 | m->complete(m->context); | ||
229 | |||
230 | spin_lock(&hw->lock); | ||
231 | } | ||
232 | |||
233 | if (hw->shutdown) | ||
234 | complete(&hw->complete); | ||
235 | |||
236 | spin_unlock(&hw->lock); | ||
237 | } | ||
238 | |||
239 | static int ti_ssp_spi_setup(struct spi_device *spi) | ||
240 | { | ||
241 | if (spi->bits_per_word > 32) | ||
242 | return -EINVAL; | ||
243 | |||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static int ti_ssp_spi_transfer(struct spi_device *spi, struct spi_message *m) | ||
248 | { | ||
249 | struct ti_ssp_spi *hw; | ||
250 | struct spi_transfer *t; | ||
251 | int error = 0; | ||
252 | |||
253 | m->actual_length = 0; | ||
254 | m->status = -EINPROGRESS; | ||
255 | |||
256 | hw = spi_master_get_devdata(spi->master); | ||
257 | |||
258 | if (list_empty(&m->transfers) || !m->complete) | ||
259 | return -EINVAL; | ||
260 | |||
261 | list_for_each_entry(t, &m->transfers, transfer_list) { | ||
262 | if (t->len && !(t->rx_buf || t->tx_buf)) { | ||
263 | dev_err(&spi->dev, "invalid xfer, no buffer\n"); | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | |||
267 | if (t->len && t->rx_buf && t->tx_buf) { | ||
268 | dev_err(&spi->dev, "invalid xfer, full duplex\n"); | ||
269 | return -EINVAL; | ||
270 | } | ||
271 | |||
272 | if (t->bits_per_word > 32) { | ||
273 | dev_err(&spi->dev, "invalid xfer width %d\n", | ||
274 | t->bits_per_word); | ||
275 | return -EINVAL; | ||
276 | } | ||
277 | } | ||
278 | |||
279 | spin_lock(&hw->lock); | ||
280 | if (hw->shutdown) { | ||
281 | error = -ESHUTDOWN; | ||
282 | goto error_unlock; | ||
283 | } | ||
284 | list_add_tail(&m->queue, &hw->msg_queue); | ||
285 | queue_work(hw->workqueue, &hw->work); | ||
286 | error_unlock: | ||
287 | spin_unlock(&hw->lock); | ||
288 | return error; | ||
289 | } | ||
290 | |||
291 | static int __devinit ti_ssp_spi_probe(struct platform_device *pdev) | ||
292 | { | ||
293 | const struct ti_ssp_spi_data *pdata; | ||
294 | struct ti_ssp_spi *hw; | ||
295 | struct spi_master *master; | ||
296 | struct device *dev = &pdev->dev; | ||
297 | int error = 0; | ||
298 | |||
299 | pdata = dev->platform_data; | ||
300 | if (!pdata) { | ||
301 | dev_err(dev, "platform data not found\n"); | ||
302 | return -EINVAL; | ||
303 | } | ||
304 | |||
305 | master = spi_alloc_master(dev, sizeof(struct ti_ssp_spi)); | ||
306 | if (!master) { | ||
307 | dev_err(dev, "cannot allocate SPI master\n"); | ||
308 | return -ENOMEM; | ||
309 | } | ||
310 | |||
311 | hw = spi_master_get_devdata(master); | ||
312 | platform_set_drvdata(pdev, hw); | ||
313 | |||
314 | hw->master = master; | ||
315 | hw->dev = dev; | ||
316 | hw->select = pdata->select; | ||
317 | |||
318 | spin_lock_init(&hw->lock); | ||
319 | init_completion(&hw->complete); | ||
320 | INIT_LIST_HEAD(&hw->msg_queue); | ||
321 | INIT_WORK(&hw->work, ti_ssp_spi_work); | ||
322 | |||
323 | hw->workqueue = create_singlethread_workqueue(dev_name(dev)); | ||
324 | if (!hw->workqueue) { | ||
325 | error = -ENOMEM; | ||
326 | dev_err(dev, "work queue creation failed\n"); | ||
327 | goto error_wq; | ||
328 | } | ||
329 | |||
330 | error = ti_ssp_set_iosel(hw->dev, pdata->iosel); | ||
331 | if (error < 0) { | ||
332 | dev_err(dev, "io setup failed\n"); | ||
333 | goto error_iosel; | ||
334 | } | ||
335 | |||
336 | master->bus_num = pdev->id; | ||
337 | master->num_chipselect = pdata->num_cs; | ||
338 | master->mode_bits = MODE_BITS; | ||
339 | master->flags = SPI_MASTER_HALF_DUPLEX; | ||
340 | master->setup = ti_ssp_spi_setup; | ||
341 | master->transfer = ti_ssp_spi_transfer; | ||
342 | |||
343 | error = spi_register_master(master); | ||
344 | if (error) { | ||
345 | dev_err(dev, "master registration failed\n"); | ||
346 | goto error_reg; | ||
347 | } | ||
348 | |||
349 | return 0; | ||
350 | |||
351 | error_reg: | ||
352 | error_iosel: | ||
353 | destroy_workqueue(hw->workqueue); | ||
354 | error_wq: | ||
355 | spi_master_put(master); | ||
356 | return error; | ||
357 | } | ||
358 | |||
359 | static int __devexit ti_ssp_spi_remove(struct platform_device *pdev) | ||
360 | { | ||
361 | struct ti_ssp_spi *hw = platform_get_drvdata(pdev); | ||
362 | int error; | ||
363 | |||
364 | hw->shutdown = 1; | ||
365 | while (!list_empty(&hw->msg_queue)) { | ||
366 | error = wait_for_completion_interruptible(&hw->complete); | ||
367 | if (error < 0) { | ||
368 | hw->shutdown = 0; | ||
369 | return error; | ||
370 | } | ||
371 | } | ||
372 | destroy_workqueue(hw->workqueue); | ||
373 | spi_unregister_master(hw->master); | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
378 | static struct platform_driver ti_ssp_spi_driver = { | ||
379 | .probe = ti_ssp_spi_probe, | ||
380 | .remove = __devexit_p(ti_ssp_spi_remove), | ||
381 | .driver = { | ||
382 | .name = "ti-ssp-spi", | ||
383 | .owner = THIS_MODULE, | ||
384 | }, | ||
385 | }; | ||
386 | |||
387 | static int __init ti_ssp_spi_init(void) | ||
388 | { | ||
389 | return platform_driver_register(&ti_ssp_spi_driver); | ||
390 | } | ||
391 | module_init(ti_ssp_spi_init); | ||
392 | |||
393 | static void __exit ti_ssp_spi_exit(void) | ||
394 | { | ||
395 | platform_driver_unregister(&ti_ssp_spi_driver); | ||
396 | } | ||
397 | module_exit(ti_ssp_spi_exit); | ||
398 | |||
399 | MODULE_DESCRIPTION("SSP SPI Master"); | ||
400 | MODULE_AUTHOR("Cyril Chemparathy"); | ||
401 | MODULE_LICENSE("GPL"); | ||
402 | MODULE_ALIAS("platform:ti-ssp-spi"); | ||
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c index a3938958147c..32a40876532f 100644 --- a/drivers/spi/tle62x0.c +++ b/drivers/spi/tle62x0.c | |||
@@ -283,7 +283,7 @@ static int __devinit tle62x0_probe(struct spi_device *spi) | |||
283 | return 0; | 283 | return 0; |
284 | 284 | ||
285 | err_gpios: | 285 | err_gpios: |
286 | for (; ptr > 0; ptr--) | 286 | while (--ptr >= 0) |
287 | device_remove_file(&spi->dev, gpio_attrs[ptr]); | 287 | device_remove_file(&spi->dev, gpio_attrs[ptr]); |
288 | 288 | ||
289 | device_remove_file(&spi->dev, &dev_attr_status_show); | 289 | device_remove_file(&spi->dev, &dev_attr_status_show); |
@@ -301,6 +301,7 @@ static int __devexit tle62x0_remove(struct spi_device *spi) | |||
301 | for (ptr = 0; ptr < st->nr_gpio; ptr++) | 301 | for (ptr = 0; ptr < st->nr_gpio; ptr++) |
302 | device_remove_file(&spi->dev, gpio_attrs[ptr]); | 302 | device_remove_file(&spi->dev, gpio_attrs[ptr]); |
303 | 303 | ||
304 | device_remove_file(&spi->dev, &dev_attr_status_show); | ||
304 | kfree(st); | 305 | kfree(st); |
305 | return 0; | 306 | return 0; |
306 | } | 307 | } |
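The one-line change in the tle62x0 error path above is an off-by-one fix: when device_create_file() fails at index ptr, only attributes 0..ptr-1 were created, yet the old loop removed index ptr (never created) and skipped index 0. Below is a stand-alone sketch of the corrected unwind pattern, with made-up create()/destroy() helpers; it is not kernel code.

/* Stand-alone illustration of the cleanup-loop fix.
 * create() fails at index 2, so only indices 0 and 1 must be undone. */
#include <stdio.h>

#define N 4

static int create(int i)
{
	return (i == 2) ? -1 : 0;	/* simulate a failure at index 2 */
}

static void destroy(int i)
{
	printf("undo %d\n", i);
}

int main(void)
{
	int ptr;

	for (ptr = 0; ptr < N; ptr++)
		if (create(ptr) < 0)
			goto err;
	return 0;

err:
	while (--ptr >= 0)		/* visits 1 then 0: exactly what was created */
		destroy(ptr);
	return 1;
}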
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 80f2db5bcfd6..4d2c75df886c 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c | |||
@@ -1,26 +1,27 @@ | |||
1 | /* | 1 | /* |
2 | * xilinx_spi.c | ||
3 | * | ||
4 | * Xilinx SPI controller driver (master mode only) | 2 | * Xilinx SPI controller driver (master mode only) |
5 | * | 3 | * |
6 | * Author: MontaVista Software, Inc. | 4 | * Author: MontaVista Software, Inc. |
7 | * source@mvista.com | 5 | * source@mvista.com |
8 | * | 6 | * |
9 | * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the | 7 | * Copyright (c) 2010 Secret Lab Technologies, Ltd. |
10 | * terms of the GNU General Public License version 2. This program is licensed | 8 | * Copyright (c) 2009 Intel Corporation |
11 | * "as is" without any warranty of any kind, whether express or implied. | 9 | * 2002-2007 (c) MontaVista Software, Inc. |
10 | |||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License version 2 as | ||
13 | * published by the Free Software Foundation. | ||
12 | */ | 14 | */ |
13 | 15 | ||
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/init.h> | 17 | #include <linux/init.h> |
16 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
17 | 19 | #include <linux/of.h> | |
20 | #include <linux/platform_device.h> | ||
18 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
19 | #include <linux/spi/spi_bitbang.h> | 22 | #include <linux/spi/spi_bitbang.h> |
20 | #include <linux/io.h> | ||
21 | |||
22 | #include "xilinx_spi.h" | ||
23 | #include <linux/spi/xilinx_spi.h> | 23 | #include <linux/spi/xilinx_spi.h> |
24 | #include <linux/io.h> | ||
24 | 25 | ||
25 | #define XILINX_SPI_NAME "xilinx_spi" | 26 | #define XILINX_SPI_NAME "xilinx_spi" |
26 | 27 | ||
@@ -350,19 +351,20 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id) | |||
350 | return IRQ_HANDLED; | 351 | return IRQ_HANDLED; |
351 | } | 352 | } |
352 | 353 | ||
354 | static const struct of_device_id xilinx_spi_of_match[] = { | ||
355 | { .compatible = "xlnx,xps-spi-2.00.a", }, | ||
356 | { .compatible = "xlnx,xps-spi-2.00.b", }, | ||
357 | {} | ||
358 | }; | ||
359 | MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); | ||
360 | |||
353 | struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | 361 | struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, |
354 | u32 irq, s16 bus_num) | 362 | u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word) |
355 | { | 363 | { |
356 | struct spi_master *master; | 364 | struct spi_master *master; |
357 | struct xilinx_spi *xspi; | 365 | struct xilinx_spi *xspi; |
358 | struct xspi_platform_data *pdata = dev->platform_data; | ||
359 | int ret; | 366 | int ret; |
360 | 367 | ||
361 | if (!pdata) { | ||
362 | dev_err(dev, "No platform data attached\n"); | ||
363 | return NULL; | ||
364 | } | ||
365 | |||
366 | master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); | 368 | master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); |
367 | if (!master) | 369 | if (!master) |
368 | return NULL; | 370 | return NULL; |
@@ -389,21 +391,19 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | |||
389 | } | 391 | } |
390 | 392 | ||
391 | master->bus_num = bus_num; | 393 | master->bus_num = bus_num; |
392 | master->num_chipselect = pdata->num_chipselect; | 394 | master->num_chipselect = num_cs; |
393 | #ifdef CONFIG_OF | ||
394 | master->dev.of_node = dev->of_node; | 395 | master->dev.of_node = dev->of_node; |
395 | #endif | ||
396 | 396 | ||
397 | xspi->mem = *mem; | 397 | xspi->mem = *mem; |
398 | xspi->irq = irq; | 398 | xspi->irq = irq; |
399 | if (pdata->little_endian) { | 399 | if (little_endian) { |
400 | xspi->read_fn = xspi_read32; | 400 | xspi->read_fn = xspi_read32; |
401 | xspi->write_fn = xspi_write32; | 401 | xspi->write_fn = xspi_write32; |
402 | } else { | 402 | } else { |
403 | xspi->read_fn = xspi_read32_be; | 403 | xspi->read_fn = xspi_read32_be; |
404 | xspi->write_fn = xspi_write32_be; | 404 | xspi->write_fn = xspi_write32_be; |
405 | } | 405 | } |
406 | xspi->bits_per_word = pdata->bits_per_word; | 406 | xspi->bits_per_word = bits_per_word; |
407 | if (xspi->bits_per_word == 8) { | 407 | if (xspi->bits_per_word == 8) { |
408 | xspi->tx_fn = xspi_tx8; | 408 | xspi->tx_fn = xspi_tx8; |
409 | xspi->rx_fn = xspi_rx8; | 409 | xspi->rx_fn = xspi_rx8; |
@@ -462,6 +462,95 @@ void xilinx_spi_deinit(struct spi_master *master) | |||
462 | } | 462 | } |
463 | EXPORT_SYMBOL(xilinx_spi_deinit); | 463 | EXPORT_SYMBOL(xilinx_spi_deinit); |
464 | 464 | ||
465 | static int __devinit xilinx_spi_probe(struct platform_device *dev) | ||
466 | { | ||
467 | struct xspi_platform_data *pdata; | ||
468 | struct resource *r; | ||
469 | int irq, num_cs = 0, little_endian = 0, bits_per_word = 8; | ||
470 | struct spi_master *master; | ||
471 | u8 i; | ||
472 | |||
473 | pdata = dev->dev.platform_data; | ||
474 | if (pdata) { | ||
475 | num_cs = pdata->num_chipselect; | ||
476 | little_endian = pdata->little_endian; | ||
477 | bits_per_word = pdata->bits_per_word; | ||
478 | } | ||
479 | |||
480 | #ifdef CONFIG_OF | ||
481 | if (dev->dev.of_node) { | ||
482 | const __be32 *prop; | ||
483 | int len; | ||
484 | |||
485 | /* number of slave select bits is required */ | ||
486 | prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits", | ||
487 | &len); | ||
488 | if (prop && len >= sizeof(*prop)) | ||
489 | num_cs = __be32_to_cpup(prop); | ||
490 | } | ||
491 | #endif | ||
492 | |||
493 | if (!num_cs) { | ||
494 | dev_err(&dev->dev, "Missing slave select configuration data\n"); | ||
495 | return -EINVAL; | ||
496 | } | ||
497 | |||
498 | |||
499 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
500 | if (!r) | ||
501 | return -ENODEV; | ||
502 | |||
503 | irq = platform_get_irq(dev, 0); | ||
504 | if (irq < 0) | ||
505 | return -ENXIO; | ||
506 | |||
507 | master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs, | ||
508 | little_endian, bits_per_word); | ||
509 | if (!master) | ||
510 | return -ENODEV; | ||
511 | |||
512 | if (pdata) { | ||
513 | for (i = 0; i < pdata->num_devices; i++) | ||
514 | spi_new_device(master, pdata->devices + i); | ||
515 | } | ||
516 | |||
517 | platform_set_drvdata(dev, master); | ||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int __devexit xilinx_spi_remove(struct platform_device *dev) | ||
522 | { | ||
523 | xilinx_spi_deinit(platform_get_drvdata(dev)); | ||
524 | platform_set_drvdata(dev, 0); | ||
525 | |||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | /* work with hotplug and coldplug */ | ||
530 | MODULE_ALIAS("platform:" XILINX_SPI_NAME); | ||
531 | |||
532 | static struct platform_driver xilinx_spi_driver = { | ||
533 | .probe = xilinx_spi_probe, | ||
534 | .remove = __devexit_p(xilinx_spi_remove), | ||
535 | .driver = { | ||
536 | .name = XILINX_SPI_NAME, | ||
537 | .owner = THIS_MODULE, | ||
538 | .of_match_table = xilinx_spi_of_match, | ||
539 | }, | ||
540 | }; | ||
541 | |||
542 | static int __init xilinx_spi_pltfm_init(void) | ||
543 | { | ||
544 | return platform_driver_register(&xilinx_spi_driver); | ||
545 | } | ||
546 | module_init(xilinx_spi_pltfm_init); | ||
547 | |||
548 | static void __exit xilinx_spi_pltfm_exit(void) | ||
549 | { | ||
550 | platform_driver_unregister(&xilinx_spi_driver); | ||
551 | } | ||
552 | module_exit(xilinx_spi_pltfm_exit); | ||
553 | |||
465 | MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); | 554 | MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); |
466 | MODULE_DESCRIPTION("Xilinx SPI driver"); | 555 | MODULE_DESCRIPTION("Xilinx SPI driver"); |
467 | MODULE_LICENSE("GPL"); | 556 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/spi/xilinx_spi.h b/drivers/spi/xilinx_spi.h deleted file mode 100644 index d211accf68d2..000000000000 --- a/drivers/spi/xilinx_spi.h +++ /dev/null | |||
@@ -1,32 +0,0 @@ | |||
1 | /* | ||
2 | * Xilinx SPI device driver API and platform data header file | ||
3 | * | ||
4 | * Copyright (c) 2009 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | ||
19 | |||
20 | #ifndef _XILINX_SPI_H_ | ||
21 | #define _XILINX_SPI_H_ | ||
22 | |||
23 | #include <linux/spi/spi.h> | ||
24 | #include <linux/spi/spi_bitbang.h> | ||
25 | |||
26 | #define XILINX_SPI_NAME "xilinx_spi" | ||
27 | |||
28 | struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, | ||
29 | u32 irq, s16 bus_num); | ||
30 | |||
31 | void xilinx_spi_deinit(struct spi_master *master); | ||
32 | #endif | ||
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c deleted file mode 100644 index b66c2dbf20a5..000000000000 --- a/drivers/spi/xilinx_spi_of.c +++ /dev/null | |||
@@ -1,133 +0,0 @@ | |||
1 | /* | ||
2 | * Xilinx SPI OF device driver | ||
3 | * | ||
4 | * Copyright (c) 2009 Intel Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
18 | */ | ||
19 | |||
20 | /* Supports: | ||
21 | * Xilinx SPI devices as OF devices | ||
22 | * | ||
23 | * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc. | ||
24 | */ | ||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/slab.h> | ||
31 | |||
32 | #include <linux/of_address.h> | ||
33 | #include <linux/of_platform.h> | ||
34 | #include <linux/of_device.h> | ||
35 | #include <linux/of_spi.h> | ||
36 | |||
37 | #include <linux/spi/xilinx_spi.h> | ||
38 | #include "xilinx_spi.h" | ||
39 | |||
40 | |||
41 | static int __devinit xilinx_spi_of_probe(struct platform_device *ofdev, | ||
42 | const struct of_device_id *match) | ||
43 | { | ||
44 | struct spi_master *master; | ||
45 | struct xspi_platform_data *pdata; | ||
46 | struct resource r_mem; | ||
47 | struct resource r_irq; | ||
48 | int rc = 0; | ||
49 | const u32 *prop; | ||
50 | int len; | ||
51 | |||
52 | rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem); | ||
53 | if (rc) { | ||
54 | dev_warn(&ofdev->dev, "invalid address\n"); | ||
55 | return rc; | ||
56 | } | ||
57 | |||
58 | rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq); | ||
59 | if (rc == NO_IRQ) { | ||
60 | dev_warn(&ofdev->dev, "no IRQ found\n"); | ||
61 | return -ENODEV; | ||
62 | } | ||
63 | |||
64 | ofdev->dev.platform_data = | ||
65 | kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL); | ||
66 | pdata = ofdev->dev.platform_data; | ||
67 | if (!pdata) | ||
68 | return -ENOMEM; | ||
69 | |||
70 | /* number of slave select bits is required */ | ||
71 | prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len); | ||
72 | if (!prop || len < sizeof(*prop)) { | ||
73 | dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); | ||
74 | return -EINVAL; | ||
75 | } | ||
76 | pdata->num_chipselect = *prop; | ||
77 | pdata->bits_per_word = 8; | ||
78 | master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1); | ||
79 | if (!master) | ||
80 | return -ENODEV; | ||
81 | |||
82 | dev_set_drvdata(&ofdev->dev, master); | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int __devexit xilinx_spi_remove(struct platform_device *ofdev) | ||
88 | { | ||
89 | xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev)); | ||
90 | dev_set_drvdata(&ofdev->dev, 0); | ||
91 | kfree(ofdev->dev.platform_data); | ||
92 | ofdev->dev.platform_data = NULL; | ||
93 | return 0; | ||
94 | } | ||
95 | |||
96 | static int __exit xilinx_spi_of_remove(struct platform_device *op) | ||
97 | { | ||
98 | return xilinx_spi_remove(op); | ||
99 | } | ||
100 | |||
101 | static const struct of_device_id xilinx_spi_of_match[] = { | ||
102 | { .compatible = "xlnx,xps-spi-2.00.a", }, | ||
103 | { .compatible = "xlnx,xps-spi-2.00.b", }, | ||
104 | {} | ||
105 | }; | ||
106 | |||
107 | MODULE_DEVICE_TABLE(of, xilinx_spi_of_match); | ||
108 | |||
109 | static struct of_platform_driver xilinx_spi_of_driver = { | ||
110 | .probe = xilinx_spi_of_probe, | ||
111 | .remove = __exit_p(xilinx_spi_of_remove), | ||
112 | .driver = { | ||
113 | .name = "xilinx-xps-spi", | ||
114 | .owner = THIS_MODULE, | ||
115 | .of_match_table = xilinx_spi_of_match, | ||
116 | }, | ||
117 | }; | ||
118 | |||
119 | static int __init xilinx_spi_of_init(void) | ||
120 | { | ||
121 | return of_register_platform_driver(&xilinx_spi_of_driver); | ||
122 | } | ||
123 | module_init(xilinx_spi_of_init); | ||
124 | |||
125 | static void __exit xilinx_spi_of_exit(void) | ||
126 | { | ||
127 | of_unregister_platform_driver(&xilinx_spi_of_driver); | ||
128 | } | ||
129 | module_exit(xilinx_spi_of_exit); | ||
130 | |||
131 | MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); | ||
132 | MODULE_DESCRIPTION("Xilinx SPI platform driver"); | ||
133 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/spi/xilinx_spi_pltfm.c b/drivers/spi/xilinx_spi_pltfm.c deleted file mode 100644 index 24debac646a9..000000000000 --- a/drivers/spi/xilinx_spi_pltfm.c +++ /dev/null | |||
@@ -1,102 +0,0 @@ | |||
1 | /* | ||
2 | * Support for Xilinx SPI platform devices | ||
3 | * Copyright (c) 2009 Intel Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | */ | ||
18 | |||
19 | /* Supports: | ||
20 | * Xilinx SPI devices as platform devices | ||
21 | * | ||
22 | * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc. | ||
23 | */ | ||
24 | |||
25 | #include <linux/module.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/io.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | |||
31 | #include <linux/spi/spi.h> | ||
32 | #include <linux/spi/spi_bitbang.h> | ||
33 | #include <linux/spi/xilinx_spi.h> | ||
34 | |||
35 | #include "xilinx_spi.h" | ||
36 | |||
37 | static int __devinit xilinx_spi_probe(struct platform_device *dev) | ||
38 | { | ||
39 | struct xspi_platform_data *pdata; | ||
40 | struct resource *r; | ||
41 | int irq; | ||
42 | struct spi_master *master; | ||
43 | u8 i; | ||
44 | |||
45 | pdata = dev->dev.platform_data; | ||
46 | if (!pdata) | ||
47 | return -ENODEV; | ||
48 | |||
49 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
50 | if (!r) | ||
51 | return -ENODEV; | ||
52 | |||
53 | irq = platform_get_irq(dev, 0); | ||
54 | if (irq < 0) | ||
55 | return -ENXIO; | ||
56 | |||
57 | master = xilinx_spi_init(&dev->dev, r, irq, dev->id); | ||
58 | if (!master) | ||
59 | return -ENODEV; | ||
60 | |||
61 | for (i = 0; i < pdata->num_devices; i++) | ||
62 | spi_new_device(master, pdata->devices + i); | ||
63 | |||
64 | platform_set_drvdata(dev, master); | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static int __devexit xilinx_spi_remove(struct platform_device *dev) | ||
69 | { | ||
70 | xilinx_spi_deinit(platform_get_drvdata(dev)); | ||
71 | platform_set_drvdata(dev, 0); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | /* work with hotplug and coldplug */ | ||
77 | MODULE_ALIAS("platform:" XILINX_SPI_NAME); | ||
78 | |||
79 | static struct platform_driver xilinx_spi_driver = { | ||
80 | .probe = xilinx_spi_probe, | ||
81 | .remove = __devexit_p(xilinx_spi_remove), | ||
82 | .driver = { | ||
83 | .name = XILINX_SPI_NAME, | ||
84 | .owner = THIS_MODULE, | ||
85 | }, | ||
86 | }; | ||
87 | |||
88 | static int __init xilinx_spi_pltfm_init(void) | ||
89 | { | ||
90 | return platform_driver_register(&xilinx_spi_driver); | ||
91 | } | ||
92 | module_init(xilinx_spi_pltfm_init); | ||
93 | |||
94 | static void __exit xilinx_spi_pltfm_exit(void) | ||
95 | { | ||
96 | platform_driver_unregister(&xilinx_spi_driver); | ||
97 | } | ||
98 | module_exit(xilinx_spi_pltfm_exit); | ||
99 | |||
100 | MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); | ||
101 | MODULE_DESCRIPTION("Xilinx SPI platform driver"); | ||
102 | MODULE_LICENSE("GPL v2"); | ||