author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750
tree       644b88f8a71896307d71438e9b3af49126ffb22b
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig            |  102
-rw-r--r--  drivers/spi/Makefile           |   19
-rw-r--r--  drivers/spi/amba-pl022.c       |   19
-rw-r--r--  drivers/spi/atmel_spi.c        |    7
-rw-r--r--  drivers/spi/au1550_spi.c       |   17
-rw-r--r--  drivers/spi/coldfire_qspi.c    |  640
-rw-r--r--  drivers/spi/davinci_spi.c      | 1256
-rw-r--r--  drivers/spi/dw_spi.c           |  990
-rw-r--r--  drivers/spi/dw_spi_mmio.c      |  148
-rw-r--r--  drivers/spi/dw_spi_pci.c       |  172
-rw-r--r--  drivers/spi/mpc52xx_psc_spi.c  |   28
-rw-r--r--  drivers/spi/mpc52xx_spi.c      |  579
-rw-r--r--  drivers/spi/omap2_mcspi.c      |   30
-rw-r--r--  drivers/spi/omap_spi_100k.c    |  636
-rw-r--r--  drivers/spi/omap_uwire.c       |   11
-rw-r--r--  drivers/spi/pxa2xx_spi.c       |    3
-rw-r--r--  drivers/spi/spi.c              |    9
-rw-r--r--  drivers/spi/spi_bfin5xx.c      |    3
-rw-r--r--  drivers/spi/spi_bitbang.c      |    1
-rw-r--r--  drivers/spi/spi_imx.c          |   38
-rw-r--r--  drivers/spi/spi_mpc8xxx.c      |  631
-rw-r--r--  drivers/spi/spi_nuc900.c       |  505
-rw-r--r--  drivers/spi/spi_ppc4xx.c       |    3
-rw-r--r--  drivers/spi/spi_s3c24xx.c      |  247
-rw-r--r--  drivers/spi/spi_s3c24xx_fiq.S  |  116
-rw-r--r--  drivers/spi/spi_s3c24xx_fiq.h  |   26
-rw-r--r--  drivers/spi/spi_s3c64xx.c      | 1183
-rw-r--r--  drivers/spi/spi_sh_msiof.c     |  688
-rw-r--r--  drivers/spi/spi_sh_sci.c       |    2
-rw-r--r--  drivers/spi/spi_stmp.c         |    2
-rw-r--r--  drivers/spi/spi_txx9.c         |    6
-rw-r--r--  drivers/spi/spidev.c           |   29
-rw-r--r--  drivers/spi/tle62x0.c          |    1
-rw-r--r--  drivers/spi/xilinx_spi.c       |  378
-rw-r--r--  drivers/spi/xilinx_spi.h       |   32
-rw-r--r--  drivers/spi/xilinx_spi_of.c    |  135
-rw-r--r--  drivers/spi/xilinx_spi_pltfm.c |  102
37 files changed, 8421 insertions, 373 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 4b6f7cba3b3d..a191fa2be7c5 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -100,6 +100,23 @@ config SPI_BUTTERFLY
           inexpensive battery powered microcontroller evaluation board.
           This same cable can be used to flash new firmware.
 
+config SPI_COLDFIRE_QSPI
+        tristate "Freescale Coldfire QSPI controller"
+        depends on (M520x || M523x || M5249 || M527x || M528x || M532x)
+        help
+          This enables support for the Coldfire QSPI controller in master
+          mode.
+
+          This driver can also be built as a module. If so, the module
+          will be called coldfire_qspi.
+
+config SPI_DAVINCI
+        tristate "SPI controller driver for DaVinci/DA8xx SoC's"
+        depends on SPI_MASTER && ARCH_DAVINCI
+        select SPI_BITBANG
+        help
+          SPI master controller for DaVinci and DA8xx SPI modules.
+
 config SPI_GPIO
         tristate "GPIO-based bitbanging SPI Master"
         depends on GENERIC_GPIO
@@ -133,6 +150,14 @@ config SPI_LM70_LLP
           which interfaces to an LM70 temperature sensor using
           a parallel port.
 
+config SPI_MPC52xx
+        tristate "Freescale MPC52xx SPI (non-PSC) controller support"
+        depends on PPC_MPC52xx && SPI
+        select SPI_MASTER_OF
+        help
+          This drivers supports the MPC52xx SPI controller in master SPI
+          mode.
+
 config SPI_MPC52xx_PSC
         tristate "Freescale MPC52xx PSC SPI controller"
         depends on PPC_MPC52xx && EXPERIMENTAL
@@ -147,9 +172,6 @@ config SPI_MPC8xxx
           This enables using the Freescale MPC8xxx SPI controllers in master
           mode.
 
-          This driver uses a simple set of shift registers for data (opposed
-          to the CPM based descriptor model).
-
 config SPI_OMAP_UWIRE
         tristate "OMAP1 MicroWire"
         depends on ARCH_OMAP1
@@ -159,11 +181,17 @@ config SPI_OMAP_UWIRE
 
 config SPI_OMAP24XX
         tristate "McSPI driver for OMAP24xx/OMAP34xx"
-        depends on ARCH_OMAP24XX || ARCH_OMAP34XX
+        depends on ARCH_OMAP2 || ARCH_OMAP3
         help
           SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
           (McSPI) modules.
 
+config SPI_OMAP_100K
+        tristate "OMAP SPI 100K"
+        depends on SPI_MASTER && (ARCH_OMAP850 || ARCH_OMAP730)
+        help
+          OMAP SPI 100K master controller for omap7xx boards.
+
 config SPI_ORION
         tristate "Orion SPI master (EXPERIMENTAL)"
         depends on PLAT_ORION && EXPERIMENTAL
@@ -205,6 +233,17 @@ config SPI_S3C24XX
         help
           SPI driver for Samsung S3C24XX series ARM SoCs
 
+config SPI_S3C24XX_FIQ
+        bool "S3C24XX driver with FIQ pseudo-DMA"
+        depends on SPI_S3C24XX
+        select FIQ
+        help
+          Enable FIQ support for the S3C24XX SPI driver to provide pseudo
+          DMA by using the fast-interrupt request framework, This allows
+          the driver to get DMA-like performance when there are either
+          no free DMA channels, or when doing transfers that required both
+          TX and RX data paths.
+
 config SPI_S3C24XX_GPIO
         tristate "Samsung S3C24XX series SPI by GPIO"
         depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -215,6 +254,20 @@ config SPI_S3C24XX_GPIO
           the inbuilt hardware cannot provide the transfer mode, or
           where the board is using non hardware connected pins.
 
+config SPI_S3C64XX
+        tristate "Samsung S3C64XX series type SPI"
+        depends on ARCH_S3C64XX && EXPERIMENTAL
+        select S3C64XX_DMA
+        help
+          SPI driver for Samsung S3C64XX and newer SoCs.
+
+config SPI_SH_MSIOF
+        tristate "SuperH MSIOF SPI controller"
+        depends on SUPERH && HAVE_CLK
+        select SPI_BITBANG
+        help
+          SPI driver for SuperH MSIOF blocks.
+
 config SPI_SH_SCI
         tristate "SuperH SCI SPI controller"
         depends on SUPERH
@@ -235,19 +288,56 @@ config SPI_TXX9
           SPI driver for Toshiba TXx9 MIPS SoCs
 
 config SPI_XILINX
-        tristate "Xilinx SPI controller"
-        depends on (XILINX_VIRTEX || MICROBLAZE) && EXPERIMENTAL
+        tristate "Xilinx SPI controller common module"
+        depends on HAS_IOMEM && EXPERIMENTAL
         select SPI_BITBANG
+        select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE)
         help
           This exposes the SPI controller IP from the Xilinx EDK.
 
           See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
           Product Specification document (DS464) for hardware details.
 
+          Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+
+config SPI_XILINX_OF
+        tristate "Xilinx SPI controller OF device"
+        depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE)
+        help
+          This is the OF driver for the SPI controller IP from the Xilinx EDK.
+
+config SPI_XILINX_PLTFM
+        tristate "Xilinx SPI controller platform device"
+        depends on SPI_XILINX
+        help
+          This is the platform driver for the SPI controller IP
+          from the Xilinx EDK.
+
+config SPI_NUC900
+        tristate "Nuvoton NUC900 series SPI"
+        depends on ARCH_W90X900 && EXPERIMENTAL
+        select SPI_BITBANG
+        help
+          SPI driver for Nuvoton NUC900 series ARM SoCs
+
 #
 # Add new SPI master controllers in alphabetical order above this line
 #
 
+config SPI_DESIGNWARE
+        tristate "DesignWare SPI controller core support"
+        depends on SPI_MASTER
+        help
+          general driver for SPI controller core from DesignWare
+
+config SPI_DW_PCI
+        tristate "PCI interface driver for DW SPI core"
+        depends on SPI_DESIGNWARE && PCI
+
+config SPI_DW_MMIO
+        tristate "Memory-mapped io interface driver for DW SPI core"
+        depends on SPI_DESIGNWARE && HAVE_CLK
+
 #
 # There are lots of SPI device types, with sensors and memory
 # being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 21a118269cac..d7d0f89b797b 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,23 +16,40 @@ obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
 obj-$(CONFIG_SPI_BITBANG)               += spi_bitbang.o
 obj-$(CONFIG_SPI_AU1550)                += au1550_spi.o
 obj-$(CONFIG_SPI_BUTTERFLY)             += spi_butterfly.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI)         += coldfire_qspi.o
+obj-$(CONFIG_SPI_DAVINCI)               += davinci_spi.o
+obj-$(CONFIG_SPI_DESIGNWARE)            += dw_spi.o
+obj-$(CONFIG_SPI_DW_PCI)                += dw_spi_pci.o
+obj-$(CONFIG_SPI_DW_MMIO)               += dw_spi_mmio.o
 obj-$(CONFIG_SPI_GPIO)                  += spi_gpio.o
 obj-$(CONFIG_SPI_IMX)                   += spi_imx.o
 obj-$(CONFIG_SPI_LM70_LLP)              += spi_lm70llp.o
 obj-$(CONFIG_SPI_PXA2XX)                += pxa2xx_spi.o
 obj-$(CONFIG_SPI_OMAP_UWIRE)            += omap_uwire.o
 obj-$(CONFIG_SPI_OMAP24XX)              += omap2_mcspi.o
+obj-$(CONFIG_SPI_OMAP_100K)             += omap_spi_100k.o
 obj-$(CONFIG_SPI_ORION)                 += orion_spi.o
 obj-$(CONFIG_SPI_PL022)                 += amba-pl022.o
 obj-$(CONFIG_SPI_MPC52xx_PSC)           += mpc52xx_psc_spi.o
+obj-$(CONFIG_SPI_MPC52xx)               += mpc52xx_spi.o
 obj-$(CONFIG_SPI_MPC8xxx)               += spi_mpc8xxx.o
 obj-$(CONFIG_SPI_PPC4xx)                += spi_ppc4xx.o
 obj-$(CONFIG_SPI_S3C24XX_GPIO)          += spi_s3c24xx_gpio.o
-obj-$(CONFIG_SPI_S3C24XX)               += spi_s3c24xx.o
+obj-$(CONFIG_SPI_S3C24XX)               += spi_s3c24xx_hw.o
+obj-$(CONFIG_SPI_S3C64XX)               += spi_s3c64xx.o
 obj-$(CONFIG_SPI_TXX9)                  += spi_txx9.o
 obj-$(CONFIG_SPI_XILINX)                += xilinx_spi.o
+obj-$(CONFIG_SPI_XILINX_OF)             += xilinx_spi_of.o
+obj-$(CONFIG_SPI_XILINX_PLTFM)          += xilinx_spi_pltfm.o
 obj-$(CONFIG_SPI_SH_SCI)                += spi_sh_sci.o
+obj-$(CONFIG_SPI_SH_MSIOF)              += spi_sh_msiof.o
 obj-$(CONFIG_SPI_STMP3XXX)              += spi_stmp.o
+obj-$(CONFIG_SPI_NUC900)                += spi_nuc900.o
+
+# special build for s3c24xx spi driver with fiq support
+spi_s3c24xx_hw-y                        := spi_s3c24xx.o
+spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
+
 # ... add above this line ...
 
 # SPI protocol drivers (device/link on bus)
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index ff5bbb9c43c9..e9aeee16d922 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -44,6 +44,7 @@
 #include <linux/amba/bus.h>
 #include <linux/amba/pl022.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 
 /*
  * This macro is used to define some register default values.
@@ -363,6 +364,7 @@ struct pl022 {
         void                    *rx_end;
         enum ssp_reading        read;
         enum ssp_writing        write;
+        u32                     exp_fifo_level;
 };
 
 /**
@@ -501,6 +503,9 @@ static int flush(struct pl022 *pl022)
                 while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
                         readw(SSP_DR(pl022->virtbase));
         } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
+
+        pl022->exp_fifo_level = 0;
+
         return limit;
 }
 
@@ -583,10 +588,9 @@ static void readwriter(struct pl022 *pl022)
          * errons in 8bit wide transfers on ARM variants (just 8 words
          * FIFO, means only 8x8 = 64 bits in FIFO) at least.
          *
-         * FIXME: currently we have no logic to account for this.
-         * perhaps there is even something broken in HW regarding
-         * 8bit transfers (it doesn't fail on 16bit) so this needs
-         * more investigation...
+         * To prevent this issue, the TX FIFO is only filled to the
+         * unused RX FIFO fill length, regardless of what the TX
+         * FIFO status flag indicates.
          */
         dev_dbg(&pl022->adev->dev,
                 "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
@@ -613,11 +617,12 @@ static void readwriter(struct pl022 *pl022)
                         break;
                 }
                 pl022->rx += (pl022->cur_chip->n_bytes);
+                pl022->exp_fifo_level--;
         }
         /*
-         * Write as much as you can, while keeping an eye on the RX FIFO!
+         * Write as much as possible up to the RX FIFO size
          */
-        while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF)
+        while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
                && (pl022->tx < pl022->tx_end)) {
                 switch (pl022->write) {
                 case WRITING_NULL:
@@ -634,6 +639,7 @@ static void readwriter(struct pl022 *pl022)
                         break;
                 }
                 pl022->tx += (pl022->cur_chip->n_bytes);
+                pl022->exp_fifo_level++;
                 /*
                  * This inner reader takes care of things appearing in the RX
                  * FIFO as we're transmitting. This will happen a lot since the
@@ -660,6 +666,7 @@ static void readwriter(struct pl022 *pl022)
                                 break;
                         }
                         pl022->rx += (pl022->cur_chip->n_bytes);
+                        pl022->exp_fifo_level--;
                 }
         }
         /*
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index f5b3fdbb1e27..c4e04428992d 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -18,6 +18,7 @@
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/spi/spi.h>
+#include <linux/slab.h>
 
 #include <asm/io.h>
 #include <mach/board.h>
@@ -189,14 +190,14 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
 
         /* use scratch buffer only when rx or tx data is unspecified */
         if (xfer->rx_buf)
-                *rx_dma = xfer->rx_dma + xfer->len - len;
+                *rx_dma = xfer->rx_dma + xfer->len - *plen;
         else {
                 *rx_dma = as->buffer_dma;
                 if (len > BUFFER_SIZE)
                         len = BUFFER_SIZE;
         }
         if (xfer->tx_buf)
-                *tx_dma = xfer->tx_dma + xfer->len - len;
+                *tx_dma = xfer->tx_dma + xfer->len - *plen;
         else {
                 *tx_dma = as->buffer_dma;
                 if (len > BUFFER_SIZE)
@@ -788,7 +789,7 @@ static int __init atmel_spi_probe(struct platform_device *pdev)
         spin_lock_init(&as->lock);
         INIT_LIST_HEAD(&as->queue);
         as->pdev = pdev;
-        as->regs = ioremap(regs->start, (regs->end - regs->start) + 1);
+        as->regs = ioremap(regs->start, resource_size(regs));
         if (!as->regs)
                 goto out_free_buffer;
         as->irq = irq;
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index 76cbc1a66598..3c9ade69643f 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -23,6 +23,7 @@
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
@@ -237,8 +238,14 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
         unsigned bpw, hz;
         u32 cfg, stat;
 
-        bpw = t ? t->bits_per_word : spi->bits_per_word;
-        hz = t ? t->speed_hz : spi->max_speed_hz;
+        bpw = spi->bits_per_word;
+        hz = spi->max_speed_hz;
+        if (t) {
+                if (t->bits_per_word)
+                        bpw = t->bits_per_word;
+                if (t->speed_hz)
+                        hz = t->speed_hz;
+        }
 
         if (bpw < 4 || bpw > 24) {
                 dev_err(&spi->dev, "setupxfer: invalid bits_per_word=%d\n",
@@ -406,11 +413,13 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
         }
 
         /* put buffers on the ring */
-        res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, hw->rx, t->len);
+        res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
+                                    t->len, DDMA_FLAGS_IE);
         if (!res)
                 dev_err(hw->dev, "rx dma put dest error\n");
 
-        res = au1xxx_dbdma_put_source(hw->dma_tx_ch, (void *)hw->tx, t->len);
+        res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
+                                      t->len, DDMA_FLAGS_IE);
         if (!res)
                 dev_err(hw->dev, "tx dma put source error\n");
 
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/coldfire_qspi.c
new file mode 100644
index 000000000000..59be3efe0636
--- /dev/null
+++ b/drivers/spi/coldfire_qspi.c
@@ -0,0 +1,640 @@
1/*
2 * Freescale/Motorola Coldfire Queued SPI driver
3 *
4 * Copyright 2010 Steven King <sfking@fdwdc.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
19 *
20*/
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/interrupt.h>
25#include <linux/errno.h>
26#include <linux/platform_device.h>
27#include <linux/workqueue.h>
28#include <linux/delay.h>
29#include <linux/io.h>
30#include <linux/clk.h>
31#include <linux/err.h>
32#include <linux/spi/spi.h>
33
34#include <asm/coldfire.h>
35#include <asm/mcfqspi.h>
36
37#define DRIVER_NAME "mcfqspi"
38
39#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2)
40
41#define MCFQSPI_QMR 0x00
42#define MCFQSPI_QMR_MSTR 0x8000
43#define MCFQSPI_QMR_CPOL 0x0200
44#define MCFQSPI_QMR_CPHA 0x0100
45#define MCFQSPI_QDLYR 0x04
46#define MCFQSPI_QDLYR_SPE 0x8000
47#define MCFQSPI_QWR 0x08
48#define MCFQSPI_QWR_HALT 0x8000
49#define MCFQSPI_QWR_WREN 0x4000
50#define MCFQSPI_QWR_CSIV 0x1000
51#define MCFQSPI_QIR 0x0C
52#define MCFQSPI_QIR_WCEFB 0x8000
53#define MCFQSPI_QIR_ABRTB 0x4000
54#define MCFQSPI_QIR_ABRTL 0x1000
55#define MCFQSPI_QIR_WCEFE 0x0800
56#define MCFQSPI_QIR_ABRTE 0x0400
57#define MCFQSPI_QIR_SPIFE 0x0100
58#define MCFQSPI_QIR_WCEF 0x0008
59#define MCFQSPI_QIR_ABRT 0x0004
60#define MCFQSPI_QIR_SPIF 0x0001
61#define MCFQSPI_QAR 0x010
62#define MCFQSPI_QAR_TXBUF 0x00
63#define MCFQSPI_QAR_RXBUF 0x10
64#define MCFQSPI_QAR_CMDBUF 0x20
65#define MCFQSPI_QDR 0x014
66#define MCFQSPI_QCR 0x014
67#define MCFQSPI_QCR_CONT 0x8000
68#define MCFQSPI_QCR_BITSE 0x4000
69#define MCFQSPI_QCR_DT 0x2000
70
71struct mcfqspi {
72 void __iomem *iobase;
73 int irq;
74 struct clk *clk;
75 struct mcfqspi_cs_control *cs_control;
76
77 wait_queue_head_t waitq;
78
79 struct work_struct work;
80 struct workqueue_struct *workq;
81 spinlock_t lock;
82 struct list_head msgq;
83};
84
85static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
86{
87 writew(val, mcfqspi->iobase + MCFQSPI_QMR);
88}
89
90static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val)
91{
92 writew(val, mcfqspi->iobase + MCFQSPI_QDLYR);
93}
94
95static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi)
96{
97 return readw(mcfqspi->iobase + MCFQSPI_QDLYR);
98}
99
100static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val)
101{
102 writew(val, mcfqspi->iobase + MCFQSPI_QWR);
103}
104
105static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val)
106{
107 writew(val, mcfqspi->iobase + MCFQSPI_QIR);
108}
109
110static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val)
111{
112 writew(val, mcfqspi->iobase + MCFQSPI_QAR);
113}
114
115static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val)
116{
117 writew(val, mcfqspi->iobase + MCFQSPI_QDR);
118}
119
120static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi)
121{
122 return readw(mcfqspi->iobase + MCFQSPI_QDR);
123}
124
125static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select,
126 bool cs_high)
127{
128 mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high);
129}
130
131static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
132 bool cs_high)
133{
134 mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high);
135}
136
137static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
138{
139 return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ?
140 mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
141}
142
143static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
144{
145 if (mcfqspi->cs_control && mcfqspi->cs_control->teardown)
146 mcfqspi->cs_control->teardown(mcfqspi->cs_control);
147}
148
149static u8 mcfqspi_qmr_baud(u32 speed_hz)
150{
151 return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u);
152}
153
154static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi)
155{
156 return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE;
157}
158
159static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id)
160{
161 struct mcfqspi *mcfqspi = dev_id;
162
163 /* clear interrupt */
164 mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF);
165 wake_up(&mcfqspi->waitq);
166
167 return IRQ_HANDLED;
168}
169
170static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count,
171 const u8 *txbuf, u8 *rxbuf)
172{
173 unsigned i, n, offset = 0;
174
175 n = min(count, 16u);
176
177 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
178 for (i = 0; i < n; ++i)
179 mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
180
181 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
182 if (txbuf)
183 for (i = 0; i < n; ++i)
184 mcfqspi_wr_qdr(mcfqspi, *txbuf++);
185 else
186 for (i = 0; i < count; ++i)
187 mcfqspi_wr_qdr(mcfqspi, 0);
188
189 count -= n;
190 if (count) {
191 u16 qwr = 0xf08;
192 mcfqspi_wr_qwr(mcfqspi, 0x700);
193 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
194
195 do {
196 wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
197 mcfqspi_wr_qwr(mcfqspi, qwr);
198 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
199 if (rxbuf) {
200 mcfqspi_wr_qar(mcfqspi,
201 MCFQSPI_QAR_RXBUF + offset);
202 for (i = 0; i < 8; ++i)
203 *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
204 }
205 n = min(count, 8u);
206 if (txbuf) {
207 mcfqspi_wr_qar(mcfqspi,
208 MCFQSPI_QAR_TXBUF + offset);
209 for (i = 0; i < n; ++i)
210 mcfqspi_wr_qdr(mcfqspi, *txbuf++);
211 }
212 qwr = (offset ? 0x808 : 0) + ((n - 1) << 8);
213 offset ^= 8;
214 count -= n;
215 } while (count);
216 wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
217 mcfqspi_wr_qwr(mcfqspi, qwr);
218 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
219 if (rxbuf) {
220 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
221 for (i = 0; i < 8; ++i)
222 *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
223 offset ^= 8;
224 }
225 } else {
226 mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
227 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
228 }
229 wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
230 if (rxbuf) {
231 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
232 for (i = 0; i < n; ++i)
233 *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
234 }
235}
236
237static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
238 const u16 *txbuf, u16 *rxbuf)
239{
240 unsigned i, n, offset = 0;
241
242 n = min(count, 16u);
243
244 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
245 for (i = 0; i < n; ++i)
246 mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
247
248 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
249 if (txbuf)
250 for (i = 0; i < n; ++i)
251 mcfqspi_wr_qdr(mcfqspi, *txbuf++);
252 else
253 for (i = 0; i < count; ++i)
254 mcfqspi_wr_qdr(mcfqspi, 0);
255
256 count -= n;
257 if (count) {
258 u16 qwr = 0xf08;
259 mcfqspi_wr_qwr(mcfqspi, 0x700);
260 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
261
262 do {
263 wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
264 mcfqspi_wr_qwr(mcfqspi, qwr);
265 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
266 if (rxbuf) {
267 mcfqspi_wr_qar(mcfqspi,
268 MCFQSPI_QAR_RXBUF + offset);
269 for (i = 0; i < 8; ++i)
270 *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
271 }
272 n = min(count, 8u);
273 if (txbuf) {
274 mcfqspi_wr_qar(mcfqspi,
275 MCFQSPI_QAR_TXBUF + offset);
276 for (i = 0; i < n; ++i)
277 mcfqspi_wr_qdr(mcfqspi, *txbuf++);
278 }
279 qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8);
280 offset ^= 8;
281 count -= n;
282 } while (count);
283 wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
284 mcfqspi_wr_qwr(mcfqspi, qwr);
285 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
286 if (rxbuf) {
287 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
288 for (i = 0; i < 8; ++i)
289 *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
290 offset ^= 8;
291 }
292 } else {
293 mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
294 mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
295 }
296 wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
297 if (rxbuf) {
298 mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
299 for (i = 0; i < n; ++i)
300 *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
301 }
302}
303
304static void mcfqspi_work(struct work_struct *work)
305{
306 struct mcfqspi *mcfqspi = container_of(work, struct mcfqspi, work);
307 unsigned long flags;
308
309 spin_lock_irqsave(&mcfqspi->lock, flags);
310 while (!list_empty(&mcfqspi->msgq)) {
311 struct spi_message *msg;
312 struct spi_device *spi;
313 struct spi_transfer *xfer;
314 int status = 0;
315
316 msg = container_of(mcfqspi->msgq.next, struct spi_message,
317 queue);
318
319 list_del_init(&mcfqspi->msgq);
320 spin_unlock_irqrestore(&mcfqspi->lock, flags);
321
322 spi = msg->spi;
323
324 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
325 bool cs_high = spi->mode & SPI_CS_HIGH;
326 u16 qmr = MCFQSPI_QMR_MSTR;
327
328 if (xfer->bits_per_word)
329 qmr |= xfer->bits_per_word << 10;
330 else
331 qmr |= spi->bits_per_word << 10;
332 if (spi->mode & SPI_CPHA)
333 qmr |= MCFQSPI_QMR_CPHA;
334 if (spi->mode & SPI_CPOL)
335 qmr |= MCFQSPI_QMR_CPOL;
336 if (xfer->speed_hz)
337 qmr |= mcfqspi_qmr_baud(xfer->speed_hz);
338 else
339 qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
340 mcfqspi_wr_qmr(mcfqspi, qmr);
341
342 mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
343
344 mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
345 if ((xfer->bits_per_word ? xfer->bits_per_word :
346 spi->bits_per_word) == 8)
347 mcfqspi_transfer_msg8(mcfqspi, xfer->len,
348 xfer->tx_buf,
349 xfer->rx_buf);
350 else
351 mcfqspi_transfer_msg16(mcfqspi, xfer->len / 2,
352 xfer->tx_buf,
353 xfer->rx_buf);
354 mcfqspi_wr_qir(mcfqspi, 0);
355
356 if (xfer->delay_usecs)
357 udelay(xfer->delay_usecs);
358 if (xfer->cs_change) {
359 if (!list_is_last(&xfer->transfer_list,
360 &msg->transfers))
361 mcfqspi_cs_deselect(mcfqspi,
362 spi->chip_select,
363 cs_high);
364 } else {
365 if (list_is_last(&xfer->transfer_list,
366 &msg->transfers))
367 mcfqspi_cs_deselect(mcfqspi,
368 spi->chip_select,
369 cs_high);
370 }
371 msg->actual_length += xfer->len;
372 }
373 msg->status = status;
374 msg->complete(msg->context);
375
376 spin_lock_irqsave(&mcfqspi->lock, flags);
377 }
378 spin_unlock_irqrestore(&mcfqspi->lock, flags);
379}
380
381static int mcfqspi_transfer(struct spi_device *spi, struct spi_message *msg)
382{
383 struct mcfqspi *mcfqspi;
384 struct spi_transfer *xfer;
385 unsigned long flags;
386
387 mcfqspi = spi_master_get_devdata(spi->master);
388
389 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
390 if (xfer->bits_per_word && ((xfer->bits_per_word < 8)
391 || (xfer->bits_per_word > 16))) {
392 dev_dbg(&spi->dev,
393 "%d bits per word is not supported\n",
394 xfer->bits_per_word);
395 goto fail;
396 }
397 if (xfer->speed_hz) {
398 u32 real_speed = MCFQSPI_BUSCLK /
399 mcfqspi_qmr_baud(xfer->speed_hz);
400 if (real_speed != xfer->speed_hz)
401 dev_dbg(&spi->dev,
402 "using speed %d instead of %d\n",
403 real_speed, xfer->speed_hz);
404 }
405 }
406 msg->status = -EINPROGRESS;
407 msg->actual_length = 0;
408
409 spin_lock_irqsave(&mcfqspi->lock, flags);
410 list_add_tail(&msg->queue, &mcfqspi->msgq);
411 queue_work(mcfqspi->workq, &mcfqspi->work);
412 spin_unlock_irqrestore(&mcfqspi->lock, flags);
413
414 return 0;
415fail:
416 msg->status = -EINVAL;
417 return -EINVAL;
418}
419
420static int mcfqspi_setup(struct spi_device *spi)
421{
422 if ((spi->bits_per_word < 8) || (spi->bits_per_word > 16)) {
423 dev_dbg(&spi->dev, "%d bits per word is not supported\n",
424 spi->bits_per_word);
425 return -EINVAL;
426 }
427 if (spi->chip_select >= spi->master->num_chipselect) {
428 dev_dbg(&spi->dev, "%d chip select is out of range\n",
429 spi->chip_select);
430 return -EINVAL;
431 }
432
433 mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
434 spi->chip_select, spi->mode & SPI_CS_HIGH);
435
436 dev_dbg(&spi->dev,
437 "bits per word %d, chip select %d, speed %d KHz\n",
438 spi->bits_per_word, spi->chip_select,
439 (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz))
440 / 1000);
441
442 return 0;
443}
444
445static int __devinit mcfqspi_probe(struct platform_device *pdev)
446{
447 struct spi_master *master;
448 struct mcfqspi *mcfqspi;
449 struct resource *res;
450 struct mcfqspi_platform_data *pdata;
451 int status;
452
453 master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
454 if (master == NULL) {
455 dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
456 return -ENOMEM;
457 }
458
459 mcfqspi = spi_master_get_devdata(master);
460
461 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
462 if (!res) {
463 dev_dbg(&pdev->dev, "platform_get_resource failed\n");
464 status = -ENXIO;
465 goto fail0;
466 }
467
468 if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
469 dev_dbg(&pdev->dev, "request_mem_region failed\n");
470 status = -EBUSY;
471 goto fail0;
472 }
473
474 mcfqspi->iobase = ioremap(res->start, resource_size(res));
475 if (!mcfqspi->iobase) {
476 dev_dbg(&pdev->dev, "ioremap failed\n");
477 status = -ENOMEM;
478 goto fail1;
479 }
480
481 mcfqspi->irq = platform_get_irq(pdev, 0);
482 if (mcfqspi->irq < 0) {
483 dev_dbg(&pdev->dev, "platform_get_irq failed\n");
484 status = -ENXIO;
485 goto fail2;
486 }
487
488 status = request_irq(mcfqspi->irq, mcfqspi_irq_handler, IRQF_DISABLED,
489 pdev->name, mcfqspi);
490 if (status) {
491 dev_dbg(&pdev->dev, "request_irq failed\n");
492 goto fail2;
493 }
494
495 mcfqspi->clk = clk_get(&pdev->dev, "qspi_clk");
496 if (IS_ERR(mcfqspi->clk)) {
497 dev_dbg(&pdev->dev, "clk_get failed\n");
498 status = PTR_ERR(mcfqspi->clk);
499 goto fail3;
500 }
501 clk_enable(mcfqspi->clk);
502
503 mcfqspi->workq = create_singlethread_workqueue(dev_name(master->dev.parent));
504 if (!mcfqspi->workq) {
505 dev_dbg(&pdev->dev, "create_workqueue failed\n");
506 status = -ENOMEM;
507 goto fail4;
508 }
509 INIT_WORK(&mcfqspi->work, mcfqspi_work);
510 spin_lock_init(&mcfqspi->lock);
511 INIT_LIST_HEAD(&mcfqspi->msgq);
512 init_waitqueue_head(&mcfqspi->waitq);
513
514 pdata = pdev->dev.platform_data;
515 if (!pdata) {
516 dev_dbg(&pdev->dev, "platform data is missing\n");
517 goto fail5;
518 }
519 master->bus_num = pdata->bus_num;
520 master->num_chipselect = pdata->num_chipselect;
521
522 mcfqspi->cs_control = pdata->cs_control;
523 status = mcfqspi_cs_setup(mcfqspi);
524 if (status) {
525 dev_dbg(&pdev->dev, "error initializing cs_control\n");
526 goto fail5;
527 }
528
529 master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
530 master->setup = mcfqspi_setup;
531 master->transfer = mcfqspi_transfer;
532
533 platform_set_drvdata(pdev, master);
534
535 status = spi_register_master(master);
536 if (status) {
537 dev_dbg(&pdev->dev, "spi_register_master failed\n");
538 goto fail6;
539 }
540 dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
541
542 return 0;
543
544fail6:
545 mcfqspi_cs_teardown(mcfqspi);
546fail5:
547 destroy_workqueue(mcfqspi->workq);
548fail4:
549 clk_disable(mcfqspi->clk);
550 clk_put(mcfqspi->clk);
551fail3:
552 free_irq(mcfqspi->irq, mcfqspi);
553fail2:
554 iounmap(mcfqspi->iobase);
555fail1:
556 release_mem_region(res->start, resource_size(res));
557fail0:
558 spi_master_put(master);
559
560 dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n");
561
562 return status;
563}
564
565static int __devexit mcfqspi_remove(struct platform_device *pdev)
566{
567 struct spi_master *master = platform_get_drvdata(pdev);
568 struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
569 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
570
571 /* disable the hardware (set the baud rate to 0) */
572 mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
573
574 platform_set_drvdata(pdev, NULL);
575 mcfqspi_cs_teardown(mcfqspi);
576 destroy_workqueue(mcfqspi->workq);
577 clk_disable(mcfqspi->clk);
578 clk_put(mcfqspi->clk);
579 free_irq(mcfqspi->irq, mcfqspi);
580 iounmap(mcfqspi->iobase);
581 release_mem_region(res->start, resource_size(res));
582 spi_unregister_master(master);
583 spi_master_put(master);
584
585 return 0;
586}
587
588#ifdef CONFIG_PM
589
590static int mcfqspi_suspend(struct device *dev)
591{
592 struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
593
594 clk_disable(mcfqspi->clk);
595
596 return 0;
597}
598
599static int mcfqspi_resume(struct device *dev)
600{
601 struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
602
603 clk_enable(mcfqspi->clk);
604
605 return 0;
606}
607
608static struct dev_pm_ops mcfqspi_dev_pm_ops = {
609 .suspend = mcfqspi_suspend,
610 .resume = mcfqspi_resume,
611};
612
613#define MCFQSPI_DEV_PM_OPS (&mcfqspi_dev_pm_ops)
614#else
615#define MCFQSPI_DEV_PM_OPS NULL
616#endif
617
618static struct platform_driver mcfqspi_driver = {
619 .driver.name = DRIVER_NAME,
620 .driver.owner = THIS_MODULE,
621 .driver.pm = MCFQSPI_DEV_PM_OPS,
622 .remove = __devexit_p(mcfqspi_remove),
623};
624
625static int __init mcfqspi_init(void)
626{
627 return platform_driver_probe(&mcfqspi_driver, mcfqspi_probe);
628}
629module_init(mcfqspi_init);
630
631static void __exit mcfqspi_exit(void)
632{
633 platform_driver_unregister(&mcfqspi_driver);
634}
635module_exit(mcfqspi_exit);
636
637MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
638MODULE_DESCRIPTION("Coldfire QSPI Controller Driver");
639MODULE_LICENSE("GPL");
640MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
new file mode 100644
index 000000000000..95afb6b77395
--- /dev/null
+++ b/drivers/spi/davinci_spi.c
@@ -0,0 +1,1256 @@
1/*
2 * Copyright (C) 2009 Texas Instruments.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/interrupt.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/err.h>
26#include <linux/clk.h>
27#include <linux/dma-mapping.h>
28#include <linux/spi/spi.h>
29#include <linux/spi/spi_bitbang.h>
30#include <linux/slab.h>
31
32#include <mach/spi.h>
33#include <mach/edma.h>
34
35#define SPI_NO_RESOURCE ((resource_size_t)-1)
36
37#define SPI_MAX_CHIPSELECT 2
38
39#define CS_DEFAULT 0xFF
40
41#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
42#define DAVINCI_DMA_DATA_TYPE_S8 0x01
43#define DAVINCI_DMA_DATA_TYPE_S16 0x02
44#define DAVINCI_DMA_DATA_TYPE_S32 0x04
45
46#define SPIFMT_PHASE_MASK BIT(16)
47#define SPIFMT_POLARITY_MASK BIT(17)
48#define SPIFMT_DISTIMER_MASK BIT(18)
49#define SPIFMT_SHIFTDIR_MASK BIT(20)
50#define SPIFMT_WAITENA_MASK BIT(21)
51#define SPIFMT_PARITYENA_MASK BIT(22)
52#define SPIFMT_ODD_PARITY_MASK BIT(23)
53#define SPIFMT_WDELAY_MASK 0x3f000000u
54#define SPIFMT_WDELAY_SHIFT 24
55#define SPIFMT_CHARLEN_MASK 0x0000001Fu
56
57/* SPIGCR1 */
58#define SPIGCR1_SPIENA_MASK 0x01000000u
59
60/* SPIPC0 */
61#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
62#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
63#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
64#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
65#define SPIPC0_EN1FUN_MASK BIT(1)
66#define SPIPC0_EN0FUN_MASK BIT(0)
67
68#define SPIINT_MASKALL 0x0101035F
69#define SPI_INTLVL_1 0x000001FFu
70#define SPI_INTLVL_0 0x00000000u
71
72/* SPIDAT1 */
73#define SPIDAT1_CSHOLD_SHIFT 28
74#define SPIDAT1_CSNR_SHIFT 16
75#define SPIGCR1_CLKMOD_MASK BIT(1)
76#define SPIGCR1_MASTER_MASK BIT(0)
77#define SPIGCR1_LOOPBACK_MASK BIT(16)
78
79/* SPIBUF */
80#define SPIBUF_TXFULL_MASK BIT(29)
81#define SPIBUF_RXEMPTY_MASK BIT(31)
82
83/* Error Masks */
84#define SPIFLG_DLEN_ERR_MASK BIT(0)
85#define SPIFLG_TIMEOUT_MASK BIT(1)
86#define SPIFLG_PARERR_MASK BIT(2)
87#define SPIFLG_DESYNC_MASK BIT(3)
88#define SPIFLG_BITERR_MASK BIT(4)
89#define SPIFLG_OVRRUN_MASK BIT(6)
90#define SPIFLG_RX_INTR_MASK BIT(8)
91#define SPIFLG_TX_INTR_MASK BIT(9)
92#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
93#define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \
94 | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
95 | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
96 | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \
97 | SPIFLG_TX_INTR_MASK \
98 | SPIFLG_BUF_INIT_ACTIVE_MASK)
99
100#define SPIINT_DLEN_ERR_INTR BIT(0)
101#define SPIINT_TIMEOUT_INTR BIT(1)
102#define SPIINT_PARERR_INTR BIT(2)
103#define SPIINT_DESYNC_INTR BIT(3)
104#define SPIINT_BITERR_INTR BIT(4)
105#define SPIINT_OVRRUN_INTR BIT(6)
106#define SPIINT_RX_INTR BIT(8)
107#define SPIINT_TX_INTR BIT(9)
108#define SPIINT_DMA_REQ_EN BIT(16)
109#define SPIINT_ENABLE_HIGHZ BIT(24)
110
111#define SPI_T2CDELAY_SHIFT 16
112#define SPI_C2TDELAY_SHIFT 24
113
114/* SPI Controller registers */
115#define SPIGCR0 0x00
116#define SPIGCR1 0x04
117#define SPIINT 0x08
118#define SPILVL 0x0c
119#define SPIFLG 0x10
120#define SPIPC0 0x14
121#define SPIPC1 0x18
122#define SPIPC2 0x1c
123#define SPIPC3 0x20
124#define SPIPC4 0x24
125#define SPIPC5 0x28
126#define SPIPC6 0x2c
127#define SPIPC7 0x30
128#define SPIPC8 0x34
129#define SPIDAT0 0x38
130#define SPIDAT1 0x3c
131#define SPIBUF 0x40
132#define SPIEMU 0x44
133#define SPIDELAY 0x48
134#define SPIDEF 0x4c
135#define SPIFMT0 0x50
136#define SPIFMT1 0x54
137#define SPIFMT2 0x58
138#define SPIFMT3 0x5c
139#define TGINTVEC0 0x60
140#define TGINTVEC1 0x64
141
142struct davinci_spi_slave {
143 u32 cmd_to_write;
144 u32 clk_ctrl_to_write;
145 u32 bytes_per_word;
146 u8 active_cs;
147};
148
149/* We have 2 DMA channels per CS, one for RX and one for TX */
150struct davinci_spi_dma {
151 int dma_tx_channel;
152 int dma_rx_channel;
153 int dma_tx_sync_dev;
154 int dma_rx_sync_dev;
155 enum dma_event_q eventq;
156
157 struct completion dma_tx_completion;
158 struct completion dma_rx_completion;
159};
160
161/* SPI Controller driver's private data. */
162struct davinci_spi {
163 struct spi_bitbang bitbang;
164 struct clk *clk;
165
166 u8 version;
167 resource_size_t pbase;
168 void __iomem *base;
169 size_t region_size;
170 u32 irq;
171 struct completion done;
172
173 const void *tx;
174 void *rx;
175 u8 *tmp_buf;
176 int count;
177 struct davinci_spi_dma *dma_channels;
178 struct davinci_spi_platform_data *pdata;
179
180 void (*get_rx)(u32 rx_data, struct davinci_spi *);
181 u32 (*get_tx)(struct davinci_spi *);
182
183 struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT];
184};
185
186static unsigned use_dma;
187
188static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi)
189{
190 u8 *rx = davinci_spi->rx;
191
192 *rx++ = (u8)data;
193 davinci_spi->rx = rx;
194}
195
196static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi)
197{
198 u16 *rx = davinci_spi->rx;
199
200 *rx++ = (u16)data;
201 davinci_spi->rx = rx;
202}
203
204static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi)
205{
206 u32 data;
207 const u8 *tx = davinci_spi->tx;
208
209 data = *tx++;
210 davinci_spi->tx = tx;
211 return data;
212}
213
214static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi)
215{
216 u32 data;
217 const u16 *tx = davinci_spi->tx;
218
219 data = *tx++;
220 davinci_spi->tx = tx;
221 return data;
222}
223
224static inline void set_io_bits(void __iomem *addr, u32 bits)
225{
226 u32 v = ioread32(addr);
227
228 v |= bits;
229 iowrite32(v, addr);
230}
231
232static inline void clear_io_bits(void __iomem *addr, u32 bits)
233{
234 u32 v = ioread32(addr);
235
236 v &= ~bits;
237 iowrite32(v, addr);
238}
239
240static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
241{
242 set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
243}
244
245static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
246{
247 clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
248}
249
250static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
251{
252 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
253
254 if (enable)
255 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
256 else
257 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
258}
259
260/*
261 * Interface to control the chip select signal
262 */
263static void davinci_spi_chipselect(struct spi_device *spi, int value)
264{
265 struct davinci_spi *davinci_spi;
266 struct davinci_spi_platform_data *pdata;
267 u32 data1_reg_val = 0;
268
269 davinci_spi = spi_master_get_devdata(spi->master);
270 pdata = davinci_spi->pdata;
271
272 /*
273 * Board specific chip select logic decides the polarity and cs
274 * line for the controller
275 */
276 if (value == BITBANG_CS_INACTIVE) {
277 set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT);
278
279 data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT;
280 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
281
282 while ((ioread32(davinci_spi->base + SPIBUF)
283 & SPIBUF_RXEMPTY_MASK) == 0)
284 cpu_relax();
285 }
286}
287
288/**
289 * davinci_spi_setup_transfer - This functions will determine transfer method
290 * @spi: spi device on which data transfer to be done
291 * @t: spi transfer in which transfer info is filled
292 *
293 * This function determines data transfer method (8/16/32 bit transfer).
294 * It will also set the SPI Clock Control register according to
295 * SPI slave device freq.
296 */
297static int davinci_spi_setup_transfer(struct spi_device *spi,
298 struct spi_transfer *t)
299{
300
301 struct davinci_spi *davinci_spi;
302 struct davinci_spi_platform_data *pdata;
303 u8 bits_per_word = 0;
304 u32 hz = 0, prescale;
305
306 davinci_spi = spi_master_get_devdata(spi->master);
307 pdata = davinci_spi->pdata;
308
309 if (t) {
310 bits_per_word = t->bits_per_word;
311 hz = t->speed_hz;
312 }
313
314 /* if bits_per_word is not set then set it default */
315 if (!bits_per_word)
316 bits_per_word = spi->bits_per_word;
317
318 /*
319 * Assign function pointer to appropriate transfer method
320 * 8bit, 16bit or 32bit transfer
321 */
322 if (bits_per_word <= 8 && bits_per_word >= 2) {
323 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
324 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
325 davinci_spi->slave[spi->chip_select].bytes_per_word = 1;
326 } else if (bits_per_word <= 16 && bits_per_word >= 2) {
327 davinci_spi->get_rx = davinci_spi_rx_buf_u16;
328 davinci_spi->get_tx = davinci_spi_tx_buf_u16;
329 davinci_spi->slave[spi->chip_select].bytes_per_word = 2;
330 } else
331 return -EINVAL;
332
333 if (!hz)
334 hz = spi->max_speed_hz;
335
336 clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK,
337 spi->chip_select);
338 set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
339 spi->chip_select);
340
341 prescale = ((clk_get_rate(davinci_spi->clk) / hz) - 1) & 0xff;
342
343 clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select);
344 set_fmt_bits(davinci_spi->base, prescale << 8, spi->chip_select);
345
346 return 0;
347}
348
349static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data)
350{
351 struct spi_device *spi = (struct spi_device *)data;
352 struct davinci_spi *davinci_spi;
353 struct davinci_spi_dma *davinci_spi_dma;
354 struct davinci_spi_platform_data *pdata;
355
356 davinci_spi = spi_master_get_devdata(spi->master);
357 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
358 pdata = davinci_spi->pdata;
359
360 if (ch_status == DMA_COMPLETE)
361 edma_stop(davinci_spi_dma->dma_rx_channel);
362 else
363 edma_clean_channel(davinci_spi_dma->dma_rx_channel);
364
365 complete(&davinci_spi_dma->dma_rx_completion);
366 /* We must disable the DMA RX request */
367 davinci_spi_set_dma_req(spi, 0);
368}
369
370static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data)
371{
372 struct spi_device *spi = (struct spi_device *)data;
373 struct davinci_spi *davinci_spi;
374 struct davinci_spi_dma *davinci_spi_dma;
375 struct davinci_spi_platform_data *pdata;
376
377 davinci_spi = spi_master_get_devdata(spi->master);
378 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]);
379 pdata = davinci_spi->pdata;
380
381 if (ch_status == DMA_COMPLETE)
382 edma_stop(davinci_spi_dma->dma_tx_channel);
383 else
384 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
385
386 complete(&davinci_spi_dma->dma_tx_completion);
387 /* We must disable the DMA TX request */
388 davinci_spi_set_dma_req(spi, 0);
389}
390
391static int davinci_spi_request_dma(struct spi_device *spi)
392{
393 struct davinci_spi *davinci_spi;
394 struct davinci_spi_dma *davinci_spi_dma;
395 struct davinci_spi_platform_data *pdata;
396 struct device *sdev;
397 int r;
398
399 davinci_spi = spi_master_get_devdata(spi->master);
400 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
401 pdata = davinci_spi->pdata;
402 sdev = davinci_spi->bitbang.master->dev.parent;
403
404 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev,
405 davinci_spi_dma_rx_callback, spi,
406 davinci_spi_dma->eventq);
407 if (r < 0) {
408 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
409 return -EAGAIN;
410 }
411 davinci_spi_dma->dma_rx_channel = r;
412 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev,
413 davinci_spi_dma_tx_callback, spi,
414 davinci_spi_dma->eventq);
415 if (r < 0) {
416 edma_free_channel(davinci_spi_dma->dma_rx_channel);
417 davinci_spi_dma->dma_rx_channel = -1;
418 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
419 return -EAGAIN;
420 }
421 davinci_spi_dma->dma_tx_channel = r;
422
423 return 0;
424}
425
426/**
427 * davinci_spi_setup - This functions will set default transfer method
428 * @spi: spi device on which data transfer to be done
429 *
430 * This functions sets the default transfer method.
431 */
432
433static int davinci_spi_setup(struct spi_device *spi)
434{
435 int retval;
436 struct davinci_spi *davinci_spi;
437 struct davinci_spi_dma *davinci_spi_dma;
438 struct device *sdev;
439
440 davinci_spi = spi_master_get_devdata(spi->master);
441 sdev = davinci_spi->bitbang.master->dev.parent;
442
443 /* if bits per word length is zero then set it default 8 */
444 if (!spi->bits_per_word)
445 spi->bits_per_word = 8;
446
447 davinci_spi->slave[spi->chip_select].cmd_to_write = 0;
448
449 if (use_dma && davinci_spi->dma_channels) {
450 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
451
452 if ((davinci_spi_dma->dma_rx_channel == -1)
453 || (davinci_spi_dma->dma_tx_channel == -1)) {
454 retval = davinci_spi_request_dma(spi);
455 if (retval < 0)
456 return retval;
457 }
458 }
459
460 /*
461 * SPI in DaVinci and DA8xx operate between
462 * 600 KHz and 50 MHz
463 */
464 if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
465 dev_dbg(sdev, "Operating frequency is not in acceptable "
466 "range\n");
467 return -EINVAL;
468 }
469
470 /*
471 * Set up SPIFMTn register, unique to this chipselect.
472 *
473 * NOTE: we could do all of these with one write. Also, some
474 * of the "version 2" features are found in chips that don't
475 * support all of them...
476 */
477 if (spi->mode & SPI_LSB_FIRST)
478 set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
479 spi->chip_select);
480 else
481 clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
482 spi->chip_select);
483
484 if (spi->mode & SPI_CPOL)
485 set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
486 spi->chip_select);
487 else
488 clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
489 spi->chip_select);
490
491 if (!(spi->mode & SPI_CPHA))
492 set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
493 spi->chip_select);
494 else
495 clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
496 spi->chip_select);
497
498 /*
499 * Version 1 hardware supports two basic SPI modes:
500 * - Standard SPI mode uses 4 pins, with chipselect
501 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
502 * (distinct from SPI_3WIRE, with just one data wire;
503 * or similar variants without MOSI or without MISO)
504 *
505 * Version 2 hardware supports an optional handshaking signal,
506 * so it can support two more modes:
507 * - 5 pin SPI variant is standard SPI plus SPI_READY
508 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
509 */
510
511 if (davinci_spi->version == SPI_VERSION_2) {
512 clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
513 spi->chip_select);
514 set_fmt_bits(davinci_spi->base,
515 (davinci_spi->pdata->wdelay
516 << SPIFMT_WDELAY_SHIFT)
517 & SPIFMT_WDELAY_MASK,
518 spi->chip_select);
519
520 if (davinci_spi->pdata->odd_parity)
521 set_fmt_bits(davinci_spi->base,
522 SPIFMT_ODD_PARITY_MASK,
523 spi->chip_select);
524 else
525 clear_fmt_bits(davinci_spi->base,
526 SPIFMT_ODD_PARITY_MASK,
527 spi->chip_select);
528
529 if (davinci_spi->pdata->parity_enable)
530 set_fmt_bits(davinci_spi->base,
531 SPIFMT_PARITYENA_MASK,
532 spi->chip_select);
533 else
534 clear_fmt_bits(davinci_spi->base,
535 SPIFMT_PARITYENA_MASK,
536 spi->chip_select);
537
538 if (davinci_spi->pdata->wait_enable)
539 set_fmt_bits(davinci_spi->base,
540 SPIFMT_WAITENA_MASK,
541 spi->chip_select);
542 else
543 clear_fmt_bits(davinci_spi->base,
544 SPIFMT_WAITENA_MASK,
545 spi->chip_select);
546
547 if (davinci_spi->pdata->timer_disable)
548 set_fmt_bits(davinci_spi->base,
549 SPIFMT_DISTIMER_MASK,
550 spi->chip_select);
551 else
552 clear_fmt_bits(davinci_spi->base,
553 SPIFMT_DISTIMER_MASK,
554 spi->chip_select);
555 }
556
557 retval = davinci_spi_setup_transfer(spi, NULL);
558
559 return retval;
560}
561
562static void davinci_spi_cleanup(struct spi_device *spi)
563{
564 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
565 struct davinci_spi_dma *davinci_spi_dma;
566
567 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
568
569 if (use_dma && davinci_spi->dma_channels) {
570 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
571
572 if ((davinci_spi_dma->dma_rx_channel != -1)
573 && (davinci_spi_dma->dma_tx_channel != -1)) {
574 edma_free_channel(davinci_spi_dma->dma_tx_channel);
575 edma_free_channel(davinci_spi_dma->dma_rx_channel);
576 }
577 }
578}
579
580static int davinci_spi_bufs_prep(struct spi_device *spi,
581 struct davinci_spi *davinci_spi)
582{
583 int op_mode = 0;
584
585 /*
586 * REVISIT unless devices disagree about SPI_LOOP or
587 * SPI_READY (SPI_NO_CS only allows one device!), this
588 * should not need to be done before each message...
589 * optimize for both flags staying cleared.
590 */
591
592 op_mode = SPIPC0_DIFUN_MASK
593 | SPIPC0_DOFUN_MASK
594 | SPIPC0_CLKFUN_MASK;
595 if (!(spi->mode & SPI_NO_CS))
596 op_mode |= 1 << spi->chip_select;
597 if (spi->mode & SPI_READY)
598 op_mode |= SPIPC0_SPIENA_MASK;
599
600 iowrite32(op_mode, davinci_spi->base + SPIPC0);
601
602 if (spi->mode & SPI_LOOP)
603 set_io_bits(davinci_spi->base + SPIGCR1,
604 SPIGCR1_LOOPBACK_MASK);
605 else
606 clear_io_bits(davinci_spi->base + SPIGCR1,
607 SPIGCR1_LOOPBACK_MASK);
608
609 return 0;
610}
611
612static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
613 int int_status)
614{
615 struct device *sdev = davinci_spi->bitbang.master->dev.parent;
616
617 if (int_status & SPIFLG_TIMEOUT_MASK) {
618 dev_dbg(sdev, "SPI Time-out Error\n");
619 return -ETIMEDOUT;
620 }
621 if (int_status & SPIFLG_DESYNC_MASK) {
622 dev_dbg(sdev, "SPI Desynchronization Error\n");
623 return -EIO;
624 }
625 if (int_status & SPIFLG_BITERR_MASK) {
626 dev_dbg(sdev, "SPI Bit error\n");
627 return -EIO;
628 }
629
630 if (davinci_spi->version == SPI_VERSION_2) {
631 if (int_status & SPIFLG_DLEN_ERR_MASK) {
632 dev_dbg(sdev, "SPI Data Length Error\n");
633 return -EIO;
634 }
635 if (int_status & SPIFLG_PARERR_MASK) {
636 dev_dbg(sdev, "SPI Parity Error\n");
637 return -EIO;
638 }
639 if (int_status & SPIFLG_OVRRUN_MASK) {
640 dev_dbg(sdev, "SPI Data Overrun error\n");
641 return -EIO;
642 }
643 if (int_status & SPIFLG_TX_INTR_MASK) {
644 dev_dbg(sdev, "SPI TX intr bit set\n");
645 return -EIO;
646 }
647 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
648 dev_dbg(sdev, "SPI Buffer Init Active\n");
649 return -EBUSY;
650 }
651 }
652
653 return 0;
654}
655
656/**
 657 * davinci_spi_bufs_pio - PIO transfer handler
 658 * @spi: spi device on which the data transfer is to be done
 659 * @t: spi transfer describing the data to move
 660 *
 661 * This function writes the data to be transferred into the SPI
 662 * controller's data register and then waits until completion is
 663 * signalled by the IRQ handler.
664 */
665static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t)
666{
667 struct davinci_spi *davinci_spi;
668 int int_status, count, ret;
669 u8 conv, tmp;
670 u32 tx_data, data1_reg_val;
671 u32 buf_val, flg_val;
672 struct davinci_spi_platform_data *pdata;
673
674 davinci_spi = spi_master_get_devdata(spi->master);
675 pdata = davinci_spi->pdata;
676
677 davinci_spi->tx = t->tx_buf;
678 davinci_spi->rx = t->rx_buf;
679
680 /* convert len to words based on bits_per_word */
681 conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
682 davinci_spi->count = t->len / conv;
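	/* For example, with 16-bit words bytes_per_word is 2, so a 10-byte
	 * t->len gives a count of 5 words here; the "count *= conv" at the
	 * end of this function converts back to bytes for the SPI core. */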
683
684 INIT_COMPLETION(davinci_spi->done);
685
686 ret = davinci_spi_bufs_prep(spi, davinci_spi);
687 if (ret)
688 return ret;
689
690 /* Enable SPI */
691 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
692
693 iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
694 (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
695 davinci_spi->base + SPIDELAY);
696
697 count = davinci_spi->count;
698 data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
699 tmp = ~(0x1 << spi->chip_select);
700
701 clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
702
703 data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
704
705 while ((ioread32(davinci_spi->base + SPIBUF)
706 & SPIBUF_RXEMPTY_MASK) == 0)
707 cpu_relax();
708
 709 /* Determine the command to execute: READ or WRITE */
710 if (t->tx_buf) {
711 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
712
713 while (1) {
714 tx_data = davinci_spi->get_tx(davinci_spi);
715
716 data1_reg_val &= ~(0xFFFF);
717 data1_reg_val |= (0xFFFF & tx_data);
718
719 buf_val = ioread32(davinci_spi->base + SPIBUF);
720 if ((buf_val & SPIBUF_TXFULL_MASK) == 0) {
721 iowrite32(data1_reg_val,
722 davinci_spi->base + SPIDAT1);
723
724 count--;
725 }
726 while (ioread32(davinci_spi->base + SPIBUF)
727 & SPIBUF_RXEMPTY_MASK)
728 cpu_relax();
729
730 /* getting the returned byte */
731 if (t->rx_buf) {
732 buf_val = ioread32(davinci_spi->base + SPIBUF);
733 davinci_spi->get_rx(buf_val, davinci_spi);
734 }
735 if (count <= 0)
736 break;
737 }
738 } else {
739 if (pdata->poll_mode) {
740 while (1) {
741 /* keeps the serial clock going */
742 if ((ioread32(davinci_spi->base + SPIBUF)
743 & SPIBUF_TXFULL_MASK) == 0)
744 iowrite32(data1_reg_val,
745 davinci_spi->base + SPIDAT1);
746
747 while (ioread32(davinci_spi->base + SPIBUF) &
748 SPIBUF_RXEMPTY_MASK)
749 cpu_relax();
750
751 flg_val = ioread32(davinci_spi->base + SPIFLG);
752 buf_val = ioread32(davinci_spi->base + SPIBUF);
753
754 davinci_spi->get_rx(buf_val, davinci_spi);
755
756 count--;
757 if (count <= 0)
758 break;
759 }
760 } else { /* Receive in Interrupt mode */
761 int i;
762
763 for (i = 0; i < davinci_spi->count; i++) {
764 set_io_bits(davinci_spi->base + SPIINT,
765 SPIINT_BITERR_INTR
766 | SPIINT_OVRRUN_INTR
767 | SPIINT_RX_INTR);
768
769 iowrite32(data1_reg_val,
770 davinci_spi->base + SPIDAT1);
771
772 while (ioread32(davinci_spi->base + SPIINT) &
773 SPIINT_RX_INTR)
774 cpu_relax();
775 }
776 iowrite32((data1_reg_val & 0x0ffcffff),
777 davinci_spi->base + SPIDAT1);
778 }
779 }
780
781 /*
 782 * Check for bit error, desync error, parity error, timeout error and
783 * receive overflow errors
784 */
785 int_status = ioread32(davinci_spi->base + SPIFLG);
786
787 ret = davinci_spi_check_error(davinci_spi, int_status);
788 if (ret != 0)
789 return ret;
790
791 /* SPI Framework maintains the count only in bytes so convert back */
792 davinci_spi->count *= conv;
793
794 return t->len;
795}
796
797#define DAVINCI_DMA_DATA_TYPE_S8 0x01
798#define DAVINCI_DMA_DATA_TYPE_S16 0x02
799#define DAVINCI_DMA_DATA_TYPE_S32 0x04
800
801static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t)
802{
803 struct davinci_spi *davinci_spi;
804 int int_status = 0;
805 int count, temp_count;
806 u8 conv = 1;
807 u8 tmp;
808 u32 data1_reg_val;
809 struct davinci_spi_dma *davinci_spi_dma;
810 int word_len, data_type, ret;
811 unsigned long tx_reg, rx_reg;
812 struct davinci_spi_platform_data *pdata;
813 struct device *sdev;
814
815 davinci_spi = spi_master_get_devdata(spi->master);
816 pdata = davinci_spi->pdata;
817 sdev = davinci_spi->bitbang.master->dev.parent;
818
819 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
820
821 tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
822 rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
823
824 davinci_spi->tx = t->tx_buf;
825 davinci_spi->rx = t->rx_buf;
826
827 /* convert len to words based on bits_per_word */
828 conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
829 davinci_spi->count = t->len / conv;
830
831 INIT_COMPLETION(davinci_spi->done);
832
833 init_completion(&davinci_spi_dma->dma_rx_completion);
834 init_completion(&davinci_spi_dma->dma_tx_completion);
835
836 word_len = conv * 8;
837
838 if (word_len <= 8)
839 data_type = DAVINCI_DMA_DATA_TYPE_S8;
840 else if (word_len <= 16)
841 data_type = DAVINCI_DMA_DATA_TYPE_S16;
842 else if (word_len <= 32)
843 data_type = DAVINCI_DMA_DATA_TYPE_S32;
844 else
845 return -EINVAL;
846
847 ret = davinci_spi_bufs_prep(spi, davinci_spi);
848 if (ret)
849 return ret;
850
851 /* Put delay val if required */
852 iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
853 (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
854 davinci_spi->base + SPIDELAY);
855
856 count = davinci_spi->count; /* the number of elements */
857 data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
858
859 /* CS default = 0xFF */
860 tmp = ~(0x1 << spi->chip_select);
861
862 clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
863
864 data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
865
866 /* disable all interrupts for dma transfers */
867 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL);
868 /* Disable SPI to write configuration bits in SPIDAT */
869 clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
870 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
871 /* Enable SPI */
872 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
873
874 while ((ioread32(davinci_spi->base + SPIBUF)
875 & SPIBUF_RXEMPTY_MASK) == 0)
876 cpu_relax();
877
878
879 if (t->tx_buf) {
880 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
881 DMA_TO_DEVICE);
882 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
883 dev_dbg(sdev, "Unable to DMA map a %d bytes"
884 " TX buffer\n", count);
885 return -ENOMEM;
886 }
887 temp_count = count;
888 } else {
889 /* We need TX clocking for RX transaction */
890 t->tx_dma = dma_map_single(&spi->dev,
891 (void *)davinci_spi->tmp_buf, count + 1,
892 DMA_TO_DEVICE);
893 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
894 dev_dbg(sdev, "Unable to DMA map a %d bytes"
895 " TX tmp buffer\n", count);
896 return -ENOMEM;
897 }
898 temp_count = count + 1;
899 }
900
901 edma_set_transfer_params(davinci_spi_dma->dma_tx_channel,
902 data_type, temp_count, 1, 0, ASYNC);
903 edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT);
904 edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT);
905 edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0);
906 edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0);
907
908 if (t->rx_buf) {
909 /* initiate transaction */
910 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
911
912 t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count,
913 DMA_FROM_DEVICE);
914 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
915 dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
916 count);
917 if (t->tx_buf != NULL)
918 dma_unmap_single(NULL, t->tx_dma,
919 count, DMA_TO_DEVICE);
920 return -ENOMEM;
921 }
922 edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
923 data_type, count, 1, 0, ASYNC);
924 edma_set_src(davinci_spi_dma->dma_rx_channel,
925 rx_reg, INCR, W8BIT);
926 edma_set_dest(davinci_spi_dma->dma_rx_channel,
927 t->rx_dma, INCR, W8BIT);
928 edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
929 edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
930 data_type, 0);
931 }
932
933 if ((t->tx_buf) || (t->rx_buf))
934 edma_start(davinci_spi_dma->dma_tx_channel);
935
936 if (t->rx_buf)
937 edma_start(davinci_spi_dma->dma_rx_channel);
938
939 if ((t->rx_buf) || (t->tx_buf))
940 davinci_spi_set_dma_req(spi, 1);
941
942 if (t->tx_buf)
943 wait_for_completion_interruptible(
944 &davinci_spi_dma->dma_tx_completion);
945
946 if (t->rx_buf)
947 wait_for_completion_interruptible(
948 &davinci_spi_dma->dma_rx_completion);
949
950 dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE);
951
952 if (t->rx_buf)
953 dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE);
954
955 /*
 956 * Check for bit error, desync error, parity error, timeout error and
957 * receive overflow errors
958 */
959 int_status = ioread32(davinci_spi->base + SPIFLG);
960
961 ret = davinci_spi_check_error(davinci_spi, int_status);
962 if (ret != 0)
963 return ret;
964
965 /* SPI Framework maintains the count only in bytes so convert back */
966 davinci_spi->count *= conv;
967
968 return t->len;
969}
970
971/**
972 * davinci_spi_irq - IRQ handler for DaVinci SPI
973 * @irq: IRQ number for this SPI Master
974 * @context_data: structure for SPI Master controller davinci_spi
975 */
976static irqreturn_t davinci_spi_irq(s32 irq, void *context_data)
977{
978 struct davinci_spi *davinci_spi = context_data;
979 u32 int_status, rx_data = 0;
980 irqreturn_t ret = IRQ_NONE;
981
982 int_status = ioread32(davinci_spi->base + SPIFLG);
983
984 while ((int_status & SPIFLG_RX_INTR_MASK)) {
985 if (likely(int_status & SPIFLG_RX_INTR_MASK)) {
986 ret = IRQ_HANDLED;
987
988 rx_data = ioread32(davinci_spi->base + SPIBUF);
989 davinci_spi->get_rx(rx_data, davinci_spi);
990
991 /* Disable Receive Interrupt */
992 iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR),
993 davinci_spi->base + SPIINT);
994 } else
995 (void)davinci_spi_check_error(davinci_spi, int_status);
996
997 int_status = ioread32(davinci_spi->base + SPIFLG);
998 }
999
1000 return ret;
1001}
1002
1003/**
1004 * davinci_spi_probe - probe function for SPI Master Controller
 1005 * @pdev: platform_device structure which contains platform specific data
1006 */
1007static int davinci_spi_probe(struct platform_device *pdev)
1008{
1009 struct spi_master *master;
1010 struct davinci_spi *davinci_spi;
1011 struct davinci_spi_platform_data *pdata;
1012 struct resource *r, *mem;
1013 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
1014 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
1015 resource_size_t dma_eventq = SPI_NO_RESOURCE;
1016 int i = 0, ret = 0;
1017
1018 pdata = pdev->dev.platform_data;
1019 if (pdata == NULL) {
1020 ret = -ENODEV;
1021 goto err;
1022 }
1023
1024 master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
1025 if (master == NULL) {
1026 ret = -ENOMEM;
1027 goto err;
1028 }
1029
1030 dev_set_drvdata(&pdev->dev, master);
1031
1032 davinci_spi = spi_master_get_devdata(master);
1033 if (davinci_spi == NULL) {
1034 ret = -ENOENT;
1035 goto free_master;
1036 }
1037
1038 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1039 if (r == NULL) {
1040 ret = -ENOENT;
1041 goto free_master;
1042 }
1043
1044 davinci_spi->pbase = r->start;
1045 davinci_spi->region_size = resource_size(r);
1046 davinci_spi->pdata = pdata;
1047
1048 mem = request_mem_region(r->start, davinci_spi->region_size,
1049 pdev->name);
1050 if (mem == NULL) {
1051 ret = -EBUSY;
1052 goto free_master;
1053 }
1054
1055 davinci_spi->base = (struct davinci_spi_reg __iomem *)
1056 ioremap(r->start, davinci_spi->region_size);
1057 if (davinci_spi->base == NULL) {
1058 ret = -ENOMEM;
1059 goto release_region;
1060 }
1061
1062 davinci_spi->irq = platform_get_irq(pdev, 0);
1063 if (davinci_spi->irq <= 0) {
1064 ret = -EINVAL;
1065 goto unmap_io;
1066 }
1067
1068 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED,
1069 dev_name(&pdev->dev), davinci_spi);
1070 if (ret)
1071 goto unmap_io;
1072
1073 /* Allocate tmp_buf for tx_buf */
1074 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL);
1075 if (davinci_spi->tmp_buf == NULL) {
1076 ret = -ENOMEM;
1077 goto irq_free;
1078 }
1079
1080 davinci_spi->bitbang.master = spi_master_get(master);
1081 if (davinci_spi->bitbang.master == NULL) {
1082 ret = -ENODEV;
1083 goto free_tmp_buf;
1084 }
1085
1086 davinci_spi->clk = clk_get(&pdev->dev, NULL);
1087 if (IS_ERR(davinci_spi->clk)) {
1088 ret = -ENODEV;
1089 goto put_master;
1090 }
1091 clk_enable(davinci_spi->clk);
1092
1093
1094 master->bus_num = pdev->id;
1095 master->num_chipselect = pdata->num_chipselect;
1096 master->setup = davinci_spi_setup;
1097 master->cleanup = davinci_spi_cleanup;
1098
1099 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1100 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1101
1102 davinci_spi->version = pdata->version;
1103 use_dma = pdata->use_dma;
1104
1105 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1106 if (davinci_spi->version == SPI_VERSION_2)
1107 davinci_spi->bitbang.flags |= SPI_READY;
1108
1109 if (use_dma) {
1110 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1111 if (r)
1112 dma_rx_chan = r->start;
1113 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1114 if (r)
1115 dma_tx_chan = r->start;
1116 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1117 if (r)
1118 dma_eventq = r->start;
1119 }
1120
1121 if (!use_dma ||
1122 dma_rx_chan == SPI_NO_RESOURCE ||
1123 dma_tx_chan == SPI_NO_RESOURCE ||
1124 dma_eventq == SPI_NO_RESOURCE) {
1125 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio;
1126 use_dma = 0;
1127 } else {
1128 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma;
1129 davinci_spi->dma_channels = kzalloc(master->num_chipselect
1130 * sizeof(struct davinci_spi_dma), GFP_KERNEL);
1131 if (davinci_spi->dma_channels == NULL) {
1132 ret = -ENOMEM;
1133 goto free_clk;
1134 }
1135
1136 for (i = 0; i < master->num_chipselect; i++) {
1137 davinci_spi->dma_channels[i].dma_rx_channel = -1;
1138 davinci_spi->dma_channels[i].dma_rx_sync_dev =
1139 dma_rx_chan;
1140 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1141 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1142 dma_tx_chan;
1143 davinci_spi->dma_channels[i].eventq = dma_eventq;
1144 }
1145 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1146 "Using RX channel = %d , TX channel = %d and "
1147 "event queue = %d", dma_rx_chan, dma_tx_chan,
1148 dma_eventq);
1149 }
1150
1151 davinci_spi->get_rx = davinci_spi_rx_buf_u8;
1152 davinci_spi->get_tx = davinci_spi_tx_buf_u8;
1153
1154 init_completion(&davinci_spi->done);
1155
1156 /* Reset In/OUT SPI module */
1157 iowrite32(0, davinci_spi->base + SPIGCR0);
1158 udelay(100);
1159 iowrite32(1, davinci_spi->base + SPIGCR0);
1160
1161 /* Clock internal */
1162 if (davinci_spi->pdata->clk_internal)
1163 set_io_bits(davinci_spi->base + SPIGCR1,
1164 SPIGCR1_CLKMOD_MASK);
1165 else
1166 clear_io_bits(davinci_spi->base + SPIGCR1,
1167 SPIGCR1_CLKMOD_MASK);
1168
1169 /* master mode default */
1170 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
1171
1172 if (davinci_spi->pdata->intr_level)
1173 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL);
1174 else
1175 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL);
1176
1177 ret = spi_bitbang_start(&davinci_spi->bitbang);
1178 if (ret)
1179 goto free_clk;
1180
1181 dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base);
1182
1183 if (!pdata->poll_mode)
1184 dev_info(&pdev->dev, "Operating in interrupt mode"
1185 " using IRQ %d\n", davinci_spi->irq);
1186
1187 return ret;
1188
1189free_clk:
1190 clk_disable(davinci_spi->clk);
1191 clk_put(davinci_spi->clk);
1192put_master:
1193 spi_master_put(master);
1194free_tmp_buf:
1195 kfree(davinci_spi->tmp_buf);
1196irq_free:
1197 free_irq(davinci_spi->irq, davinci_spi);
1198unmap_io:
1199 iounmap(davinci_spi->base);
1200release_region:
1201 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1202free_master:
1203 kfree(master);
1204err:
1205 return ret;
1206}
1207
1208/**
1209 * davinci_spi_remove - remove function for SPI Master Controller
 1210 * @pdev: platform_device structure which contains platform specific data
 1211 *
 1212 * This function reverses the actions of davinci_spi_probe: it frees
 1213 * the IRQ and the SPI controller's memory region. It also calls
 1214 * spi_bitbang_stop to destroy the work queue which was created by
 1215 * spi_bitbang_start.
1216 */
1217static int __exit davinci_spi_remove(struct platform_device *pdev)
1218{
1219 struct davinci_spi *davinci_spi;
1220 struct spi_master *master;
1221
1222 master = dev_get_drvdata(&pdev->dev);
1223 davinci_spi = spi_master_get_devdata(master);
1224
1225 spi_bitbang_stop(&davinci_spi->bitbang);
1226
1227 clk_disable(davinci_spi->clk);
1228 clk_put(davinci_spi->clk);
1229 spi_master_put(master);
1230 kfree(davinci_spi->tmp_buf);
1231 free_irq(davinci_spi->irq, davinci_spi);
1232 iounmap(davinci_spi->base);
1233 release_mem_region(davinci_spi->pbase, davinci_spi->region_size);
1234
1235 return 0;
1236}
1237
1238static struct platform_driver davinci_spi_driver = {
1239 .driver.name = "spi_davinci",
1240 .remove = __exit_p(davinci_spi_remove),
1241};
1242
1243static int __init davinci_spi_init(void)
1244{
1245 return platform_driver_probe(&davinci_spi_driver, davinci_spi_probe);
1246}
1247module_init(davinci_spi_init);
1248
1249static void __exit davinci_spi_exit(void)
1250{
1251 platform_driver_unregister(&davinci_spi_driver);
1252}
1253module_exit(davinci_spi_exit);
1254
1255MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
1256MODULE_LICENSE("GPL");
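
The probe routine above takes everything it needs from platform resources and from struct davinci_spi_platform_data. The sketch below is only a rough illustration of how a board file might describe such a controller; the register base, IRQ and EDMA channel numbers are placeholders, and the header that declares the platform data structure is platform specific and not part of this diff.

#include <linux/platform_device.h>
/* struct davinci_spi_platform_data is declared in a platform header
 * that is not part of this diff; only fields used by the driver above
 * are filled in, with placeholder values. */

static struct davinci_spi_platform_data board_spi0_pdata = {
	.version	= SPI_VERSION_2,	/* lets the driver advertise SPI_READY */
	.num_chipselect	= 2,
	.use_dma	= 1,
	.clk_internal	= 1,
	.intr_level	= 0,
	.poll_mode	= 0,
	.cs_hold	= 1,
	.c2tdelay	= 8,
	.t2cdelay	= 8,
};

static struct resource board_spi0_resources[] = {
	{ .start = 0x01c66000, .end = 0x01c66fff, .flags = IORESOURCE_MEM },
	{ .start = 42, .end = 42, .flags = IORESOURCE_IRQ },
	{ .start = 14, .end = 14, .flags = IORESOURCE_DMA },	/* RX channel */
	{ .start = 15, .end = 15, .flags = IORESOURCE_DMA },	/* TX channel */
	{ .start = 0,  .end = 0,  .flags = IORESOURCE_DMA },	/* event queue */
};

static struct platform_device board_spi0_device = {
	.name		= "spi_davinci",	/* matches davinci_spi_driver above */
	.id		= 0,
	.dev		= { .platform_data = &board_spi0_pdata },
	.resource	= board_spi0_resources,
	.num_resources	= ARRAY_SIZE(board_spi0_resources),
};

Board code would register this with platform_device_register(&board_spi0_device) before the driver's platform_driver_probe() call runs.
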
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
new file mode 100644
index 000000000000..d256cb00604c
--- /dev/null
+++ b/drivers/spi/dw_spi.c
@@ -0,0 +1,990 @@
1/*
 2 * dw_spi.c - DesignWare SPI core controller driver (see pxa2xx_spi.c)
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/dma-mapping.h>
21#include <linux/interrupt.h>
22#include <linux/highmem.h>
23#include <linux/delay.h>
24#include <linux/slab.h>
25
26#include <linux/spi/dw_spi.h>
27#include <linux/spi/spi.h>
28
29#ifdef CONFIG_DEBUG_FS
30#include <linux/debugfs.h>
31#endif
32
33#define START_STATE ((void *)0)
34#define RUNNING_STATE ((void *)1)
35#define DONE_STATE ((void *)2)
36#define ERROR_STATE ((void *)-1)
37
38#define QUEUE_RUNNING 0
39#define QUEUE_STOPPED 1
40
41#define MRST_SPI_DEASSERT 0
42#define MRST_SPI_ASSERT 1
43
44/* Slave spi_dev related */
45struct chip_data {
46 u16 cr0;
47 u8 cs; /* chip select pin */
48 u8 n_bytes; /* current is a 1/2/4 byte op */
49 u8 tmode; /* TR/TO/RO/EEPROM */
50 u8 type; /* SPI/SSP/MicroWire */
51
52 u8 poll_mode; /* 1 means use poll mode */
53
54 u32 dma_width;
55 u32 rx_threshold;
56 u32 tx_threshold;
57 u8 enable_dma;
58 u8 bits_per_word;
59 u16 clk_div; /* baud rate divider */
60 u32 speed_hz; /* baud rate */
61 int (*write)(struct dw_spi *dws);
62 int (*read)(struct dw_spi *dws);
63 void (*cs_control)(u32 command);
64};
65
66#ifdef CONFIG_DEBUG_FS
67static int spi_show_regs_open(struct inode *inode, struct file *file)
68{
69 file->private_data = inode->i_private;
70 return 0;
71}
72
73#define SPI_REGS_BUFSIZE 1024
74static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
75 size_t count, loff_t *ppos)
76{
77 struct dw_spi *dws;
78 char *buf;
79 u32 len = 0;
80 ssize_t ret;
81
82 dws = file->private_data;
83
84 buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
85 if (!buf)
86 return 0;
87
88 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
89 "MRST SPI0 registers:\n");
90 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
91 "=================================\n");
92 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
93 "CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
94 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
95 "CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
96 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
97 "SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
98 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
99 "SER: \t\t0x%08x\n", dw_readl(dws, ser));
100 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
101 "BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
102 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
103 "TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
104 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
105 "RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
106 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
107 "TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
108 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
109 "RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
110 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
111 "SR: \t\t0x%08x\n", dw_readl(dws, sr));
112 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
113 "IMR: \t\t0x%08x\n", dw_readl(dws, imr));
114 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
115 "ISR: \t\t0x%08x\n", dw_readl(dws, isr));
116 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
117 "DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
118 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
119 "DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
120 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
121 "DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
122 len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
123 "=================================\n");
124
125 ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
126 kfree(buf);
127 return ret;
128}
129
130static const struct file_operations mrst_spi_regs_ops = {
131 .owner = THIS_MODULE,
132 .open = spi_show_regs_open,
133 .read = spi_show_regs,
134};
135
136static int mrst_spi_debugfs_init(struct dw_spi *dws)
137{
138 dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
139 if (!dws->debugfs)
140 return -ENOMEM;
141
142 debugfs_create_file("registers", S_IFREG | S_IRUGO,
143 dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
144 return 0;
145}
146
147static void mrst_spi_debugfs_remove(struct dw_spi *dws)
148{
149 if (dws->debugfs)
150 debugfs_remove_recursive(dws->debugfs);
151}
152
153#else
154static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
155{
156 return 0;
157}
158
159static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
160{
161}
162#endif /* CONFIG_DEBUG_FS */
163
164static void wait_till_not_busy(struct dw_spi *dws)
165{
166 unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
167
168 while (time_before(jiffies, end)) {
169 if (!(dw_readw(dws, sr) & SR_BUSY))
170 return;
171 }
172 dev_err(&dws->master->dev,
173 "DW SPI: Status keeps busy for 1000us after a read/write!\n");
174}
175
176static void flush(struct dw_spi *dws)
177{
178 while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
179 dw_readw(dws, dr);
180
181 wait_till_not_busy(dws);
182}
183
184static void null_cs_control(u32 command)
185{
186}
187
188static int null_writer(struct dw_spi *dws)
189{
190 u8 n_bytes = dws->n_bytes;
191
192 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
193 || (dws->tx == dws->tx_end))
194 return 0;
195 dw_writew(dws, dr, 0);
196 dws->tx += n_bytes;
197
198 wait_till_not_busy(dws);
199 return 1;
200}
201
202static int null_reader(struct dw_spi *dws)
203{
204 u8 n_bytes = dws->n_bytes;
205
206 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
207 && (dws->rx < dws->rx_end)) {
208 dw_readw(dws, dr);
209 dws->rx += n_bytes;
210 }
211 wait_till_not_busy(dws);
212 return dws->rx == dws->rx_end;
213}
214
215static int u8_writer(struct dw_spi *dws)
216{
217 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
218 || (dws->tx == dws->tx_end))
219 return 0;
220
221 dw_writew(dws, dr, *(u8 *)(dws->tx));
222 ++dws->tx;
223
224 wait_till_not_busy(dws);
225 return 1;
226}
227
228static int u8_reader(struct dw_spi *dws)
229{
230 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
231 && (dws->rx < dws->rx_end)) {
232 *(u8 *)(dws->rx) = dw_readw(dws, dr);
233 ++dws->rx;
234 }
235
236 wait_till_not_busy(dws);
237 return dws->rx == dws->rx_end;
238}
239
240static int u16_writer(struct dw_spi *dws)
241{
242 if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
243 || (dws->tx == dws->tx_end))
244 return 0;
245
246 dw_writew(dws, dr, *(u16 *)(dws->tx));
247 dws->tx += 2;
248
249 wait_till_not_busy(dws);
250 return 1;
251}
252
253static int u16_reader(struct dw_spi *dws)
254{
255 u16 temp;
256
257 while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
258 && (dws->rx < dws->rx_end)) {
259 temp = dw_readw(dws, dr);
260 *(u16 *)(dws->rx) = temp;
261 dws->rx += 2;
262 }
263
264 wait_till_not_busy(dws);
265 return dws->rx == dws->rx_end;
266}
267
268static void *next_transfer(struct dw_spi *dws)
269{
270 struct spi_message *msg = dws->cur_msg;
271 struct spi_transfer *trans = dws->cur_transfer;
272
273 /* Move to next transfer */
274 if (trans->transfer_list.next != &msg->transfers) {
275 dws->cur_transfer =
276 list_entry(trans->transfer_list.next,
277 struct spi_transfer,
278 transfer_list);
279 return RUNNING_STATE;
280 } else
281 return DONE_STATE;
282}
283
284/*
 285 * Note: the protocol driver is expected to prepare DMA-capable
 286 * memory first; this function only needs to translate the virtual
 287 * address to a physical one.
288 */
289static int map_dma_buffers(struct dw_spi *dws)
290{
291 if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
292 || !dws->cur_chip->enable_dma)
293 return 0;
294
295 if (dws->cur_transfer->tx_dma)
296 dws->tx_dma = dws->cur_transfer->tx_dma;
297
298 if (dws->cur_transfer->rx_dma)
299 dws->rx_dma = dws->cur_transfer->rx_dma;
300
301 return 1;
302}
303
304/* Caller already set message->status; dma and pio irqs are blocked */
305static void giveback(struct dw_spi *dws)
306{
307 struct spi_transfer *last_transfer;
308 unsigned long flags;
309 struct spi_message *msg;
310
311 spin_lock_irqsave(&dws->lock, flags);
312 msg = dws->cur_msg;
313 dws->cur_msg = NULL;
314 dws->cur_transfer = NULL;
315 dws->prev_chip = dws->cur_chip;
316 dws->cur_chip = NULL;
317 dws->dma_mapped = 0;
318 queue_work(dws->workqueue, &dws->pump_messages);
319 spin_unlock_irqrestore(&dws->lock, flags);
320
321 last_transfer = list_entry(msg->transfers.prev,
322 struct spi_transfer,
323 transfer_list);
324
325 if (!last_transfer->cs_change)
326 dws->cs_control(MRST_SPI_DEASSERT);
327
328 msg->state = NULL;
329 if (msg->complete)
330 msg->complete(msg->context);
331}
332
333static void int_error_stop(struct dw_spi *dws, const char *msg)
334{
335 /* Stop and reset hw */
336 flush(dws);
337 spi_enable_chip(dws, 0);
338
339 dev_err(&dws->master->dev, "%s\n", msg);
340 dws->cur_msg->state = ERROR_STATE;
341 tasklet_schedule(&dws->pump_transfers);
342}
343
344static void transfer_complete(struct dw_spi *dws)
345{
 346 /* Update total bytes transferred; actual_length reports the actual bytes moved */
347 dws->cur_msg->actual_length += dws->len;
348
349 /* Move to next transfer */
350 dws->cur_msg->state = next_transfer(dws);
351
352 /* Handle end of message */
353 if (dws->cur_msg->state == DONE_STATE) {
354 dws->cur_msg->status = 0;
355 giveback(dws);
356 } else
357 tasklet_schedule(&dws->pump_transfers);
358}
359
360static irqreturn_t interrupt_transfer(struct dw_spi *dws)
361{
362 u16 irq_status, irq_mask = 0x3f;
363 u32 int_level = dws->fifo_len / 2;
364 u32 left;
365
366 irq_status = dw_readw(dws, isr) & irq_mask;
367 /* Error handling */
368 if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
369 dw_readw(dws, txoicr);
370 dw_readw(dws, rxoicr);
371 dw_readw(dws, rxuicr);
372 int_error_stop(dws, "interrupt_transfer: fifo overrun");
373 return IRQ_HANDLED;
374 }
375
376 if (irq_status & SPI_INT_TXEI) {
377 spi_mask_intr(dws, SPI_INT_TXEI);
378
379 left = (dws->tx_end - dws->tx) / dws->n_bytes;
380 left = (left > int_level) ? int_level : left;
381
382 while (left--)
383 dws->write(dws);
384 dws->read(dws);
385
386 /* Re-enable the IRQ if there is still data left to tx */
387 if (dws->tx_end > dws->tx)
388 spi_umask_intr(dws, SPI_INT_TXEI);
389 else
390 transfer_complete(dws);
391 }
392
393 return IRQ_HANDLED;
394}
395
396static irqreturn_t dw_spi_irq(int irq, void *dev_id)
397{
398 struct dw_spi *dws = dev_id;
399
400 if (!dws->cur_msg) {
401 spi_mask_intr(dws, SPI_INT_TXEI);
402 /* Never fail */
403 return IRQ_HANDLED;
404 }
405
406 return dws->transfer_handler(dws);
407}
408
409/* Must be called inside pump_transfers() */
410static void poll_transfer(struct dw_spi *dws)
411{
412 while (dws->write(dws))
413 dws->read(dws);
414
415 transfer_complete(dws);
416}
417
418static void dma_transfer(struct dw_spi *dws, int cs_change)
419{
420}
421
422static void pump_transfers(unsigned long data)
423{
424 struct dw_spi *dws = (struct dw_spi *)data;
425 struct spi_message *message = NULL;
426 struct spi_transfer *transfer = NULL;
427 struct spi_transfer *previous = NULL;
428 struct spi_device *spi = NULL;
429 struct chip_data *chip = NULL;
430 u8 bits = 0;
431 u8 imask = 0;
432 u8 cs_change = 0;
433 u16 txint_level = 0;
434 u16 clk_div = 0;
435 u32 speed = 0;
436 u32 cr0 = 0;
437
438 /* Get current state information */
439 message = dws->cur_msg;
440 transfer = dws->cur_transfer;
441 chip = dws->cur_chip;
442 spi = message->spi;
443
444 if (unlikely(!chip->clk_div))
445 chip->clk_div = dws->max_freq / chip->speed_hz;
446
447 if (message->state == ERROR_STATE) {
448 message->status = -EIO;
449 goto early_exit;
450 }
451
452 /* Handle end of message */
453 if (message->state == DONE_STATE) {
454 message->status = 0;
455 goto early_exit;
456 }
457
 458 /* Delay if requested at end of transfer */
459 if (message->state == RUNNING_STATE) {
460 previous = list_entry(transfer->transfer_list.prev,
461 struct spi_transfer,
462 transfer_list);
463 if (previous->delay_usecs)
464 udelay(previous->delay_usecs);
465 }
466
467 dws->n_bytes = chip->n_bytes;
468 dws->dma_width = chip->dma_width;
469 dws->cs_control = chip->cs_control;
470
471 dws->rx_dma = transfer->rx_dma;
472 dws->tx_dma = transfer->tx_dma;
473 dws->tx = (void *)transfer->tx_buf;
474 dws->tx_end = dws->tx + transfer->len;
475 dws->rx = transfer->rx_buf;
476 dws->rx_end = dws->rx + transfer->len;
477 dws->write = dws->tx ? chip->write : null_writer;
478 dws->read = dws->rx ? chip->read : null_reader;
479 dws->cs_change = transfer->cs_change;
480 dws->len = dws->cur_transfer->len;
481 if (chip != dws->prev_chip)
482 cs_change = 1;
483
484 cr0 = chip->cr0;
485
486 /* Handle per transfer options for bpw and speed */
487 if (transfer->speed_hz) {
488 speed = chip->speed_hz;
489
490 if (transfer->speed_hz != speed) {
491 speed = transfer->speed_hz;
492 if (speed > dws->max_freq) {
 493 printk(KERN_ERR "MRST SPI0: unsupported "
494 "freq: %dHz\n", speed);
495 message->status = -EIO;
496 goto early_exit;
497 }
498
 499 /* clk_div doesn't support odd numbers; round up to the next even value */
500 clk_div = dws->max_freq / speed;
501 clk_div = (clk_div + 1) & 0xfffe;
502
503 chip->speed_hz = speed;
504 chip->clk_div = clk_div;
505 }
506 }
507 if (transfer->bits_per_word) {
508 bits = transfer->bits_per_word;
509
510 switch (bits) {
511 case 8:
512 dws->n_bytes = 1;
513 dws->dma_width = 1;
514 dws->read = (dws->read != null_reader) ?
515 u8_reader : null_reader;
516 dws->write = (dws->write != null_writer) ?
517 u8_writer : null_writer;
518 break;
519 case 16:
520 dws->n_bytes = 2;
521 dws->dma_width = 2;
522 dws->read = (dws->read != null_reader) ?
523 u16_reader : null_reader;
524 dws->write = (dws->write != null_writer) ?
525 u16_writer : null_writer;
526 break;
527 default:
 528 printk(KERN_ERR "MRST SPI0: unsupported bits: "
529 "%db\n", bits);
530 message->status = -EIO;
531 goto early_exit;
532 }
533
534 cr0 = (bits - 1)
535 | (chip->type << SPI_FRF_OFFSET)
536 | (spi->mode << SPI_MODE_OFFSET)
537 | (chip->tmode << SPI_TMOD_OFFSET);
538 }
539 message->state = RUNNING_STATE;
540
541 /*
542 * Adjust transfer mode if necessary. Requires platform dependent
543 * chipselect mechanism.
544 */
545 if (dws->cs_control) {
546 if (dws->rx && dws->tx)
547 chip->tmode = 0x00;
548 else if (dws->rx)
549 chip->tmode = 0x02;
550 else
551 chip->tmode = 0x01;
552
553 cr0 &= ~(0x3 << SPI_MODE_OFFSET);
554 cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
555 }
556
557 /* Check if current transfer is a DMA transaction */
558 dws->dma_mapped = map_dma_buffers(dws);
559
560 /*
561 * Interrupt mode
 562 * we only need to set the TXEI IRQ, as TX and RX always happen synchronously
563 */
564 if (!dws->dma_mapped && !chip->poll_mode) {
565 int templen = dws->len / dws->n_bytes;
566 txint_level = dws->fifo_len / 2;
567 txint_level = (templen > txint_level) ? txint_level : templen;
568
569 imask |= SPI_INT_TXEI;
570 dws->transfer_handler = interrupt_transfer;
571 }
572
573 /*
574 * Reprogram registers only if
575 * 1. chip select changes
576 * 2. clk_div is changed
577 * 3. control value changes
578 */
579 if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) {
580 spi_enable_chip(dws, 0);
581
582 if (dw_readw(dws, ctrl0) != cr0)
583 dw_writew(dws, ctrl0, cr0);
584
585 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
586 spi_chip_sel(dws, spi->chip_select);
587
 588 /* Set the interrupt mask; for poll mode just disable all interrupts */
589 spi_mask_intr(dws, 0xff);
590 if (imask)
591 spi_umask_intr(dws, imask);
592 if (txint_level)
593 dw_writew(dws, txfltr, txint_level);
594
595 spi_enable_chip(dws, 1);
596 if (cs_change)
597 dws->prev_chip = chip;
598 }
599
600 if (dws->dma_mapped)
601 dma_transfer(dws, cs_change);
602
603 if (chip->poll_mode)
604 poll_transfer(dws);
605
606 return;
607
608early_exit:
609 giveback(dws);
610 return;
611}
612
613static void pump_messages(struct work_struct *work)
614{
615 struct dw_spi *dws =
616 container_of(work, struct dw_spi, pump_messages);
617 unsigned long flags;
618
619 /* Lock queue and check for queue work */
620 spin_lock_irqsave(&dws->lock, flags);
621 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
622 dws->busy = 0;
623 spin_unlock_irqrestore(&dws->lock, flags);
624 return;
625 }
626
627 /* Make sure we are not already running a message */
628 if (dws->cur_msg) {
629 spin_unlock_irqrestore(&dws->lock, flags);
630 return;
631 }
632
633 /* Extract head of queue */
634 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
635 list_del_init(&dws->cur_msg->queue);
636
 637 /* Initial message state */
638 dws->cur_msg->state = START_STATE;
639 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
640 struct spi_transfer,
641 transfer_list);
642 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
643
644 /* Mark as busy and launch transfers */
645 tasklet_schedule(&dws->pump_transfers);
646
647 dws->busy = 1;
648 spin_unlock_irqrestore(&dws->lock, flags);
649}
650
651/* spi_device use this to queue in their spi_msg */
652static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
653{
654 struct dw_spi *dws = spi_master_get_devdata(spi->master);
655 unsigned long flags;
656
657 spin_lock_irqsave(&dws->lock, flags);
658
659 if (dws->run == QUEUE_STOPPED) {
660 spin_unlock_irqrestore(&dws->lock, flags);
661 return -ESHUTDOWN;
662 }
663
664 msg->actual_length = 0;
665 msg->status = -EINPROGRESS;
666 msg->state = START_STATE;
667
668 list_add_tail(&msg->queue, &dws->queue);
669
670 if (dws->run == QUEUE_RUNNING && !dws->busy) {
671
672 if (dws->cur_transfer || dws->cur_msg)
673 queue_work(dws->workqueue,
674 &dws->pump_messages);
675 else {
676 /* If no other data transaction in air, just go */
677 spin_unlock_irqrestore(&dws->lock, flags);
678 pump_messages(&dws->pump_messages);
679 return 0;
680 }
681 }
682
683 spin_unlock_irqrestore(&dws->lock, flags);
684 return 0;
685}
686
687/* This may be called twice for each spi dev */
688static int dw_spi_setup(struct spi_device *spi)
689{
690 struct dw_spi_chip *chip_info = NULL;
691 struct chip_data *chip;
692
693 if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
694 return -EINVAL;
695
696 /* Only alloc on first setup */
697 chip = spi_get_ctldata(spi);
698 if (!chip) {
699 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
700 if (!chip)
701 return -ENOMEM;
702
703 chip->cs_control = null_cs_control;
704 chip->enable_dma = 0;
705 }
706
707 /*
708 * Protocol drivers may change the chip settings, so...
709 * if chip_info exists, use it
710 */
711 chip_info = spi->controller_data;
712
713 /* chip_info doesn't always exist */
714 if (chip_info) {
715 if (chip_info->cs_control)
716 chip->cs_control = chip_info->cs_control;
717
718 chip->poll_mode = chip_info->poll_mode;
719 chip->type = chip_info->type;
720
721 chip->rx_threshold = 0;
722 chip->tx_threshold = 0;
723
724 chip->enable_dma = chip_info->enable_dma;
725 }
726
727 if (spi->bits_per_word <= 8) {
728 chip->n_bytes = 1;
729 chip->dma_width = 1;
730 chip->read = u8_reader;
731 chip->write = u8_writer;
732 } else if (spi->bits_per_word <= 16) {
733 chip->n_bytes = 2;
734 chip->dma_width = 2;
735 chip->read = u16_reader;
736 chip->write = u16_writer;
737 } else {
738 /* Never take >16b case for MRST SPIC */
739 dev_err(&spi->dev, "invalid wordsize\n");
740 return -EINVAL;
741 }
742 chip->bits_per_word = spi->bits_per_word;
743
744 if (!spi->max_speed_hz) {
745 dev_err(&spi->dev, "No max speed HZ parameter\n");
746 return -EINVAL;
747 }
748 chip->speed_hz = spi->max_speed_hz;
749
750 chip->tmode = 0; /* Tx & Rx */
751 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
752 chip->cr0 = (chip->bits_per_word - 1)
753 | (chip->type << SPI_FRF_OFFSET)
754 | (spi->mode << SPI_MODE_OFFSET)
755 | (chip->tmode << SPI_TMOD_OFFSET);
756
757 spi_set_ctldata(spi, chip);
758 return 0;
759}
760
761static void dw_spi_cleanup(struct spi_device *spi)
762{
763 struct chip_data *chip = spi_get_ctldata(spi);
764 kfree(chip);
765}
766
767static int __devinit init_queue(struct dw_spi *dws)
768{
769 INIT_LIST_HEAD(&dws->queue);
770 spin_lock_init(&dws->lock);
771
772 dws->run = QUEUE_STOPPED;
773 dws->busy = 0;
774
775 tasklet_init(&dws->pump_transfers,
776 pump_transfers, (unsigned long)dws);
777
778 INIT_WORK(&dws->pump_messages, pump_messages);
779 dws->workqueue = create_singlethread_workqueue(
780 dev_name(dws->master->dev.parent));
781 if (dws->workqueue == NULL)
782 return -EBUSY;
783
784 return 0;
785}
786
787static int start_queue(struct dw_spi *dws)
788{
789 unsigned long flags;
790
791 spin_lock_irqsave(&dws->lock, flags);
792
793 if (dws->run == QUEUE_RUNNING || dws->busy) {
794 spin_unlock_irqrestore(&dws->lock, flags);
795 return -EBUSY;
796 }
797
798 dws->run = QUEUE_RUNNING;
799 dws->cur_msg = NULL;
800 dws->cur_transfer = NULL;
801 dws->cur_chip = NULL;
802 dws->prev_chip = NULL;
803 spin_unlock_irqrestore(&dws->lock, flags);
804
805 queue_work(dws->workqueue, &dws->pump_messages);
806
807 return 0;
808}
809
810static int stop_queue(struct dw_spi *dws)
811{
812 unsigned long flags;
813 unsigned limit = 50;
814 int status = 0;
815
816 spin_lock_irqsave(&dws->lock, flags);
817 dws->run = QUEUE_STOPPED;
818 while (!list_empty(&dws->queue) && dws->busy && limit--) {
819 spin_unlock_irqrestore(&dws->lock, flags);
820 msleep(10);
821 spin_lock_irqsave(&dws->lock, flags);
822 }
823
824 if (!list_empty(&dws->queue) || dws->busy)
825 status = -EBUSY;
826 spin_unlock_irqrestore(&dws->lock, flags);
827
828 return status;
829}
830
831static int destroy_queue(struct dw_spi *dws)
832{
833 int status;
834
835 status = stop_queue(dws);
836 if (status != 0)
837 return status;
838 destroy_workqueue(dws->workqueue);
839 return 0;
840}
841
842/* Restart the controller, disable all interrupts, clean rx fifo */
843static void spi_hw_init(struct dw_spi *dws)
844{
845 spi_enable_chip(dws, 0);
846 spi_mask_intr(dws, 0xff);
847 spi_enable_chip(dws, 1);
848 flush(dws);
849
850 /*
 851 * Try to detect the FIFO depth if it is not set by the interface
 852 * driver; per the HW spec the depth can range from 2 to 256.
853 */
854 if (!dws->fifo_len) {
855 u32 fifo;
856 for (fifo = 2; fifo <= 257; fifo++) {
857 dw_writew(dws, txfltr, fifo);
858 if (fifo != dw_readw(dws, txfltr))
859 break;
860 }
861
862 dws->fifo_len = (fifo == 257) ? 0 : fifo;
863 dw_writew(dws, txfltr, 0);
864 }
865}
866
867int __devinit dw_spi_add_host(struct dw_spi *dws)
868{
869 struct spi_master *master;
870 int ret;
871
872 BUG_ON(dws == NULL);
873
874 master = spi_alloc_master(dws->parent_dev, 0);
875 if (!master) {
876 ret = -ENOMEM;
877 goto exit;
878 }
879
880 dws->master = master;
881 dws->type = SSI_MOTO_SPI;
882 dws->prev_chip = NULL;
883 dws->dma_inited = 0;
884 dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
885
886 ret = request_irq(dws->irq, dw_spi_irq, 0,
887 "dw_spi", dws);
888 if (ret < 0) {
889 dev_err(&master->dev, "can not get IRQ\n");
890 goto err_free_master;
891 }
892
893 master->mode_bits = SPI_CPOL | SPI_CPHA;
894 master->bus_num = dws->bus_num;
895 master->num_chipselect = dws->num_cs;
896 master->cleanup = dw_spi_cleanup;
897 master->setup = dw_spi_setup;
898 master->transfer = dw_spi_transfer;
899
900 dws->dma_inited = 0;
901
902 /* Basic HW init */
903 spi_hw_init(dws);
904
 905 /* Initialize and start the queue */
906 ret = init_queue(dws);
907 if (ret) {
908 dev_err(&master->dev, "problem initializing queue\n");
 909 goto err_disable_hw;
910 }
911 ret = start_queue(dws);
912 if (ret) {
913 dev_err(&master->dev, "problem starting queue\n");
 914 goto err_disable_hw;
915 }
916
917 spi_master_set_devdata(master, dws);
918 ret = spi_register_master(master);
919 if (ret) {
920 dev_err(&master->dev, "problem registering spi master\n");
921 goto err_queue_alloc;
922 }
923
924 mrst_spi_debugfs_init(dws);
925 return 0;
926
927err_queue_alloc:
928 destroy_queue(dws);
 929err_disable_hw:
930 spi_enable_chip(dws, 0);
931 free_irq(dws->irq, dws);
932err_free_master:
933 spi_master_put(master);
934exit:
935 return ret;
936}
937EXPORT_SYMBOL(dw_spi_add_host);
938
939void __devexit dw_spi_remove_host(struct dw_spi *dws)
940{
941 int status = 0;
942
943 if (!dws)
944 return;
945 mrst_spi_debugfs_remove(dws);
946
947 /* Remove the queue */
948 status = destroy_queue(dws);
949 if (status != 0)
950 dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
951 "complete, message memory not freed\n");
952
953 spi_enable_chip(dws, 0);
954 /* Disable clk */
955 spi_set_clk(dws, 0);
956 free_irq(dws->irq, dws);
957
958 /* Disconnect from the SPI framework */
959 spi_unregister_master(dws->master);
960}
961EXPORT_SYMBOL(dw_spi_remove_host);
962
963int dw_spi_suspend_host(struct dw_spi *dws)
964{
965 int ret = 0;
966
967 ret = stop_queue(dws);
968 if (ret)
969 return ret;
970 spi_enable_chip(dws, 0);
971 spi_set_clk(dws, 0);
972 return ret;
973}
974EXPORT_SYMBOL(dw_spi_suspend_host);
975
976int dw_spi_resume_host(struct dw_spi *dws)
977{
978 int ret;
979
980 spi_hw_init(dws);
981 ret = start_queue(dws);
982 if (ret)
983 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
984 return ret;
985}
986EXPORT_SYMBOL(dw_spi_resume_host);
987
988MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
989MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
990MODULE_LICENSE("GPL v2");
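
dw_spi_setup() above copies cs_control, poll_mode, type and enable_dma from spi->controller_data when the protocol or board code supplies a struct dw_spi_chip. The sketch below shows one hedged way a board file could hand that per-chip data to the core through the standard spi_board_info mechanism; the modalias, speed, bus and chip-select numbers are placeholders.

#include <linux/spi/spi.h>
#include <linux/spi/dw_spi.h>

/* Placeholder per-chip settings consumed by dw_spi_setup() above.
 * .cs_control is left NULL so the driver keeps its null_cs_control
 * default. */
static struct dw_spi_chip board_flash_chip = {
	.poll_mode	= 1,	/* busy-wait instead of TXEI interrupts */
	.enable_dma	= 0,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	 = "m25p80",	/* hypothetical slave driver */
		.max_speed_hz	 = 10000000,
		.bus_num	 = 0,		/* matches dws->bus_num */
		.chip_select	 = 0,
		.mode		 = SPI_MODE_0,
		.controller_data = &board_flash_chip,
	},
};

Board init code would then call spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices)).
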
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/dw_spi_mmio.c
new file mode 100644
index 000000000000..db35bd9c1b24
--- /dev/null
+++ b/drivers/spi/dw_spi_mmio.c
@@ -0,0 +1,148 @@
1/*
2 * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core
3 *
4 * Copyright (c) 2010, Octasic semiconductor.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 */
10
11#include <linux/clk.h>
12#include <linux/interrupt.h>
13#include <linux/platform_device.h>
14#include <linux/slab.h>
15#include <linux/spi/dw_spi.h>
16#include <linux/spi/spi.h>
17
18#define DRIVER_NAME "dw_spi_mmio"
19
20struct dw_spi_mmio {
21 struct dw_spi dws;
22 struct clk *clk;
23};
24
25static int __devinit dw_spi_mmio_probe(struct platform_device *pdev)
26{
27 struct dw_spi_mmio *dwsmmio;
28 struct dw_spi *dws;
29 struct resource *mem, *ioarea;
30 int ret;
31
32 dwsmmio = kzalloc(sizeof(struct dw_spi_mmio), GFP_KERNEL);
33 if (!dwsmmio) {
34 ret = -ENOMEM;
35 goto err_end;
36 }
37
38 dws = &dwsmmio->dws;
39
40 /* Get basic io resource and map it */
41 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
42 if (!mem) {
43 dev_err(&pdev->dev, "no mem resource?\n");
44 ret = -EINVAL;
45 goto err_kfree;
46 }
47
48 ioarea = request_mem_region(mem->start, resource_size(mem),
49 pdev->name);
50 if (!ioarea) {
51 dev_err(&pdev->dev, "SPI region already claimed\n");
52 ret = -EBUSY;
53 goto err_kfree;
54 }
55
56 dws->regs = ioremap_nocache(mem->start, resource_size(mem));
57 if (!dws->regs) {
58 dev_err(&pdev->dev, "SPI region already mapped\n");
59 ret = -ENOMEM;
60 goto err_release_reg;
61 }
62
63 dws->irq = platform_get_irq(pdev, 0);
64 if (dws->irq < 0) {
65 dev_err(&pdev->dev, "no irq resource?\n");
66 ret = dws->irq; /* -ENXIO */
67 goto err_unmap;
68 }
69
70 dwsmmio->clk = clk_get(&pdev->dev, NULL);
71 if (!dwsmmio->clk) {
72 ret = -ENODEV;
73 goto err_irq;
74 }
75 clk_enable(dwsmmio->clk);
76
77 dws->parent_dev = &pdev->dev;
78 dws->bus_num = 0;
79 dws->num_cs = 4;
80 dws->max_freq = clk_get_rate(dwsmmio->clk);
81
82 ret = dw_spi_add_host(dws);
83 if (ret)
84 goto err_clk;
85
86 platform_set_drvdata(pdev, dwsmmio);
87 return 0;
88
89err_clk:
90 clk_disable(dwsmmio->clk);
91 clk_put(dwsmmio->clk);
92 dwsmmio->clk = NULL;
93err_irq:
94 free_irq(dws->irq, dws);
95err_unmap:
96 iounmap(dws->regs);
97err_release_reg:
98 release_mem_region(mem->start, resource_size(mem));
99err_kfree:
100 kfree(dwsmmio);
101err_end:
102 return ret;
103}
104
105static int __devexit dw_spi_mmio_remove(struct platform_device *pdev)
106{
107 struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
108 struct resource *mem;
109
110 platform_set_drvdata(pdev, NULL);
111
112 clk_disable(dwsmmio->clk);
113 clk_put(dwsmmio->clk);
114 dwsmmio->clk = NULL;
115
116 free_irq(dwsmmio->dws.irq, &dwsmmio->dws);
117 dw_spi_remove_host(&dwsmmio->dws);
118 iounmap(dwsmmio->dws.regs);
119 kfree(dwsmmio);
120
121 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
122 release_mem_region(mem->start, resource_size(mem));
123 return 0;
124}
125
126static struct platform_driver dw_spi_mmio_driver = {
127 .remove = __devexit_p(dw_spi_mmio_remove),
128 .driver = {
129 .name = DRIVER_NAME,
130 .owner = THIS_MODULE,
131 },
132};
133
134static int __init dw_spi_mmio_init(void)
135{
136 return platform_driver_probe(&dw_spi_mmio_driver, dw_spi_mmio_probe);
137}
138module_init(dw_spi_mmio_init);
139
140static void __exit dw_spi_mmio_exit(void)
141{
142 platform_driver_unregister(&dw_spi_mmio_driver);
143}
144module_exit(dw_spi_mmio_exit);
145
146MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>");
147MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
148MODULE_LICENSE("GPL v2");
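
The MMIO glue above expects a memory resource, an interrupt, and a clock that clk_get(&pdev->dev, NULL) can resolve, since max_freq is taken from clk_get_rate(). A minimal, hypothetical registration sketch follows; the base address, size and IRQ number are placeholders and the clock hookup is platform specific.

#include <linux/platform_device.h>

/* Hypothetical platform device matching DRIVER_NAME ("dw_spi_mmio").
 * Base address, size and IRQ are placeholders; the platform must also
 * register a clock that clk_get(&pdev->dev, NULL) can find. */
static struct resource dw_spi_mmio_resources[] = {
	{
		.start	= 0x10000000,
		.end	= 0x10000fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 20,
		.end	= 20,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dw_spi_mmio_device = {
	.name		= "dw_spi_mmio",
	.id		= 0,
	.resource	= dw_spi_mmio_resources,
	.num_resources	= ARRAY_SIZE(dw_spi_mmio_resources),
};

Board code would then call platform_device_register(&dw_spi_mmio_device).
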
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
new file mode 100644
index 000000000000..1f52755dc878
--- /dev/null
+++ b/drivers/spi/dw_spi_pci.c
@@ -0,0 +1,172 @@
1/*
 2 * dw_spi_pci.c - PCI interface driver for DW SPI Core
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/interrupt.h>
21#include <linux/pci.h>
22#include <linux/slab.h>
23#include <linux/spi/dw_spi.h>
24#include <linux/spi/spi.h>
25
26#define DRIVER_NAME "dw_spi_pci"
27
28struct dw_spi_pci {
29 struct pci_dev *pdev;
30 struct dw_spi dws;
31};
32
33static int __devinit spi_pci_probe(struct pci_dev *pdev,
34 const struct pci_device_id *ent)
35{
36 struct dw_spi_pci *dwpci;
37 struct dw_spi *dws;
38 int pci_bar = 0;
39 int ret;
40
41 printk(KERN_INFO "DW: found PCI SPI controller(ID: %04x:%04x)\n",
42 pdev->vendor, pdev->device);
43
44 ret = pci_enable_device(pdev);
45 if (ret)
46 return ret;
47
48 dwpci = kzalloc(sizeof(struct dw_spi_pci), GFP_KERNEL);
49 if (!dwpci) {
50 ret = -ENOMEM;
51 goto err_disable;
52 }
53
54 dwpci->pdev = pdev;
55 dws = &dwpci->dws;
56
57 /* Get basic io resource and map it */
58 dws->paddr = pci_resource_start(pdev, pci_bar);
59 dws->iolen = pci_resource_len(pdev, pci_bar);
60
61 ret = pci_request_region(pdev, pci_bar, dev_name(&pdev->dev));
62 if (ret)
63 goto err_kfree;
64
65 dws->regs = ioremap_nocache((unsigned long)dws->paddr,
66 pci_resource_len(pdev, pci_bar));
67 if (!dws->regs) {
68 ret = -ENOMEM;
69 goto err_release_reg;
70 }
71
72 dws->parent_dev = &pdev->dev;
73 dws->bus_num = 0;
74 dws->num_cs = 4;
 75 dws->max_freq = 25000000; /* for Moorestown */
76 dws->irq = pdev->irq;
 77 dws->fifo_len = 40; /* FIFO has a 40-word buffer */
78
79 ret = dw_spi_add_host(dws);
80 if (ret)
81 goto err_unmap;
82
83 /* PCI hook and SPI hook use the same drv data */
84 pci_set_drvdata(pdev, dwpci);
85 return 0;
86
87err_unmap:
88 iounmap(dws->regs);
89err_release_reg:
90 pci_release_region(pdev, pci_bar);
91err_kfree:
92 kfree(dwpci);
93err_disable:
94 pci_disable_device(pdev);
95 return ret;
96}
97
98static void __devexit spi_pci_remove(struct pci_dev *pdev)
99{
100 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
101
102 pci_set_drvdata(pdev, NULL);
103 dw_spi_remove_host(&dwpci->dws);
104 iounmap(dwpci->dws.regs);
105 pci_release_region(pdev, 0);
106 kfree(dwpci);
107 pci_disable_device(pdev);
108}
109
110#ifdef CONFIG_PM
111static int spi_suspend(struct pci_dev *pdev, pm_message_t state)
112{
113 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
114 int ret;
115
116 ret = dw_spi_suspend_host(&dwpci->dws);
117 if (ret)
118 return ret;
119 pci_save_state(pdev);
120 pci_disable_device(pdev);
121 pci_set_power_state(pdev, pci_choose_state(pdev, state));
122 return ret;
123}
124
125static int spi_resume(struct pci_dev *pdev)
126{
127 struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
128 int ret;
129
130 pci_set_power_state(pdev, PCI_D0);
131 pci_restore_state(pdev);
132 ret = pci_enable_device(pdev);
133 if (ret)
134 return ret;
135 return dw_spi_resume_host(&dwpci->dws);
136}
137#else
138#define spi_suspend NULL
139#define spi_resume NULL
140#endif
141
142static const struct pci_device_id pci_ids[] __devinitdata = {
143 /* Intel Moorestown platform SPI controller 0 */
144 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
145 {},
146};
147
148static struct pci_driver dw_spi_driver = {
149 .name = DRIVER_NAME,
150 .id_table = pci_ids,
151 .probe = spi_pci_probe,
152 .remove = __devexit_p(spi_pci_remove),
153 .suspend = spi_suspend,
154 .resume = spi_resume,
155};
156
157static int __init mrst_spi_init(void)
158{
159 return pci_register_driver(&dw_spi_driver);
160}
161
162static void __exit mrst_spi_exit(void)
163{
164 pci_unregister_driver(&dw_spi_driver);
165}
166
167module_init(mrst_spi_init);
168module_exit(mrst_spi_exit);
169
170MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
171MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
172MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 1b74d5ca03f3..77d4cc88edea 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -17,12 +17,14 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#include <linux/of_spi.h>
20#include <linux/workqueue.h> 21#include <linux/workqueue.h>
21#include <linux/completion.h> 22#include <linux/completion.h>
22#include <linux/io.h> 23#include <linux/io.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
25#include <linux/fsl_devices.h> 26#include <linux/fsl_devices.h>
27#include <linux/slab.h>
26 28
27#include <asm/mpc52xx.h> 29#include <asm/mpc52xx.h>
28#include <asm/mpc52xx_psc.h> 30#include <asm/mpc52xx_psc.h>
@@ -313,11 +315,13 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
313 struct mpc52xx_psc __iomem *psc = mps->psc; 315 struct mpc52xx_psc __iomem *psc = mps->psc;
314 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo; 316 struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
315 u32 mclken_div; 317 u32 mclken_div;
316 int ret = 0; 318 int ret;
317 319
318 /* default sysclk is 512MHz */ 320 /* default sysclk is 512MHz */
319 mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK; 321 mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
320 mpc52xx_set_psc_clkdiv(psc_id, mclken_div); 322 ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
323 if (ret)
324 return ret;
321 325
322 /* Reset the PSC into a known state */ 326 /* Reset the PSC into a known state */
323 out_8(&psc->command, MPC52xx_PSC_RST_RX); 327 out_8(&psc->command, MPC52xx_PSC_RST_RX);
@@ -341,7 +345,7 @@ static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
341 345
342 mps->bits_per_word = 8; 346 mps->bits_per_word = 8;
343 347
344 return ret; 348 return 0;
345} 349}
346 350
347static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id) 351static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
@@ -410,8 +414,10 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
410 goto free_master; 414 goto free_master;
411 415
412 ret = mpc52xx_psc_spi_port_config(master->bus_num, mps); 416 ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
413 if (ret < 0) 417 if (ret < 0) {
418 dev_err(dev, "can't configure PSC! Is it capable of SPI?\n");
414 goto free_irq; 419 goto free_irq;
420 }
415 421
416 spin_lock_init(&mps->lock); 422 spin_lock_init(&mps->lock);
417 init_completion(&mps->done); 423 init_completion(&mps->done);
@@ -464,10 +470,11 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
464 const u32 *regaddr_p; 470 const u32 *regaddr_p;
465 u64 regaddr64, size64; 471 u64 regaddr64, size64;
466 s16 id = -1; 472 s16 id = -1;
473 int rc;
467 474
468 regaddr_p = of_get_address(op->node, 0, &size64, NULL); 475 regaddr_p = of_get_address(op->node, 0, &size64, NULL);
469 if (!regaddr_p) { 476 if (!regaddr_p) {
470 printk(KERN_ERR "Invalid PSC address\n"); 477 dev_err(&op->dev, "Invalid PSC address\n");
471 return -EINVAL; 478 return -EINVAL;
472 } 479 }
473 regaddr64 = of_translate_address(op->node, regaddr_p); 480 regaddr64 = of_translate_address(op->node, regaddr_p);
@@ -478,15 +485,18 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op,
478 485
479 psc_nump = of_get_property(op->node, "cell-index", NULL); 486 psc_nump = of_get_property(op->node, "cell-index", NULL);
480 if (!psc_nump || *psc_nump > 5) { 487 if (!psc_nump || *psc_nump > 5) {
481 printk(KERN_ERR "mpc52xx_psc_spi: Device node %s has invalid " 488 dev_err(&op->dev, "Invalid cell-index property\n");
482 "cell-index property\n", op->node->full_name);
483 return -EINVAL; 489 return -EINVAL;
484 } 490 }
485 id = *psc_nump + 1; 491 id = *psc_nump + 1;
486 } 492 }
487 493
488 return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, 494 rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
489 irq_of_parse_and_map(op->node, 0), id); 495 irq_of_parse_and_map(op->node, 0), id);
496 if (rc == 0)
497 of_register_spi_devices(dev_get_drvdata(&op->dev), op->node);
498
499 return rc;
490} 500}
491 501
492static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) 502static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
@@ -494,7 +504,7 @@ static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op)
494 return mpc52xx_psc_spi_do_remove(&op->dev); 504 return mpc52xx_psc_spi_do_remove(&op->dev);
495} 505}
496 506
497static struct of_device_id mpc52xx_psc_spi_of_match[] = { 507static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
498 { .compatible = "fsl,mpc5200-psc-spi", }, 508 { .compatible = "fsl,mpc5200-psc-spi", },
499 { .compatible = "mpc5200-psc-spi", }, /* old */ 509 { .compatible = "mpc5200-psc-spi", }, /* old */
500 {} 510 {}
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c
new file mode 100644
index 000000000000..cd68f1ce5cc3
--- /dev/null
+++ b/drivers/spi/mpc52xx_spi.c
@@ -0,0 +1,579 @@
1/*
2 * MPC52xx SPI bus driver.
3 *
4 * Copyright (C) 2008 Secret Lab Technologies Ltd.
5 *
6 * This file is released under the GPLv2
7 *
8 * This is the driver for the MPC5200's dedicated SPI controller.
9 *
10 * Note: this driver does not support the MPC5200 PSC in SPI mode. For
11 * that driver see drivers/spi/mpc52xx_psc_spi.c
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/of_platform.h>
18#include <linux/interrupt.h>
19#include <linux/delay.h>
20#include <linux/spi/spi.h>
21#include <linux/of_spi.h>
22#include <linux/io.h>
23#include <linux/of_gpio.h>
24#include <linux/slab.h>
25#include <asm/time.h>
26#include <asm/mpc52xx.h>
27
28MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
29MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
30MODULE_LICENSE("GPL");
31
32/* Register offsets */
33#define SPI_CTRL1 0x00
34#define SPI_CTRL1_SPIE (1 << 7)
35#define SPI_CTRL1_SPE (1 << 6)
36#define SPI_CTRL1_MSTR (1 << 4)
37#define SPI_CTRL1_CPOL (1 << 3)
38#define SPI_CTRL1_CPHA (1 << 2)
39#define SPI_CTRL1_SSOE (1 << 1)
40#define SPI_CTRL1_LSBFE (1 << 0)
41
42#define SPI_CTRL2 0x01
43#define SPI_BRR 0x04
44
45#define SPI_STATUS 0x05
46#define SPI_STATUS_SPIF (1 << 7)
47#define SPI_STATUS_WCOL (1 << 6)
48#define SPI_STATUS_MODF (1 << 4)
49
50#define SPI_DATA 0x09
51#define SPI_PORTDATA 0x0d
52#define SPI_DATADIR 0x10
53
54/* FSM state return values */
55#define FSM_STOP 0 /* Nothing more for the state machine to */
56 /* do. If something interesting happens */
57 /* then an IRQ will be received */
58#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
59 /* not expected */
60#define FSM_CONTINUE 2 /* Keep iterating the state machine */
61
62/* Driver internal data */
63struct mpc52xx_spi {
64 struct spi_master *master;
65 void __iomem *regs;
66 int irq0; /* MODF irq */
67 int irq1; /* SPIF irq */
68 unsigned int ipb_freq;
69
70 /* Statistics; not used now, but will be reintroduced for debugfs */
71 int msg_count;
72 int wcol_count;
73 int wcol_ticks;
74 u32 wcol_tx_timestamp;
75 int modf_count;
76 int byte_count;
77
78 struct list_head queue; /* queue of pending messages */
79 spinlock_t lock;
80 struct work_struct work;
81
82 /* Details of current transfer (length, and buffer pointers) */
83 struct spi_message *message; /* current message */
84 struct spi_transfer *transfer; /* current transfer */
85 int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
86 int len;
87 int timestamp;
88 u8 *rx_buf;
89 const u8 *tx_buf;
90 int cs_change;
91 int gpio_cs_count;
92 unsigned int *gpio_cs;
93};
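The driver below runs every transfer through the ->state callback stored in this struct: each handler returns one of the FSM_* codes defined above and may install the next handler before returning. A minimal standalone sketch of that pattern, with illustrative names only (none of them are the driver's):

#include <stdio.h>

enum { FSM_STOP, FSM_POLL, FSM_CONTINUE };

struct fsm {
        int (*state)(struct fsm *f);
        int remaining;
};

static int state_done(struct fsm *f)
{
        (void)f;
        return FSM_STOP;
}

static int state_run(struct fsm *f)
{
        if (--f->remaining == 0)
                f->state = state_done;  /* hand off to the next state */
        return FSM_CONTINUE;
}

int main(void)
{
        struct fsm f = { .state = state_run, .remaining = 3 };

        while (f.state(&f) == FSM_CONTINUE)
                ;       /* keep iterating, as mpc52xx_spi_fsm_process() does below */
        printf("state machine stopped\n");
        return 0;
}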
94
95/*
96 * CS control function
97 */
98static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
99{
100 int cs;
101
102 if (ms->gpio_cs_count > 0) {
103 cs = ms->message->spi->chip_select;
104 gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1);
105 } else
106 out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
107}
108
109/*
110 * Start a new transfer. This is called both by the idle state
111 * for the first transfer in a message, and by the wait state when the
112 * previous transfer in a message is complete.
113 */
114static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
115{
116 ms->rx_buf = ms->transfer->rx_buf;
117 ms->tx_buf = ms->transfer->tx_buf;
118 ms->len = ms->transfer->len;
119
120 /* Activate the chip select */
121 if (ms->cs_change)
122 mpc52xx_spi_chipsel(ms, 1);
123 ms->cs_change = ms->transfer->cs_change;
124
125 /* Write out the first byte */
126 ms->wcol_tx_timestamp = get_tbl();
127 if (ms->tx_buf)
128 out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
129 else
130 out_8(ms->regs + SPI_DATA, 0);
131}
132
133/* Forward declaration of state handlers */
134static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
135 u8 status, u8 data);
136static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
137 u8 status, u8 data);
138
139/*
140 * IDLE state
141 *
142 * No transfers are in progress; if another transfer is pending then retrieve
143 * it and kick it off. Otherwise, stop processing the state machine
144 */
145static int
146mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
147{
148 struct spi_device *spi;
149 int spr, sppr;
150 u8 ctrl1;
151
152 if (status && (irq != NO_IRQ))
153 dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
154 status);
155
156 /* Check if there is another transfer waiting. */
157 if (list_empty(&ms->queue))
158 return FSM_STOP;
159
160 /* get the head of the queue */
161 ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
162 list_del_init(&ms->message->queue);
163
164 /* Setup the controller parameters */
165 ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
166 spi = ms->message->spi;
167 if (spi->mode & SPI_CPHA)
168 ctrl1 |= SPI_CTRL1_CPHA;
169 if (spi->mode & SPI_CPOL)
170 ctrl1 |= SPI_CTRL1_CPOL;
171 if (spi->mode & SPI_LSB_FIRST)
172 ctrl1 |= SPI_CTRL1_LSBFE;
173 out_8(ms->regs + SPI_CTRL1, ctrl1);
174
175 /* Setup the controller speed */
176 /* minimum divider is '2'. Also, add '1' to force rounding the
177 * divider up. */
178 sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
179 spr = 0;
180 if (sppr < 1)
181 sppr = 1;
182 while (((sppr - 1) & ~0x7) != 0) {
183 sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
184 spr++;
185 }
186 sppr--; /* sppr quantity in register is offset by 1 */
187 if (spr > 7) {
188 /* Don't overrun limits of SPI baudrate register */
189 spr = 7;
190 sppr = 7;
191 }
192 out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
193
194 ms->cs_change = 1;
195 ms->transfer = container_of(ms->message->transfers.next,
196 struct spi_transfer, transfer_list);
197
198 mpc52xx_spi_start_transfer(ms);
199 ms->state = mpc52xx_spi_fsmstate_transfer;
200
201 return FSM_CONTINUE;
202}
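As a worked example of the SPPR/SPR search above (a sketch only; the resulting rate formula is inferred from this loop rather than quoted from the MPC5200 manual): with, say, a 66 MHz IPB clock and a 1 MHz device, the loop settles on SPPR=4, SPR=3, i.e. a divisor of (4+1)*2^(3+1)=80 and an actual rate of 825 kHz.

#include <stdio.h>

/* Standalone re-implementation of the divider search above. */
static void pick_divider(unsigned int ipb_freq, unsigned int speed_hz)
{
        unsigned int sppr, spr = 0;

        sppr = ((ipb_freq / speed_hz) + 1) >> 1;        /* round the divider up */
        if (sppr < 1)
                sppr = 1;
        while (((sppr - 1) & ~0x7) != 0) {              /* fold the excess into SPR */
                sppr = (sppr + 1) >> 1;
                spr++;
        }
        sppr--;                                         /* register field is offset by 1 */
        if (spr > 7)
                spr = sppr = 7;                         /* clamp to the 3-bit fields */

        printf("ipb=%u Hz want=%u Hz -> SPPR=%u SPR=%u actual=%u Hz\n",
               ipb_freq, speed_hz, sppr, spr,
               ipb_freq / ((sppr + 1) << (spr + 1)));
}

int main(void)
{
        pick_divider(66000000, 1000000);
        pick_divider(66000000, 100000);
        return 0;
}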
203
204/*
205 * TRANSFER state
206 *
207 * In the middle of a transfer. If the SPI core has completed processing
208 * a byte, then read out the received data and write out the next byte
209 * (unless this transfer is finished; in which case go on to the wait
210 * state)
211 */
212static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
213 u8 status, u8 data)
214{
215 if (!status)
216 return ms->irq0 ? FSM_STOP : FSM_POLL;
217
218 if (status & SPI_STATUS_WCOL) {
219 /* The SPI controller is stoopid. At slower speeds, it may
220 * raise the SPIF flag before the state machine is actually
221 * finished, which causes a collision (internal to the state
222 * machine only). The manual recommends inserting a delay
223 * between receiving the interrupt and sending the next byte,
224 * but it can also be worked around simply by retrying the
225 * transfer which is what we do here. */
226 ms->wcol_count++;
227 ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
228 ms->wcol_tx_timestamp = get_tbl();
229 data = 0;
230 if (ms->tx_buf)
231 data = *(ms->tx_buf - 1);
232 out_8(ms->regs + SPI_DATA, data); /* try again */
233 return FSM_CONTINUE;
234 } else if (status & SPI_STATUS_MODF) {
235 ms->modf_count++;
236 dev_err(&ms->master->dev, "mode fault\n");
237 mpc52xx_spi_chipsel(ms, 0);
238 ms->message->status = -EIO;
239 ms->message->complete(ms->message->context);
240 ms->state = mpc52xx_spi_fsmstate_idle;
241 return FSM_CONTINUE;
242 }
243
244 /* Read data out of the spi device */
245 ms->byte_count++;
246 if (ms->rx_buf)
247 *ms->rx_buf++ = data;
248
249 /* Is the transfer complete? */
250 ms->len--;
251 if (ms->len == 0) {
252 ms->timestamp = get_tbl();
253 ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
254 ms->state = mpc52xx_spi_fsmstate_wait;
255 return FSM_CONTINUE;
256 }
257
258 /* Write out the next byte */
259 ms->wcol_tx_timestamp = get_tbl();
260 if (ms->tx_buf)
261 out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
262 else
263 out_8(ms->regs + SPI_DATA, 0);
264
265 return FSM_CONTINUE;
266}
267
268/*
269 * WAIT state
270 *
271 * A transfer has completed; need to wait for the delay period to complete
272 * before starting the next transfer
273 */
274static int
275mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
276{
277 if (status && irq)
278 dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
279 status);
280
281 if (((int)get_tbl()) - ms->timestamp < 0)
282 return FSM_POLL;
283
284 ms->message->actual_length += ms->transfer->len;
285
286 /* Check if there is another transfer in this message. If there
287 * aren't then deactivate CS, notify sender, and drop back to idle
288 * to start the next message. */
289 if (ms->transfer->transfer_list.next == &ms->message->transfers) {
290 ms->msg_count++;
291 mpc52xx_spi_chipsel(ms, 0);
292 ms->message->status = 0;
293 ms->message->complete(ms->message->context);
294 ms->state = mpc52xx_spi_fsmstate_idle;
295 return FSM_CONTINUE;
296 }
297
298 /* There is another transfer; kick it off */
299
300 if (ms->cs_change)
301 mpc52xx_spi_chipsel(ms, 0);
302
303 ms->transfer = container_of(ms->transfer->transfer_list.next,
304 struct spi_transfer, transfer_list);
305 mpc52xx_spi_start_transfer(ms);
306 ms->state = mpc52xx_spi_fsmstate_transfer;
307 return FSM_CONTINUE;
308}
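The "(int)get_tbl() - ms->timestamp < 0" test above is the usual signed-difference trick for deadlines on a wrapping 32-bit counter. A small standalone illustration of the idiom, with hypothetical values (not driver code):

#include <stdio.h>
#include <stdint.h>

/* Deadline check that stays correct across 32-bit counter wrap-around. */
static int deadline_passed(uint32_t now, uint32_t deadline)
{
        return (int32_t)(now - deadline) >= 0;
}

int main(void)
{
        uint32_t deadline = 0xfffffff0u + 0x20u;        /* wraps to 0x00000010 */

        printf("%d\n", deadline_passed(0xfffffff8u, deadline)); /* 0: still waiting */
        printf("%d\n", deadline_passed(0x00000020u, deadline)); /* 1: delay elapsed */
        return 0;
}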
309
310/**
311 * mpc52xx_spi_fsm_process - Finite State Machine iteration function
312 * @irq: irq number that triggered the FSM or 0 for polling
313 * @ms: pointer to mpc52xx_spi driver data
314 */
315static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
316{
317 int rc = FSM_CONTINUE;
318 u8 status, data;
319
320 while (rc == FSM_CONTINUE) {
321 /* Interrupt cleared by read of STATUS followed by
322 * read of DATA registers */
323 status = in_8(ms->regs + SPI_STATUS);
324 data = in_8(ms->regs + SPI_DATA);
325 rc = ms->state(irq, ms, status, data);
326 }
327
328 if (rc == FSM_POLL)
329 schedule_work(&ms->work);
330}
331
332/**
333 * mpc52xx_spi_irq - IRQ handler
334 */
335static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
336{
337 struct mpc52xx_spi *ms = _ms;
338 spin_lock(&ms->lock);
339 mpc52xx_spi_fsm_process(irq, ms);
340 spin_unlock(&ms->lock);
341 return IRQ_HANDLED;
342}
343
344/**
345 * mpc52xx_spi_wq - Workqueue function for polling the state machine
346 */
347static void mpc52xx_spi_wq(struct work_struct *work)
348{
349 struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
350 unsigned long flags;
351
352 spin_lock_irqsave(&ms->lock, flags);
353 mpc52xx_spi_fsm_process(0, ms);
354 spin_unlock_irqrestore(&ms->lock, flags);
355}
356
357/*
358 * spi_master ops
359 */
360
361static int mpc52xx_spi_setup(struct spi_device *spi)
362{
363 if (spi->bits_per_word % 8)
364 return -EINVAL;
365
366 if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
367 return -EINVAL;
368
369 if (spi->chip_select >= spi->master->num_chipselect)
370 return -EINVAL;
371
372 return 0;
373}
374
375static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
376{
377 struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
378 unsigned long flags;
379
380 m->actual_length = 0;
381 m->status = -EINPROGRESS;
382
383 spin_lock_irqsave(&ms->lock, flags);
384 list_add_tail(&m->queue, &ms->queue);
385 spin_unlock_irqrestore(&ms->lock, flags);
386 schedule_work(&ms->work);
387
388 return 0;
389}
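The transfer() hook above only queues the message and kicks the workqueue; completion is signalled later through m->complete(). A hedged sketch of how a hypothetical protocol driver would typically submit work to such a controller through the standard SPI core helpers (this helper is not part of this file):

#include <linux/spi/spi.h>

/* Hypothetical helper: write one register address, then read one byte back,
 * within a single chip-select window. */
static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
        struct spi_transfer t[2] = {
                { .tx_buf = &reg, .len = 1, },
                { .rx_buf = val,  .len = 1, },
        };
        struct spi_message m;

        spi_message_init(&m);
        spi_message_add_tail(&t[0], &m);
        spi_message_add_tail(&t[1], &m);

        /* spi_sync() sleeps until the controller's completion callback runs;
         * production code should prefer DMA-safe (heap) buffers over the
         * on-stack 'reg' used here for brevity. */
        return spi_sync(spi, &m);
}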
390
391/*
392 * OF Platform Bus Binding
393 */
394static int __devinit mpc52xx_spi_probe(struct of_device *op,
395 const struct of_device_id *match)
396{
397 struct spi_master *master;
398 struct mpc52xx_spi *ms;
399 void __iomem *regs;
400 u8 ctrl1;
401 int rc, i = 0;
402 int gpio_cs;
403
404 /* MMIO registers */
405 dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
406 regs = of_iomap(op->node, 0);
407 if (!regs)
408 return -ENODEV;
409
410 /* initialize the device */
411 ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
412 out_8(regs + SPI_CTRL1, ctrl1);
413 out_8(regs + SPI_CTRL2, 0x0);
414 out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
415 out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
416
417 /* Clear the status register and re-read it to check for a MODF
418 * failure. This driver cannot currently handle multiple masters
419 * on the SPI bus. This fault will also occur if the SPI signals
420 * are not connected to any pins (port_config setting) */
421 in_8(regs + SPI_STATUS);
422 out_8(regs + SPI_CTRL1, ctrl1);
423
424 in_8(regs + SPI_DATA);
425 if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
426 dev_err(&op->dev, "mode fault; is port_config correct?\n");
427 rc = -EIO;
428 goto err_init;
429 }
430
431 dev_dbg(&op->dev, "allocating spi_master struct\n");
432 master = spi_alloc_master(&op->dev, sizeof *ms);
433 if (!master) {
434 rc = -ENOMEM;
435 goto err_alloc;
436 }
437
438 master->bus_num = -1;
439 master->setup = mpc52xx_spi_setup;
440 master->transfer = mpc52xx_spi_transfer;
441 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
442
443 dev_set_drvdata(&op->dev, master);
444
445 ms = spi_master_get_devdata(master);
446 ms->master = master;
447 ms->regs = regs;
448 ms->irq0 = irq_of_parse_and_map(op->node, 0);
449 ms->irq1 = irq_of_parse_and_map(op->node, 1);
450 ms->state = mpc52xx_spi_fsmstate_idle;
451 ms->ipb_freq = mpc5xxx_get_bus_frequency(op->node);
452 ms->gpio_cs_count = of_gpio_count(op->node);
453 if (ms->gpio_cs_count > 0) {
454 master->num_chipselect = ms->gpio_cs_count;
455 ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
456 GFP_KERNEL);
457 if (!ms->gpio_cs) {
458 rc = -ENOMEM;
459 goto err_alloc;
460 }
461
462 for (i = 0; i < ms->gpio_cs_count; i++) {
463 gpio_cs = of_get_gpio(op->node, i);
464 if (gpio_cs < 0) {
465 dev_err(&op->dev,
466 "could not parse the gpio field "
467 "in oftree\n");
468 rc = -ENODEV;
469 goto err_gpio;
470 }
471
472 rc = gpio_request(gpio_cs, dev_name(&op->dev));
473 if (rc) {
474 dev_err(&op->dev,
475 "can't request spi cs gpio #%d "
476 "on gpio line %d\n", i, gpio_cs);
477 goto err_gpio;
478 }
479
480 gpio_direction_output(gpio_cs, 1);
481 ms->gpio_cs[i] = gpio_cs;
482 }
483 } else {
484 master->num_chipselect = 1;
485 }
486
487 spin_lock_init(&ms->lock);
488 INIT_LIST_HEAD(&ms->queue);
489 INIT_WORK(&ms->work, mpc52xx_spi_wq);
490
491 /* Decide if interrupts can be used */
492 if (ms->irq0 && ms->irq1) {
493 rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
494 "mpc5200-spi-modf", ms);
495 rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
496 "mpc5200-spi-spif", ms);
497 if (rc) {
498 free_irq(ms->irq0, ms);
499 free_irq(ms->irq1, ms);
500 ms->irq0 = ms->irq1 = 0;
501 }
502 } else {
503 /* operate in polled mode */
504 ms->irq0 = ms->irq1 = 0;
505 }
506
507 if (!ms->irq0)
508 dev_info(&op->dev, "using polled mode\n");
509
510 dev_dbg(&op->dev, "registering spi_master struct\n");
511 rc = spi_register_master(master);
512 if (rc)
513 goto err_register;
514
515 of_register_spi_devices(master, op->node);
516 dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
517
518 return rc;
519
520 err_register:
521 dev_err(&ms->master->dev, "initialization failed\n");
522 spi_master_put(master);
523 err_gpio:
524 while (i-- > 0)
525 gpio_free(ms->gpio_cs[i]);
526
527 kfree(ms->gpio_cs);
528 err_alloc:
529 err_init:
530 iounmap(regs);
531 return rc;
532}
533
534static int __devexit mpc52xx_spi_remove(struct of_device *op)
535{
536 struct spi_master *master = dev_get_drvdata(&op->dev);
537 struct mpc52xx_spi *ms = spi_master_get_devdata(master);
538 int i;
539
540 free_irq(ms->irq0, ms);
541 free_irq(ms->irq1, ms);
542
543 for (i = 0; i < ms->gpio_cs_count; i++)
544 gpio_free(ms->gpio_cs[i]);
545
546 kfree(ms->gpio_cs);
547 spi_unregister_master(master);
548 spi_master_put(master);
549 iounmap(ms->regs);
550
551 return 0;
552}
553
554static const struct of_device_id mpc52xx_spi_match[] __devinitconst = {
555 { .compatible = "fsl,mpc5200-spi", },
556 {}
557};
558MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
559
560static struct of_platform_driver mpc52xx_spi_of_driver = {
561 .owner = THIS_MODULE,
562 .name = "mpc52xx-spi",
563 .match_table = mpc52xx_spi_match,
564 .probe = mpc52xx_spi_probe,
565 .remove = __devexit_p(mpc52xx_spi_remove),
566};
567
568static int __init mpc52xx_spi_init(void)
569{
570 return of_register_platform_driver(&mpc52xx_spi_of_driver);
571}
572module_init(mpc52xx_spi_init);
573
574static void __exit mpc52xx_spi_exit(void)
575{
576 of_unregister_platform_driver(&mpc52xx_spi_of_driver);
577}
578module_exit(mpc52xx_spi_exit);
579
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index ba1a872b221e..e0de0d0eedea 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -32,11 +32,12 @@
32#include <linux/err.h> 32#include <linux/err.h>
33#include <linux/clk.h> 33#include <linux/clk.h>
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/slab.h>
35 36
36#include <linux/spi/spi.h> 37#include <linux/spi/spi.h>
37 38
38#include <mach/dma.h> 39#include <plat/dma.h>
39#include <mach/clock.h> 40#include <plat/clock.h>
40 41
41 42
42#define OMAP2_MCSPI_MAX_FREQ 48000000 43#define OMAP2_MCSPI_MAX_FREQ 48000000
@@ -203,6 +204,7 @@ static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
203 204
204 cs->chconf0 = val; 205 cs->chconf0 = val;
205 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val); 206 mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
207 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
206} 208}
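The read-back of CHCONF0 added above is the usual way to flush a posted write on the OMAP interconnect before the code that follows relies on it. A generic sketch of the idiom, with an invented helper name:

#include <linux/types.h>
#include <linux/io.h>

/* Write a register, then read it back so the posted write is guaranteed to
 * have reached the peripheral before returning. */
static inline void reg_write_flush(void __iomem *reg, u32 val)
{
        __raw_writel(val, reg);
        (void)__raw_readl(reg);
}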
207 209
208static void omap2_mcspi_set_dma_req(const struct spi_device *spi, 210static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
@@ -531,7 +533,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
531 goto out; 533 goto out;
532 } 534 }
533#ifdef VERBOSE 535#ifdef VERBOSE
534 dev_dbg(&spi->dev, "write-%d %04x\n", 536 dev_dbg(&spi->dev, "write-%d %08x\n",
535 word_len, *tx); 537 word_len, *tx);
536#endif 538#endif
537 __raw_writel(*tx++, tx_reg); 539 __raw_writel(*tx++, tx_reg);
@@ -549,7 +551,7 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
549 mcspi_write_chconf0(spi, l); 551 mcspi_write_chconf0(spi, l);
550 *rx++ = __raw_readl(rx_reg); 552 *rx++ = __raw_readl(rx_reg);
551#ifdef VERBOSE 553#ifdef VERBOSE
552 dev_dbg(&spi->dev, "read-%d %04x\n", 554 dev_dbg(&spi->dev, "read-%d %08x\n",
553 word_len, *(rx - 1)); 555 word_len, *(rx - 1));
554#endif 556#endif
555 } 557 }
@@ -578,6 +580,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
578 struct spi_master *spi_cntrl; 580 struct spi_master *spi_cntrl;
579 u32 l = 0, div = 0; 581 u32 l = 0, div = 0;
580 u8 word_len = spi->bits_per_word; 582 u8 word_len = spi->bits_per_word;
583 u32 speed_hz = spi->max_speed_hz;
581 584
582 mcspi = spi_master_get_devdata(spi->master); 585 mcspi = spi_master_get_devdata(spi->master);
583 spi_cntrl = mcspi->master; 586 spi_cntrl = mcspi->master;
@@ -587,9 +590,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
587 590
588 cs->word_len = word_len; 591 cs->word_len = word_len;
589 592
590 if (spi->max_speed_hz) { 593 if (t && t->speed_hz)
594 speed_hz = t->speed_hz;
595
596 if (speed_hz) {
591 while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div)) 597 while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
592 > spi->max_speed_hz) 598 > speed_hz)
593 div++; 599 div++;
594 } else 600 } else
595 div = 15; 601 div = 15;
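With the per-transfer override above, the divider is still the smallest div in 0..15 such that 48 MHz / 2^div does not exceed the requested rate; a 10 MHz t->speed_hz, for instance, gives div = 3 (48 MHz / 8 = 6 MHz). A standalone sketch of that selection:

#include <stdio.h>

int main(void)
{
        unsigned int max_freq = 48000000;       /* OMAP2_MCSPI_MAX_FREQ */
        unsigned int speed_hz = 10000000;       /* e.g. a t->speed_hz override */
        unsigned int div = 0;

        while (div <= 15 && (max_freq / (1u << div)) > speed_hz)
                div++;

        printf("div=%u -> %u Hz\n", div, max_freq / (1u << div)); /* div=3 -> 6000000 Hz */
        return 0;
}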
@@ -751,11 +757,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
751 mcspi = spi_master_get_devdata(spi->master); 757 mcspi = spi_master_get_devdata(spi->master);
752 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 758 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
753 759
754 /* Unlink controller state from context save list */ 760 if (spi->controller_state) {
755 cs = spi->controller_state; 761 /* Unlink controller state from context save list */
756 list_del(&cs->node); 762 cs = spi->controller_state;
763 list_del(&cs->node);
757 764
758 kfree(spi->controller_state); 765 kfree(spi->controller_state);
766 }
759 767
760 if (mcspi_dma->dma_rx_channel != -1) { 768 if (mcspi_dma->dma_rx_channel != -1) {
761 omap_free_dma(mcspi_dma->dma_rx_channel); 769 omap_free_dma(mcspi_dma->dma_rx_channel);
@@ -1014,7 +1022,7 @@ static u8 __initdata spi2_txdma_id[] = {
1014 OMAP24XX_DMA_SPI2_TX1, 1022 OMAP24XX_DMA_SPI2_TX1,
1015}; 1023};
1016 1024
1017#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) \ 1025#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
1018 || defined(CONFIG_ARCH_OMAP4) 1026 || defined(CONFIG_ARCH_OMAP4)
1019static u8 __initdata spi3_rxdma_id[] = { 1027static u8 __initdata spi3_rxdma_id[] = {
1020 OMAP24XX_DMA_SPI3_RX0, 1028 OMAP24XX_DMA_SPI3_RX0,
diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/omap_spi_100k.c
new file mode 100644
index 000000000000..24668b30a52d
--- /dev/null
+++ b/drivers/spi/omap_spi_100k.c
@@ -0,0 +1,636 @@
1/*
2 * OMAP7xx SPI 100k controller driver
3 * Author: Fabrice Crohas <fcrohas@gmail.com>
4 * from original omap1_mcspi driver
5 *
6 * Copyright (C) 2005, 2006 Nokia Corporation
7 * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
8 * Juha Yrjälä <juha.yrjola@nokia.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/module.h>
29#include <linux/device.h>
30#include <linux/delay.h>
31#include <linux/platform_device.h>
32#include <linux/err.h>
33#include <linux/clk.h>
34#include <linux/io.h>
35#include <linux/gpio.h>
36#include <linux/slab.h>
37
38#include <linux/spi/spi.h>
39
40#include <plat/clock.h>
41
42#define OMAP1_SPI100K_MAX_FREQ 48000000
43
44#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
45
46#define SPI_SETUP1 0x00
47#define SPI_SETUP2 0x02
48#define SPI_CTRL 0x04
49#define SPI_STATUS 0x06
50#define SPI_TX_LSB 0x08
51#define SPI_TX_MSB 0x0a
52#define SPI_RX_LSB 0x0c
53#define SPI_RX_MSB 0x0e
54
55#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5)
56#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4)
57#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1)
58#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0)
59
60#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0)
61#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0)
62#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5)
63#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5)
64#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10)
65#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10)
66
67#define SPI_CTRL_SEN(x) ((x) << 7)
68#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2)
69#define SPI_CTRL_WR (1UL << 1)
70#define SPI_CTRL_RD (1UL << 0)
71
72#define SPI_STATUS_WE (1UL << 1)
73#define SPI_STATUS_RD (1UL << 0)
74
75#define WRITE 0
76#define READ 1
77
78
79/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
80 * cache operations; better heuristics consider wordsize and bitrate.
81 */
82#define DMA_MIN_BYTES 8
83
84#define SPI_RUNNING 0
85#define SPI_SHUTDOWN 1
86
87struct omap1_spi100k {
88 struct work_struct work;
89
90 /* lock protects queue and registers */
91 spinlock_t lock;
92 struct list_head msg_queue;
93 struct spi_master *master;
94 struct clk *ick;
95 struct clk *fck;
96
97 /* Virtual base address of the controller */
98 void __iomem *base;
99
100 /* State of the SPI */
101 unsigned int state;
102};
103
104struct omap1_spi100k_cs {
105 void __iomem *base;
106 int word_len;
107};
108
109static struct workqueue_struct *omap1_spi100k_wq;
110
111#define MOD_REG_BIT(val, mask, set) do { \
112 if (set) \
113 val |= mask; \
114 else \
115 val &= ~mask; \
116} while (0)
117
118static void spi100k_enable_clock(struct spi_master *master)
119{
120 unsigned int val;
121 struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
122
123 /* enable SPI */
124 val = readw(spi100k->base + SPI_SETUP1);
125 val |= SPI_SETUP1_CLOCK_ENABLE;
126 writew(val, spi100k->base + SPI_SETUP1);
127}
128
129static void spi100k_disable_clock(struct spi_master *master)
130{
131 unsigned int val;
132 struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
133
134 /* disable SPI */
135 val = readw(spi100k->base + SPI_SETUP1);
136 val &= ~SPI_SETUP1_CLOCK_ENABLE;
137 writew(val, spi100k->base + SPI_SETUP1);
138}
139
140static void spi100k_write_data(struct spi_master *master, int len, int data)
141{
142 struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
143
144 /* write 16-bit word */
145 spi100k_enable_clock(master);
146 writew( data , spi100k->base + SPI_TX_MSB);
147
148 writew(SPI_CTRL_SEN(0) |
149 SPI_CTRL_WORD_SIZE(len) |
150 SPI_CTRL_WR,
151 spi100k->base + SPI_CTRL);
152
153 /* Wait for the SPI_STATUS_WE bit to signal that the write completed */
154 while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE);
155 udelay(1000);
156
157 spi100k_disable_clock(master);
158}
159
160static int spi100k_read_data(struct spi_master *master, int len)
161{
162 int dataH,dataL;
163 struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
164
165 spi100k_enable_clock(master);
166 writew(SPI_CTRL_SEN(0) |
167 SPI_CTRL_WORD_SIZE(len) |
168 SPI_CTRL_RD,
169 spi100k->base + SPI_CTRL);
170
171 while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD);
172 udelay(1000);
173
174 dataL = readw(spi100k->base + SPI_RX_LSB);
175 dataH = readw(spi100k->base + SPI_RX_MSB);
176 spi100k_disable_clock(master);
177
178 return dataL;
179}
180
181static void spi100k_open(struct spi_master *master)
182{
183 /* get control of SPI */
184 struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
185
186 writew(SPI_SETUP1_INT_READ_ENABLE |
187 SPI_SETUP1_INT_WRITE_ENABLE |
188 SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1);
189
190 /* configure clock and interrupts */
191 writew(SPI_SETUP2_ACTIVE_EDGE_FALLING |
192 SPI_SETUP2_NEGATIVE_LEVEL |
193 SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2);
194}
195
196static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
197{
198 if (enable)
199 writew(0x05fc, spi100k->base + SPI_CTRL);
200 else
201 writew(0x05fd, spi100k->base + SPI_CTRL);
202}
203
204static unsigned
205omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
206{
207 struct omap1_spi100k *spi100k;
208 struct omap1_spi100k_cs *cs = spi->controller_state;
209 unsigned int count, c;
210 int word_len;
211
212 spi100k = spi_master_get_devdata(spi->master);
213 count = xfer->len;
214 c = count;
215 word_len = cs->word_len;
216
217 /* RX_ONLY mode needs dummy data in TX reg */
218 if (xfer->tx_buf == NULL)
219 spi100k_write_data(spi->master,word_len, 0);
220
221 if (word_len <= 8) {
222 u8 *rx;
223 const u8 *tx;
224
225 rx = xfer->rx_buf;
226 tx = xfer->tx_buf;
227 do {
228 c-=1;
229 if (xfer->tx_buf != NULL)
230 spi100k_write_data(spi->master,word_len, *tx++);
231 if (xfer->rx_buf != NULL)
232 *rx++ = spi100k_read_data(spi->master,word_len);
233 } while(c);
234 } else if (word_len <= 16) {
235 u16 *rx;
236 const u16 *tx;
237
238 rx = xfer->rx_buf;
239 tx = xfer->tx_buf;
240 do {
241 c-=2;
242 if (xfer->tx_buf != NULL)
243 spi100k_write_data(spi->master,word_len, *tx++);
244 if (xfer->rx_buf != NULL)
245 *rx++ = spi100k_read_data(spi->master,word_len);
246 } while(c);
247 } else if (word_len <= 32) {
248 u32 *rx;
249 const u32 *tx;
250
251 rx = xfer->rx_buf;
252 tx = xfer->tx_buf;
253 do {
254 c-=4;
255 if (xfer->tx_buf != NULL)
256 spi100k_write_data(spi->master,word_len, *tx++);
257 if (xfer->rx_buf != NULL)
258 *rx++ = spi100k_read_data(spi->master,word_len);
259 } while(c);
260 }
261 return count - c;
262}
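The return value above is byte-based: c is decremented by the per-word byte count (1, 2 or 4), so count - c is the number of bytes actually clocked. A small standalone illustration of that accounting, with hypothetical lengths:

#include <stdio.h>

int main(void)
{
        unsigned int len = 6;           /* xfer->len in bytes */
        unsigned int word_len = 12;     /* bits per word */
        unsigned int step = word_len <= 8 ? 1 : (word_len <= 16 ? 2 : 4);

        printf("%u bytes = %u words at %u bits, %u byte(s) per word\n",
               len, len / step, word_len, step);
        return 0;
}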
263
264/* called only when no transfer is active to this device */
265static int omap1_spi100k_setup_transfer(struct spi_device *spi,
266 struct spi_transfer *t)
267{
268 struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
269 struct omap1_spi100k_cs *cs = spi->controller_state;
270 u8 word_len = spi->bits_per_word;
271
272 if (t != NULL && t->bits_per_word)
273 word_len = t->bits_per_word;
274 if (!word_len)
275 word_len = 8;
276
277 if (spi->bits_per_word > 32)
278 return -EINVAL;
279 cs->word_len = word_len;
280
281 /* SPI init before transfer */
282 writew(0x3e , spi100k->base + SPI_SETUP1);
283 writew(0x00 , spi100k->base + SPI_STATUS);
284 writew(0x3e , spi100k->base + SPI_CTRL);
285
286 return 0;
287}
288
289/* the spi->mode bits understood by this driver: */
290#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
291
292static int omap1_spi100k_setup(struct spi_device *spi)
293{
294 int ret;
295 struct omap1_spi100k *spi100k;
296 struct omap1_spi100k_cs *cs = spi->controller_state;
297
298 if (spi->bits_per_word < 4 || spi->bits_per_word > 32) {
299 dev_dbg(&spi->dev, "setup: unsupported %d bit words\n",
300 spi->bits_per_word);
301 return -EINVAL;
302 }
303
304 spi100k = spi_master_get_devdata(spi->master);
305
306 if (!cs) {
307 cs = kzalloc(sizeof *cs, GFP_KERNEL);
308 if (!cs)
309 return -ENOMEM;
310 cs->base = spi100k->base + spi->chip_select * 0x14;
311 spi->controller_state = cs;
312 }
313
314 spi100k_open(spi->master);
315
316 clk_enable(spi100k->ick);
317 clk_enable(spi100k->fck);
318
319 ret = omap1_spi100k_setup_transfer(spi, NULL);
320
321 clk_disable(spi100k->ick);
322 clk_disable(spi100k->fck);
323
324 return ret;
325}
326
327static void omap1_spi100k_work(struct work_struct *work)
328{
329 struct omap1_spi100k *spi100k;
330 int status = 0;
331
332 spi100k = container_of(work, struct omap1_spi100k, work);
333 spin_lock_irq(&spi100k->lock);
334
335 clk_enable(spi100k->ick);
336 clk_enable(spi100k->fck);
337
338 /* We only enable one channel at a time -- the one whose message is
339 * at the head of the queue -- although this controller would gladly
340 * arbitrate among multiple channels. This corresponds to "single
341 * channel" master mode. As a side effect, we need to manage the
342 * chipselect with the FORCE bit ... CS != channel enable.
343 */
344 while (!list_empty(&spi100k->msg_queue)) {
345 struct spi_message *m;
346 struct spi_device *spi;
347 struct spi_transfer *t = NULL;
348 int cs_active = 0;
349 struct omap1_spi100k_cs *cs;
350 int par_override = 0;
351
352 m = container_of(spi100k->msg_queue.next, struct spi_message,
353 queue);
354
355 list_del_init(&m->queue);
356 spin_unlock_irq(&spi100k->lock);
357
358 spi = m->spi;
359 cs = spi->controller_state;
360
361 list_for_each_entry(t, &m->transfers, transfer_list) {
362 if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
363 status = -EINVAL;
364 break;
365 }
366 if (par_override || t->speed_hz || t->bits_per_word) {
367 par_override = 1;
368 status = omap1_spi100k_setup_transfer(spi, t);
369 if (status < 0)
370 break;
371 if (!t->speed_hz && !t->bits_per_word)
372 par_override = 0;
373 }
374
375 if (!cs_active) {
376 omap1_spi100k_force_cs(spi100k, 1);
377 cs_active = 1;
378 }
379
380 if (t->len) {
381 unsigned count;
382
383 /* RX_ONLY mode needs dummy data in TX reg */
384 if (t->tx_buf == NULL)
385 spi100k_write_data(spi->master, 8, 0);
386
387 count = omap1_spi100k_txrx_pio(spi, t);
388 m->actual_length += count;
389
390 if (count != t->len) {
391 status = -EIO;
392 break;
393 }
394 }
395
396 if (t->delay_usecs)
397 udelay(t->delay_usecs);
398
399 /* ignore the "leave it on after last xfer" hint */
400
401 if (t->cs_change) {
402 omap1_spi100k_force_cs(spi100k, 0);
403 cs_active = 0;
404 }
405 }
406
407 /* Restore defaults if they were overridden */
408 if (par_override) {
409 par_override = 0;
410 status = omap1_spi100k_setup_transfer(spi, NULL);
411 }
412
413 if (cs_active)
414 omap1_spi100k_force_cs(spi100k, 0);
415
416 m->status = status;
417 m->complete(m->context);
418
419 spin_lock_irq(&spi100k->lock);
420 }
421
422 clk_disable(spi100k->ick);
423 clk_disable(spi100k->fck);
424 spin_unlock_irq(&spi100k->lock);
425
426 if (status < 0)
427 printk(KERN_WARNING "spi transfer failed with %d\n", status);
428}
429
430static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
431{
432 struct omap1_spi100k *spi100k;
433 unsigned long flags;
434 struct spi_transfer *t;
435
436 m->actual_length = 0;
437 m->status = -EINPROGRESS;
438
439 spi100k = spi_master_get_devdata(spi->master);
440
441 /* Don't accept new work if we're shutting down */
442 if (spi100k->state == SPI_SHUTDOWN)
443 return -ESHUTDOWN;
444
445 /* reject invalid messages and transfers */
446 if (list_empty(&m->transfers) || !m->complete)
447 return -EINVAL;
448
449 list_for_each_entry(t, &m->transfers, transfer_list) {
450 const void *tx_buf = t->tx_buf;
451 void *rx_buf = t->rx_buf;
452 unsigned len = t->len;
453
454 if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ
455 || (len && !(rx_buf || tx_buf))
456 || (t->bits_per_word &&
457 ( t->bits_per_word < 4
458 || t->bits_per_word > 32))) {
459 dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
460 t->speed_hz,
461 len,
462 tx_buf ? "tx" : "",
463 rx_buf ? "rx" : "",
464 t->bits_per_word);
465 return -EINVAL;
466 }
467
468 if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) {
469 dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
470 t->speed_hz,
471 OMAP1_SPI100K_MAX_FREQ/(1<<16));
472 return -EINVAL;
473 }
474
475 }
476
477 spin_lock_irqsave(&spi100k->lock, flags);
478 list_add_tail(&m->queue, &spi100k->msg_queue);
479 queue_work(omap1_spi100k_wq, &spi100k->work);
480 spin_unlock_irqrestore(&spi100k->lock, flags);
481
482 return 0;
483}
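The lower bound rejected above works out to OMAP1_SPI100K_MAX_FREQ / 2^16 = 48 000 000 / 65 536, roughly 732 Hz; slower t->speed_hz requests are refused. A trivial check of the arithmetic:

#include <stdio.h>

#define OMAP1_SPI100K_MAX_FREQ 48000000

int main(void)
{
        printf("slowest accepted rate: %d Hz\n",
               OMAP1_SPI100K_MAX_FREQ / (1 << 16));     /* prints 732 */
        return 0;
}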
484
485static int __init omap1_spi100k_reset(struct omap1_spi100k *spi100k)
486{
487 return 0;
488}
489
490static int __devinit omap1_spi100k_probe(struct platform_device *pdev)
491{
492 struct spi_master *master;
493 struct omap1_spi100k *spi100k;
494 int status = 0;
495
496 if (!pdev->id)
497 return -EINVAL;
498
499 master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
500 if (master == NULL) {
501 dev_dbg(&pdev->dev, "master allocation failed\n");
502 return -ENOMEM;
503 }
504
505 if (pdev->id != -1)
506 master->bus_num = pdev->id;
507
508 master->setup = omap1_spi100k_setup;
509 master->transfer = omap1_spi100k_transfer;
510 master->cleanup = NULL;
511 master->num_chipselect = 2;
512 master->mode_bits = MODEBITS;
513
514 dev_set_drvdata(&pdev->dev, master);
515
516 spi100k = spi_master_get_devdata(master);
517 spi100k->master = master;
518
519 /*
520 * The memory region base address is taken as the platform_data.
521 * You should allocate this with ioremap() before initializing
522 * the SPI.
523 */
524 spi100k->base = (void __iomem *) pdev->dev.platform_data;
525
526 INIT_WORK(&spi100k->work, omap1_spi100k_work);
527
528 spin_lock_init(&spi100k->lock);
529 INIT_LIST_HEAD(&spi100k->msg_queue);
530 spi100k->ick = clk_get(&pdev->dev, "ick");
531 if (IS_ERR(spi100k->ick)) {
532 dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
533 status = PTR_ERR(spi100k->ick);
534 goto err1;
535 }
536
537 spi100k->fck = clk_get(&pdev->dev, "fck");
538 if (IS_ERR(spi100k->fck)) {
539 dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
540 status = PTR_ERR(spi100k->fck);
541 goto err2;
542 }
543
544 if (omap1_spi100k_reset(spi100k) < 0)
545 goto err3;
546
547 status = spi_register_master(master);
548 if (status < 0)
549 goto err3;
550
551 spi100k->state = SPI_RUNNING;
552
553 return status;
554
555err3:
556 clk_put(spi100k->fck);
557err2:
558 clk_put(spi100k->ick);
559err1:
560 spi_master_put(master);
561 return status;
562}
563
564static int __exit omap1_spi100k_remove(struct platform_device *pdev)
565{
566 struct spi_master *master;
567 struct omap1_spi100k *spi100k;
568 struct resource *r;
569 unsigned limit = 500;
570 unsigned long flags;
571 int status = 0;
572
573 master = dev_get_drvdata(&pdev->dev);
574 spi100k = spi_master_get_devdata(master);
575
576 spin_lock_irqsave(&spi100k->lock, flags);
577
578 spi100k->state = SPI_SHUTDOWN;
579 while (!list_empty(&spi100k->msg_queue) && limit--) {
580 spin_unlock_irqrestore(&spi100k->lock, flags);
581 msleep(10);
582 spin_lock_irqsave(&spi100k->lock, flags);
583 }
584
585 if (!list_empty(&spi100k->msg_queue))
586 status = -EBUSY;
587
588 spin_unlock_irqrestore(&spi100k->lock, flags);
589
590 if (status != 0)
591 return status;
592
593 clk_put(spi100k->fck);
594 clk_put(spi100k->ick);
595
596 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
597
598 spi_unregister_master(master);
599
600 return 0;
601}
602
603static struct platform_driver omap1_spi100k_driver = {
604 .driver = {
605 .name = "omap1_spi100k",
606 .owner = THIS_MODULE,
607 },
608 .remove = __exit_p(omap1_spi100k_remove),
609};
610
611
612static int __init omap1_spi100k_init(void)
613{
614 omap1_spi100k_wq = create_singlethread_workqueue(
615 omap1_spi100k_driver.driver.name);
616
617 if (omap1_spi100k_wq == NULL)
618 return -1;
619
620 return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe);
621}
622
623static void __exit omap1_spi100k_exit(void)
624{
625 platform_driver_unregister(&omap1_spi100k_driver);
626
627 destroy_workqueue(omap1_spi100k_wq);
628}
629
630module_init(omap1_spi100k_init);
631module_exit(omap1_spi100k_exit);
632
633MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
634MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
635MODULE_LICENSE("GPL");
636
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/omap_uwire.c
index e75ba9b28898..160d3266205f 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/omap_uwire.c
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/err.h> 42#include <linux/err.h>
43#include <linux/clk.h> 43#include <linux/clk.h>
44#include <linux/slab.h>
44 45
45#include <linux/spi/spi.h> 46#include <linux/spi/spi.h>
46#include <linux/spi/spi_bitbang.h> 47#include <linux/spi/spi_bitbang.h>
@@ -51,8 +52,8 @@
51#include <asm/io.h> 52#include <asm/io.h>
52#include <asm/mach-types.h> 53#include <asm/mach-types.h>
53 54
54#include <mach/mux.h> 55#include <plat/mux.h>
55#include <mach/omap730.h> /* OMAP730_IO_CONF registers */ 56#include <plat/omap7xx.h> /* OMAP7XX_IO_CONF registers */
56 57
57 58
58/* FIXME address is now a platform device resource, 59/* FIXME address is now a platform device resource,
@@ -504,7 +505,7 @@ static int __init uwire_probe(struct platform_device *pdev)
504 } 505 }
505 clk_enable(uwire->ck); 506 clk_enable(uwire->ck);
506 507
507 if (cpu_is_omap730()) 508 if (cpu_is_omap7xx())
508 uwire_idx_shift = 1; 509 uwire_idx_shift = 1;
509 else 510 else
510 uwire_idx_shift = 2; 511 uwire_idx_shift = 2;
@@ -573,8 +574,8 @@ static int __init omap_uwire_init(void)
573 } 574 }
574 if (machine_is_omap_perseus2()) { 575 if (machine_is_omap_perseus2()) {
575 /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */ 576 /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */
576 int val = omap_readl(OMAP730_IO_CONF_9) & ~0x00EEE000; 577 int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000;
577 omap_writel(val | 0x00AAA000, OMAP730_IO_CONF_9); 578 omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9);
578 } 579 }
579 580
580 return platform_driver_probe(&uwire_driver, uwire_probe); 581 return platform_driver_probe(&uwire_driver, uwire_probe);
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index c8c2b693ffac..36828358a4d8 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -29,6 +29,7 @@
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/clk.h> 30#include <linux/clk.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/slab.h>
32 33
33#include <asm/io.h> 34#include <asm/io.h>
34#include <asm/irq.h> 35#include <asm/irq.h>
@@ -1709,7 +1710,7 @@ static int pxa2xx_spi_resume(struct device *dev)
1709 return 0; 1710 return 0;
1710} 1711}
1711 1712
1712static struct dev_pm_ops pxa2xx_spi_pm_ops = { 1713static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
1713 .suspend = pxa2xx_spi_suspend, 1714 .suspend = pxa2xx_spi_suspend,
1714 .resume = pxa2xx_spi_resume, 1715 .resume = pxa2xx_spi_resume,
1715}; 1716};
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b76f2468a84a..b3a1f9259b62 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -23,6 +23,7 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/cache.h> 24#include <linux/cache.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/slab.h>
26#include <linux/mod_devicetable.h> 27#include <linux/mod_devicetable.h>
27#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
28 29
@@ -40,7 +41,7 @@ static void spidev_release(struct device *dev)
40 spi->master->cleanup(spi); 41 spi->master->cleanup(spi);
41 42
42 spi_master_put(spi->master); 43 spi_master_put(spi->master);
43 kfree(dev); 44 kfree(spi);
44} 45}
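The kfree() fix above matters because the allocation being freed is the containing struct spi_device; the struct device handed to the release callback is only a member of it, and freeing through the container pointer is the general rule. A generic, hypothetical sketch of that pattern:

#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical driver-private object embedding a struct device. */
struct foo_device {
        int id;
        struct device dev;      /* deliberately not the first member */
};

static void foo_release(struct device *dev)
{
        struct foo_device *foo = container_of(dev, struct foo_device, dev);

        kfree(foo);             /* free the container, not the member */
}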
45 46
46static ssize_t 47static ssize_t
@@ -256,6 +257,7 @@ int spi_add_device(struct spi_device *spi)
256{ 257{
257 static DEFINE_MUTEX(spi_add_lock); 258 static DEFINE_MUTEX(spi_add_lock);
258 struct device *dev = spi->master->dev.parent; 259 struct device *dev = spi->master->dev.parent;
260 struct device *d;
259 int status; 261 int status;
260 262
261 /* Chipselects are numbered 0..max; validate. */ 263 /* Chipselects are numbered 0..max; validate. */
@@ -277,10 +279,11 @@ int spi_add_device(struct spi_device *spi)
277 */ 279 */
278 mutex_lock(&spi_add_lock); 280 mutex_lock(&spi_add_lock);
279 281
280 if (bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev)) 282 d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
281 != NULL) { 283 if (d != NULL) {
282 dev_err(dev, "chipselect %d already in use\n", 284 dev_err(dev, "chipselect %d already in use\n",
283 spi->chip_select); 285 spi->chip_select);
286 put_device(d);
284 status = -EBUSY; 287 status = -EBUSY;
285 goto done; 288 goto done;
286 } 289 }
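The put_device() added above balances the reference that the lookup takes: any device returned by a bus_find_device*() helper comes back with its refcount raised. A hypothetical helper showing the get/put pairing:

#include <linux/device.h>
#include <linux/spi/spi.h>

static bool example_spi_name_in_use(const char *name)
{
        struct device *d = bus_find_device_by_name(&spi_bus_type, NULL, name);

        if (!d)
                return false;
        put_device(d);          /* drop the reference the lookup took */
        return true;
}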
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index 73e24ef5a2f9..10a6dc3d37ac 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <linux/slab.h>
15#include <linux/io.h> 16#include <linux/io.h>
16#include <linux/ioport.h> 17#include <linux/ioport.h>
17#include <linux/irq.h> 18#include <linux/irq.h>
@@ -1294,7 +1295,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
1294 goto out_error_get_res; 1295 goto out_error_get_res;
1295 } 1296 }
1296 1297
1297 drv_data->regs_base = ioremap(res->start, (res->end - res->start + 1)); 1298 drv_data->regs_base = ioremap(res->start, resource_size(res));
1298 if (drv_data->regs_base == NULL) { 1299 if (drv_data->regs_base == NULL) {
1299 dev_err(dev, "Cannot map IO\n"); 1300 dev_err(dev, "Cannot map IO\n");
1300 status = -ENXIO; 1301 status = -ENXIO;
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
index f1db395dd889..5265330a528f 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi_bitbang.c
@@ -23,6 +23,7 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/slab.h>
26 27
27#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
28#include <linux/spi/spi_bitbang.h> 29#include <linux/spi/spi_bitbang.h>
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 89c22efedfb0..7972e9077473 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -30,6 +30,7 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/platform_device.h> 32#include <linux/platform_device.h>
33#include <linux/slab.h>
33#include <linux/spi/spi.h> 34#include <linux/spi/spi.h>
34#include <linux/spi/spi_bitbang.h> 35#include <linux/spi/spi_bitbang.h>
35#include <linux/types.h> 36#include <linux/types.h>
@@ -44,6 +45,9 @@
44#define MXC_CSPIINT 0x0c 45#define MXC_CSPIINT 0x0c
45#define MXC_RESET 0x1c 46#define MXC_RESET 0x1c
46 47
48#define MX3_CSPISTAT 0x14
49#define MX3_CSPISTAT_RR (1 << 3)
50
47/* generic defines to abstract from the different register layouts */ 51/* generic defines to abstract from the different register layouts */
48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 52#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 53#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
@@ -205,7 +209,7 @@ static int mx31_config(struct spi_imx_data *spi_imx,
205 209
206 if (cpu_is_mx31()) 210 if (cpu_is_mx31())
207 reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; 211 reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
208 else if (cpu_is_mx35()) { 212 else if (cpu_is_mx25() || cpu_is_mx35()) {
209 reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; 213 reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
210 reg |= MX31_CSPICTRL_SSCTL; 214 reg |= MX31_CSPICTRL_SSCTL;
211 } 215 }
@@ -219,7 +223,7 @@ static int mx31_config(struct spi_imx_data *spi_imx,
219 if (config->cs < 0) { 223 if (config->cs < 0) {
220 if (cpu_is_mx31()) 224 if (cpu_is_mx31())
221 reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT; 225 reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT;
222 else if (cpu_is_mx35()) 226 else if (cpu_is_mx25() || cpu_is_mx35())
223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; 227 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
224 } 228 }
225 229
@@ -466,7 +470,7 @@ static int spi_imx_setup(struct spi_device *spi)
466 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 470 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
467 int gpio = spi_imx->chipselect[spi->chip_select]; 471 int gpio = spi_imx->chipselect[spi->chip_select];
468 472
469 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, 473 dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
470 spi->mode, spi->bits_per_word, spi->max_speed_hz); 474 spi->mode, spi->bits_per_word, spi->max_speed_hz);
471 475
472 if (gpio >= 0) 476 if (gpio >= 0)
@@ -481,7 +485,7 @@ static void spi_imx_cleanup(struct spi_device *spi)
481{ 485{
482} 486}
483 487
484static int __init spi_imx_probe(struct platform_device *pdev) 488static int __devinit spi_imx_probe(struct platform_device *pdev)
485{ 489{
486 struct spi_imx_master *mxc_platform_info; 490 struct spi_imx_master *mxc_platform_info;
487 struct spi_master *master; 491 struct spi_master *master;
@@ -489,7 +493,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
489 struct resource *res; 493 struct resource *res;
490 int i, ret; 494 int i, ret;
491 495
492 mxc_platform_info = (struct spi_imx_master *)pdev->dev.platform_data; 496 mxc_platform_info = dev_get_platdata(&pdev->dev);
493 if (!mxc_platform_info) { 497 if (!mxc_platform_info) {
494 dev_err(&pdev->dev, "can't get the platform data\n"); 498 dev_err(&pdev->dev, "can't get the platform data\n");
495 return -EINVAL; 499 return -EINVAL;
@@ -513,11 +517,12 @@ static int __init spi_imx_probe(struct platform_device *pdev)
513 continue; 517 continue;
514 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); 518 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
515 if (ret) { 519 if (ret) {
516 i--; 520 while (i > 0) {
517 while (i > 0) 521 i--;
518 if (spi_imx->chipselect[i] >= 0) 522 if (spi_imx->chipselect[i] >= 0)
519 gpio_free(spi_imx->chipselect[i--]); 523 gpio_free(spi_imx->chipselect[i]);
520 dev_err(&pdev->dev, "can't get cs gpios"); 524 }
525 dev_err(&pdev->dev, "can't get cs gpios\n");
521 goto out_master_put; 526 goto out_master_put;
522 } 527 }
523 } 528 }
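The reworked loop above releases only the chip selects that were successfully requested before the failure, and never touches the failing index itself. A standalone sketch of that unwind pattern, using stub functions for illustration only:

#include <stdio.h>

static int acquire(int i)  { return i == 3 ? -1 : 0; }  /* pretend i == 3 fails */
static void release(int i) { printf("release %d\n", i); }

int main(void)
{
        int i, ret = 0;

        for (i = 0; i < 5; i++) {
                ret = acquire(i);
                if (ret) {
                        while (i > 0) {         /* unwind 2, 1, 0 only */
                                i--;
                                release(i);
                        }
                        break;
                }
        }
        return ret ? 1 : 0;
}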
@@ -551,7 +556,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
551 } 556 }
552 557
553 spi_imx->irq = platform_get_irq(pdev, 0); 558 spi_imx->irq = platform_get_irq(pdev, 0);
554 if (!spi_imx->irq) { 559 if (spi_imx->irq <= 0) {
555 ret = -EINVAL; 560 ret = -EINVAL;
556 goto out_iounmap; 561 goto out_iounmap;
557 } 562 }
@@ -562,7 +567,7 @@ static int __init spi_imx_probe(struct platform_device *pdev)
562 goto out_iounmap; 567 goto out_iounmap;
563 } 568 }
564 569
565 if (cpu_is_mx31() || cpu_is_mx35()) { 570 if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) {
566 spi_imx->intctrl = mx31_intctrl; 571 spi_imx->intctrl = mx31_intctrl;
567 spi_imx->config = mx31_config; 572 spi_imx->config = mx31_config;
568 spi_imx->trigger = mx31_trigger; 573 spi_imx->trigger = mx31_trigger;
@@ -590,9 +595,14 @@ static int __init spi_imx_probe(struct platform_device *pdev)
590 clk_enable(spi_imx->clk); 595 clk_enable(spi_imx->clk);
591 spi_imx->spi_clk = clk_get_rate(spi_imx->clk); 596 spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
592 597
593 if (!cpu_is_mx31() || !cpu_is_mx35()) 598 if (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
594 writel(1, spi_imx->base + MXC_RESET); 599 writel(1, spi_imx->base + MXC_RESET);
595 600
601 /* drain receive buffer */
602 if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35())
603 while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR)
604 readl(spi_imx->base + MXC_CSPIRXDATA);
605
596 spi_imx->intctrl(spi_imx, 0); 606 spi_imx->intctrl(spi_imx, 0);
597 607
598 ret = spi_bitbang_start(&spi_imx->bitbang); 608 ret = spi_bitbang_start(&spi_imx->bitbang);
@@ -625,7 +635,7 @@ out_master_put:
625 return ret; 635 return ret;
626} 636}
627 637
628static int __exit spi_imx_remove(struct platform_device *pdev) 638static int __devexit spi_imx_remove(struct platform_device *pdev)
629{ 639{
630 struct spi_master *master = platform_get_drvdata(pdev); 640 struct spi_master *master = platform_get_drvdata(pdev);
631 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 641 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -659,7 +669,7 @@ static struct platform_driver spi_imx_driver = {
659 .owner = THIS_MODULE, 669 .owner = THIS_MODULE,
660 }, 670 },
661 .probe = spi_imx_probe, 671 .probe = spi_imx_probe,
662 .remove = __exit_p(spi_imx_remove), 672 .remove = __devexit_p(spi_imx_remove),
663}; 673};
664 674
665static int __init spi_imx_init(void) 675static int __init spi_imx_init(void)
diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c
index 0fd0ec4d3a7d..14d052316502 100644
--- a/drivers/spi/spi_mpc8xxx.c
+++ b/drivers/spi/spi_mpc8xxx.c
@@ -5,6 +5,10 @@
5 * 5 *
6 * Copyright (C) 2006 Polycom, Inc. 6 * Copyright (C) 2006 Polycom, Inc.
7 * 7 *
8 * CPM SPI and QE buffer descriptors mode support:
9 * Copyright (c) 2009 MontaVista Software, Inc.
10 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
11 *
8 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -27,15 +31,30 @@
27#include <linux/spi/spi_bitbang.h> 31#include <linux/spi/spi_bitbang.h>
28#include <linux/platform_device.h> 32#include <linux/platform_device.h>
29#include <linux/fsl_devices.h> 33#include <linux/fsl_devices.h>
34#include <linux/dma-mapping.h>
35#include <linux/mm.h>
36#include <linux/mutex.h>
30#include <linux/of.h> 37#include <linux/of.h>
31#include <linux/of_platform.h> 38#include <linux/of_platform.h>
32#include <linux/gpio.h> 39#include <linux/gpio.h>
33#include <linux/of_gpio.h> 40#include <linux/of_gpio.h>
34#include <linux/of_spi.h> 41#include <linux/of_spi.h>
42#include <linux/slab.h>
35 43
36#include <sysdev/fsl_soc.h> 44#include <sysdev/fsl_soc.h>
45#include <asm/cpm.h>
46#include <asm/qe.h>
37#include <asm/irq.h> 47#include <asm/irq.h>
38 48
49/* CPM1 and CPM2 are mutually exclusive. */
50#ifdef CONFIG_CPM1
51#include <asm/cpm1.h>
52#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
53#else
54#include <asm/cpm2.h>
55#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
56#endif
57
39/* SPI Controller registers */ 58/* SPI Controller registers */
40struct mpc8xxx_spi_reg { 59struct mpc8xxx_spi_reg {
41 u8 res1[0x20]; 60 u8 res1[0x20];
@@ -47,6 +66,28 @@ struct mpc8xxx_spi_reg {
47 __be32 receive; 66 __be32 receive;
48}; 67};
49 68
69/* SPI Parameter RAM */
70struct spi_pram {
71 __be16 rbase; /* Rx Buffer descriptor base address */
72 __be16 tbase; /* Tx Buffer descriptor base address */
73 u8 rfcr; /* Rx function code */
74 u8 tfcr; /* Tx function code */
75 __be16 mrblr; /* Max receive buffer length */
76 __be32 rstate; /* Internal */
77 __be32 rdp; /* Internal */
78 __be16 rbptr; /* Internal */
79 __be16 rbc; /* Internal */
80 __be32 rxtmp; /* Internal */
81 __be32 tstate; /* Internal */
82 __be32 tdp; /* Internal */
83 __be16 tbptr; /* Internal */
84 __be16 tbc; /* Internal */
85 __be32 txtmp; /* Internal */
86 __be32 res; /* Tx temp. */
87 __be16 rpbase; /* Relocation pointer (CPM1 only) */
88 __be16 res1; /* Reserved */
89};
90
50/* SPI Controller mode register definitions */ 91/* SPI Controller mode register definitions */
51#define SPMODE_LOOP (1 << 30) 92#define SPMODE_LOOP (1 << 30)
52#define SPMODE_CI_INACTIVEHIGH (1 << 29) 93#define SPMODE_CI_INACTIVEHIGH (1 << 29)
@@ -75,14 +116,40 @@ struct mpc8xxx_spi_reg {
75#define SPIM_NE 0x00000200 /* Not empty */ 116#define SPIM_NE 0x00000200 /* Not empty */
76#define SPIM_NF 0x00000100 /* Not full */ 117#define SPIM_NF 0x00000100 /* Not full */
77 118
119#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
120#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
121
122/* SPCOM register values */
123#define SPCOM_STR (1 << 23) /* Start transmit */
124
125#define SPI_PRAM_SIZE 0x100
126#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
127
78/* SPI Controller driver's private data. */ 128/* SPI Controller driver's private data. */
79struct mpc8xxx_spi { 129struct mpc8xxx_spi {
130 struct device *dev;
80 struct mpc8xxx_spi_reg __iomem *base; 131 struct mpc8xxx_spi_reg __iomem *base;
81 132
82 /* rx & tx bufs from the spi_transfer */ 133 /* rx & tx bufs from the spi_transfer */
83 const void *tx; 134 const void *tx;
84 void *rx; 135 void *rx;
85 136
137 int subblock;
138 struct spi_pram __iomem *pram;
139 struct cpm_buf_desc __iomem *tx_bd;
140 struct cpm_buf_desc __iomem *rx_bd;
141
142 struct spi_transfer *xfer_in_progress;
143
144 /* dma addresses for CPM transfers */
145 dma_addr_t tx_dma;
146 dma_addr_t rx_dma;
147 bool map_tx_dma;
148 bool map_rx_dma;
149
150 dma_addr_t dma_dummy_tx;
151 dma_addr_t dma_dummy_rx;
152
86 /* functions to deal with different sized buffers */ 153 /* functions to deal with different sized buffers */
87 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 154 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
88 u32(*get_tx) (struct mpc8xxx_spi *); 155 u32(*get_tx) (struct mpc8xxx_spi *);
@@ -96,7 +163,7 @@ struct mpc8xxx_spi {
96 u32 rx_shift; /* RX data reg shift when in qe mode */ 163 u32 rx_shift; /* RX data reg shift when in qe mode */
97 u32 tx_shift; /* TX data reg shift when in qe mode */ 164 u32 tx_shift; /* TX data reg shift when in qe mode */
98 165
99 bool qe_mode; 166 unsigned int flags;
100 167
101 struct workqueue_struct *workqueue; 168 struct workqueue_struct *workqueue;
102 struct work_struct work; 169 struct work_struct work;
@@ -107,6 +174,10 @@ struct mpc8xxx_spi {
107 struct completion done; 174 struct completion done;
108}; 175};
109 176
177static void *mpc8xxx_dummy_rx;
178static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock);
179static int mpc8xxx_dummy_rx_refcnt;
180
110struct spi_mpc8xxx_cs { 181struct spi_mpc8xxx_cs {
111 /* functions to deal with different sized buffers */ 182 /* functions to deal with different sized buffers */
112 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); 183 void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
@@ -155,6 +226,42 @@ MPC83XX_SPI_TX_BUF(u8)
155MPC83XX_SPI_TX_BUF(u16) 226MPC83XX_SPI_TX_BUF(u16)
156MPC83XX_SPI_TX_BUF(u32) 227MPC83XX_SPI_TX_BUF(u32)
157 228
229static void mpc8xxx_spi_change_mode(struct spi_device *spi)
230{
231 struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
232 struct spi_mpc8xxx_cs *cs = spi->controller_state;
233 __be32 __iomem *mode = &mspi->base->mode;
234 unsigned long flags;
235
236 if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
237 return;
238
239 /* Turn off IRQs locally to minimize time that SPI is disabled. */
240 local_irq_save(flags);
241
242 /* Turn off SPI unit prior changing mode */
243 mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
244 mpc8xxx_spi_write_reg(mode, cs->hw_mode);
245
246 /* When in CPM mode, we need to reinit tx and rx. */
247 if (mspi->flags & SPI_CPM_MODE) {
248 if (mspi->flags & SPI_QE) {
249 qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
250 QE_CR_PROTOCOL_UNSPECIFIED, 0);
251 } else {
252 cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
253 if (mspi->flags & SPI_CPM1) {
254 out_be16(&mspi->pram->rbptr,
255 in_be16(&mspi->pram->rbase));
256 out_be16(&mspi->pram->tbptr,
257 in_be16(&mspi->pram->tbase));
258 }
259 }
260 }
261
262 local_irq_restore(flags);
263}
264
158static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) 265static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
159{ 266{
160 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); 267 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
@@ -168,27 +275,13 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value)
168 } 275 }
169 276
170 if (value == BITBANG_CS_ACTIVE) { 277 if (value == BITBANG_CS_ACTIVE) {
171 u32 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode);
172
173 mpc8xxx_spi->rx_shift = cs->rx_shift; 278 mpc8xxx_spi->rx_shift = cs->rx_shift;
174 mpc8xxx_spi->tx_shift = cs->tx_shift; 279 mpc8xxx_spi->tx_shift = cs->tx_shift;
175 mpc8xxx_spi->get_rx = cs->get_rx; 280 mpc8xxx_spi->get_rx = cs->get_rx;
176 mpc8xxx_spi->get_tx = cs->get_tx; 281 mpc8xxx_spi->get_tx = cs->get_tx;
177 282
178 if (cs->hw_mode != regval) { 283 mpc8xxx_spi_change_mode(spi);
179 unsigned long flags; 284
180 __be32 __iomem *mode = &mpc8xxx_spi->base->mode;
181
182 regval = cs->hw_mode;
183 /* Turn off IRQs locally to minimize time that
184 * SPI is disabled
185 */
186 local_irq_save(flags);
187 /* Turn off SPI unit prior changing mode */
188 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE);
189 mpc8xxx_spi_write_reg(mode, regval);
190 local_irq_restore(flags);
191 }
192 if (pdata->cs_control) 285 if (pdata->cs_control)
193 pdata->cs_control(spi, pol); 286 pdata->cs_control(spi, pol);
194 } 287 }
@@ -198,7 +291,6 @@ static
198int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 291int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
199{ 292{
200 struct mpc8xxx_spi *mpc8xxx_spi; 293 struct mpc8xxx_spi *mpc8xxx_spi;
201 u32 regval;
202 u8 bits_per_word, pm; 294 u8 bits_per_word, pm;
203 u32 hz; 295 u32 hz;
204 struct spi_mpc8xxx_cs *cs = spi->controller_state; 296 struct spi_mpc8xxx_cs *cs = spi->controller_state;
@@ -230,14 +322,14 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
230 if (bits_per_word <= 8) { 322 if (bits_per_word <= 8) {
231 cs->get_rx = mpc8xxx_spi_rx_buf_u8; 323 cs->get_rx = mpc8xxx_spi_rx_buf_u8;
232 cs->get_tx = mpc8xxx_spi_tx_buf_u8; 324 cs->get_tx = mpc8xxx_spi_tx_buf_u8;
233 if (mpc8xxx_spi->qe_mode) { 325 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
234 cs->rx_shift = 16; 326 cs->rx_shift = 16;
235 cs->tx_shift = 24; 327 cs->tx_shift = 24;
236 } 328 }
237 } else if (bits_per_word <= 16) { 329 } else if (bits_per_word <= 16) {
238 cs->get_rx = mpc8xxx_spi_rx_buf_u16; 330 cs->get_rx = mpc8xxx_spi_rx_buf_u16;
239 cs->get_tx = mpc8xxx_spi_tx_buf_u16; 331 cs->get_tx = mpc8xxx_spi_tx_buf_u16;
240 if (mpc8xxx_spi->qe_mode) { 332 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
241 cs->rx_shift = 16; 333 cs->rx_shift = 16;
242 cs->tx_shift = 16; 334 cs->tx_shift = 16;
243 } 335 }
@@ -247,7 +339,8 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
247 } else 339 } else
248 return -EINVAL; 340 return -EINVAL;
249 341
250 if (mpc8xxx_spi->qe_mode && spi->mode & SPI_LSB_FIRST) { 342 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE &&
343 spi->mode & SPI_LSB_FIRST) {
251 cs->tx_shift = 0; 344 cs->tx_shift = 0;
252 if (bits_per_word <= 8) 345 if (bits_per_word <= 8)
253 cs->rx_shift = 8; 346 cs->rx_shift = 8;
@@ -273,7 +366,7 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
273 366
274 if ((mpc8xxx_spi->spibrg / hz) > 64) { 367 if ((mpc8xxx_spi->spibrg / hz) > 64) {
275 cs->hw_mode |= SPMODE_DIV16; 368 cs->hw_mode |= SPMODE_DIV16;
276 pm = mpc8xxx_spi->spibrg / (hz * 64); 369 pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
277 370
278 WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. " 371 WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
279 "Will use %d Hz instead.\n", dev_name(&spi->dev), 372 "Will use %d Hz instead.\n", dev_name(&spi->dev),
@@ -281,42 +374,143 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
281 if (pm > 16) 374 if (pm > 16)
282 pm = 16; 375 pm = 16;
283 } else 376 } else
284 pm = mpc8xxx_spi->spibrg / (hz * 4); 377 pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
285 if (pm) 378 if (pm)
286 pm--; 379 pm--;
287 380
288 cs->hw_mode |= SPMODE_PM(pm); 381 cs->hw_mode |= SPMODE_PM(pm);
289 regval = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); 382
290 if (cs->hw_mode != regval) { 383 mpc8xxx_spi_change_mode(spi);
291 unsigned long flags; 384 return 0;
292 __be32 __iomem *mode = &mpc8xxx_spi->base->mode; 385}
293 386
294 regval = cs->hw_mode; 387static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
295 /* Turn off IRQs locally to minimize time 388{
296 * that SPI is disabled 389 struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
297 */ 390 struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
298 local_irq_save(flags); 391 unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
299 /* Turn off SPI unit prior changing mode */ 392 unsigned int xfer_ofs;
300 mpc8xxx_spi_write_reg(mode, regval & ~SPMODE_ENABLE); 393
301 mpc8xxx_spi_write_reg(mode, regval); 394 xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
302 local_irq_restore(flags); 395
396 out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
397 out_be16(&rx_bd->cbd_datlen, 0);
398 out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
399
400 out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
401 out_be16(&tx_bd->cbd_datlen, xfer_len);
402 out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
403 BD_SC_LAST);
404
405 /* start transfer */
406 mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR);
407}
408
409static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
410 struct spi_transfer *t, bool is_dma_mapped)
411{
412 struct device *dev = mspi->dev;
413
414 if (is_dma_mapped) {
415 mspi->map_tx_dma = 0;
416 mspi->map_rx_dma = 0;
417 } else {
418 mspi->map_tx_dma = 1;
419 mspi->map_rx_dma = 1;
420 }
421
422 if (!t->tx_buf) {
423 mspi->tx_dma = mspi->dma_dummy_tx;
424 mspi->map_tx_dma = 0;
425 }
426
427 if (!t->rx_buf) {
428 mspi->rx_dma = mspi->dma_dummy_rx;
429 mspi->map_rx_dma = 0;
303 } 430 }
431
432 if (mspi->map_tx_dma) {
433 void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */
434
435 mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
436 DMA_TO_DEVICE);
437 if (dma_mapping_error(dev, mspi->tx_dma)) {
438 dev_err(dev, "unable to map tx dma\n");
439 return -ENOMEM;
440 }
441 } else {
442 mspi->tx_dma = t->tx_dma;
443 }
444
445 if (mspi->map_rx_dma) {
446 mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
447 DMA_FROM_DEVICE);
448 if (dma_mapping_error(dev, mspi->rx_dma)) {
449 dev_err(dev, "unable to map rx dma\n");
450 goto err_rx_dma;
451 }
452 } else {
453 mspi->rx_dma = t->rx_dma;
454 }
455
456 /* enable rx ints */
457 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB);
458
459 mspi->xfer_in_progress = t;
460 mspi->count = t->len;
461
462 /* start CPM transfers */
463 mpc8xxx_spi_cpm_bufs_start(mspi);
464
304 return 0; 465 return 0;
466
467err_rx_dma:
468 if (mspi->map_tx_dma)
469 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
470 return -ENOMEM;
305} 471}
306 472
307static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 473static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
308{ 474{
309 struct mpc8xxx_spi *mpc8xxx_spi; 475 struct device *dev = mspi->dev;
310 u32 word, len, bits_per_word; 476 struct spi_transfer *t = mspi->xfer_in_progress;
477
478 if (mspi->map_tx_dma)
479 dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
480 if (mspi->map_rx_dma)
481 dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
482 mspi->xfer_in_progress = NULL;
483}
311 484
312 mpc8xxx_spi = spi_master_get_devdata(spi->master); 485static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
486 struct spi_transfer *t, unsigned int len)
487{
488 u32 word;
489
490 mspi->count = len;
491
492 /* enable rx ints */
493 mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE);
494
495 /* transmit word */
496 word = mspi->get_tx(mspi);
497 mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
498
499 return 0;
500}
501
502static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
503 bool is_dma_mapped)
504{
505 struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
506 unsigned int len = t->len;
507 u8 bits_per_word;
508 int ret;
313 509
314 mpc8xxx_spi->tx = t->tx_buf;
315 mpc8xxx_spi->rx = t->rx_buf;
316 bits_per_word = spi->bits_per_word; 510 bits_per_word = spi->bits_per_word;
317 if (t->bits_per_word) 511 if (t->bits_per_word)
318 bits_per_word = t->bits_per_word; 512 bits_per_word = t->bits_per_word;
319 len = t->len; 513
320 if (bits_per_word > 8) { 514 if (bits_per_word > 8) {
321 /* invalid length? */ 515 /* invalid length? */
322 if (len & 1) 516 if (len & 1)
@@ -329,22 +523,27 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
329 return -EINVAL; 523 return -EINVAL;
330 len /= 2; 524 len /= 2;
331 } 525 }
332 mpc8xxx_spi->count = len;
333 526
334 INIT_COMPLETION(mpc8xxx_spi->done); 527 mpc8xxx_spi->tx = t->tx_buf;
528 mpc8xxx_spi->rx = t->rx_buf;
335 529
336 /* enable rx ints */ 530 INIT_COMPLETION(mpc8xxx_spi->done);
337 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, SPIM_NE);
338 531
339 /* transmit word */ 532 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
340 word = mpc8xxx_spi->get_tx(mpc8xxx_spi); 533 ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
341 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); 534 else
535 ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len);
536 if (ret)
537 return ret;
342 538
343 wait_for_completion(&mpc8xxx_spi->done); 539 wait_for_completion(&mpc8xxx_spi->done);
344 540
345 /* disable rx ints */ 541 /* disable rx ints */
346 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); 542 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0);
347 543
544 if (mpc8xxx_spi->flags & SPI_CPM_MODE)
545 mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi);
546
348 return mpc8xxx_spi->count; 547 return mpc8xxx_spi->count;
349} 548}
350 549
@@ -375,7 +574,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m)
375 } 574 }
376 cs_change = t->cs_change; 575 cs_change = t->cs_change;
377 if (t->len) 576 if (t->len)
378 status = mpc8xxx_spi_bufs(spi, t); 577 status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped);
379 if (status) { 578 if (status) {
380 status = -EMSGSIZE; 579 status = -EMSGSIZE;
381 break; 580 break;
@@ -464,45 +663,80 @@ static int mpc8xxx_spi_setup(struct spi_device *spi)
464 return 0; 663 return 0;
465} 664}
466 665
467static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) 666static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
468{ 667{
469 struct mpc8xxx_spi *mpc8xxx_spi = context_data; 668 u16 len;
470 u32 event;
471 irqreturn_t ret = IRQ_NONE;
472 669
473 /* Get interrupt events(tx/rx) */ 670 dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
474 event = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event); 671 in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
475 672
476 /* We need handle RX first */ 673 len = in_be16(&mspi->rx_bd->cbd_datlen);
477 if (event & SPIE_NE) { 674 if (len > mspi->count) {
478 u32 rx_data = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->receive); 675 WARN_ON(1);
676 len = mspi->count;
677 }
479 678
480 if (mpc8xxx_spi->rx) 679 /* Clear the events */
481 mpc8xxx_spi->get_rx(rx_data, mpc8xxx_spi); 680 mpc8xxx_spi_write_reg(&mspi->base->event, events);
482 681
483 ret = IRQ_HANDLED; 682 mspi->count -= len;
683 if (mspi->count)
684 mpc8xxx_spi_cpm_bufs_start(mspi);
685 else
686 complete(&mspi->done);
687}
688
689static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
690{
691 /* We need handle RX first */
692 if (events & SPIE_NE) {
693 u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive);
694
695 if (mspi->rx)
696 mspi->get_rx(rx_data, mspi);
484 } 697 }
485 698
486 if ((event & SPIE_NF) == 0) 699 if ((events & SPIE_NF) == 0)
487 /* spin until TX is done */ 700 /* spin until TX is done */
488 while (((event = 701 while (((events =
489 mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->event)) & 702 mpc8xxx_spi_read_reg(&mspi->base->event)) &
490 SPIE_NF) == 0) 703 SPIE_NF) == 0)
491 cpu_relax(); 704 cpu_relax();
492 705
493 mpc8xxx_spi->count -= 1; 706 /* Clear the events */
494 if (mpc8xxx_spi->count) { 707 mpc8xxx_spi_write_reg(&mspi->base->event, events);
495 u32 word = mpc8xxx_spi->get_tx(mpc8xxx_spi); 708
496 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->transmit, word); 709 mspi->count -= 1;
710 if (mspi->count) {
711 u32 word = mspi->get_tx(mspi);
712
713 mpc8xxx_spi_write_reg(&mspi->base->transmit, word);
497 } else { 714 } else {
498 complete(&mpc8xxx_spi->done); 715 complete(&mspi->done);
499 } 716 }
717}
500 718
501 /* Clear the events */ 719static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data)
502 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, event); 720{
721 struct mpc8xxx_spi *mspi = context_data;
722 irqreturn_t ret = IRQ_NONE;
723 u32 events;
724
725 /* Get interrupt events (tx/rx) */
726 events = mpc8xxx_spi_read_reg(&mspi->base->event);
727 if (events)
728 ret = IRQ_HANDLED;
729
730 dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
731
732 if (mspi->flags & SPI_CPM_MODE)
733 mpc8xxx_spi_cpm_irq(mspi, events);
734 else
735 mpc8xxx_spi_cpu_irq(mspi, events);
503 736
504 return ret; 737 return ret;
505} 738}
739
506static int mpc8xxx_spi_transfer(struct spi_device *spi, 740static int mpc8xxx_spi_transfer(struct spi_device *spi,
507 struct spi_message *m) 741 struct spi_message *m)
508{ 742{
@@ -526,6 +760,215 @@ static void mpc8xxx_spi_cleanup(struct spi_device *spi)
526 kfree(spi->controller_state); 760 kfree(spi->controller_state);
527} 761}
528 762
763static void *mpc8xxx_spi_alloc_dummy_rx(void)
764{
765 mutex_lock(&mpc8xxx_dummy_rx_lock);
766
767 if (!mpc8xxx_dummy_rx)
768 mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
769 if (mpc8xxx_dummy_rx)
770 mpc8xxx_dummy_rx_refcnt++;
771
772 mutex_unlock(&mpc8xxx_dummy_rx_lock);
773
774 return mpc8xxx_dummy_rx;
775}
776
777static void mpc8xxx_spi_free_dummy_rx(void)
778{
779 mutex_lock(&mpc8xxx_dummy_rx_lock);
780
781 switch (mpc8xxx_dummy_rx_refcnt) {
782 case 0:
783 WARN_ON(1);
784 break;
785 case 1:
786 kfree(mpc8xxx_dummy_rx);
787 mpc8xxx_dummy_rx = NULL;
788 /* fall through */
789 default:
790 mpc8xxx_dummy_rx_refcnt--;
791 break;
792 }
793
794 mutex_unlock(&mpc8xxx_dummy_rx_lock);
795}
796
797static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
798{
799 struct device *dev = mspi->dev;
800 struct device_node *np = dev_archdata_get_node(&dev->archdata);
801 const u32 *iprop;
802 int size;
803 unsigned long spi_base_ofs;
804 unsigned long pram_ofs = -ENOMEM;
805
806 /* Can't use of_address_to_resource(), QE muram isn't at 0. */
807 iprop = of_get_property(np, "reg", &size);
808
809 /* QE with a fixed pram location? */
810 if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
811 return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
812
813 /* QE but with a dynamic pram location? */
814 if (mspi->flags & SPI_QE) {
815 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
816 qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
817 QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
818 return pram_ofs;
819 }
820
821 /* CPM1 and CPM2 pram must be at a fixed addr. */
822 if (!iprop || size != sizeof(*iprop) * 4)
823 return -ENOMEM;
824
825 spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
826 if (IS_ERR_VALUE(spi_base_ofs))
827 return -ENOMEM;
828
829 if (mspi->flags & SPI_CPM2) {
830 pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
831 if (!IS_ERR_VALUE(pram_ofs)) {
832 u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
833
834 out_be16(spi_base, pram_ofs);
835 }
836 } else {
837 struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
838 u16 rpbase = in_be16(&pram->rpbase);
839
840 /* Microcode relocation patch applied? */
841 if (rpbase)
842 pram_ofs = rpbase;
843 else
844 return spi_base_ofs;
845 }
846
847 cpm_muram_free(spi_base_ofs);
848 return pram_ofs;
849}
850
851static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi)
852{
853 struct device *dev = mspi->dev;
854 struct device_node *np = dev_archdata_get_node(&dev->archdata);
855 const u32 *iprop;
856 int size;
857 unsigned long pram_ofs;
858 unsigned long bds_ofs;
859
860 if (!(mspi->flags & SPI_CPM_MODE))
861 return 0;
862
863 if (!mpc8xxx_spi_alloc_dummy_rx())
864 return -ENOMEM;
865
866 if (mspi->flags & SPI_QE) {
867 iprop = of_get_property(np, "cell-index", &size);
868 if (iprop && size == sizeof(*iprop))
869 mspi->subblock = *iprop;
870
871 switch (mspi->subblock) {
872 default:
873 dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
874 /* fall through */
875 case 0:
876 mspi->subblock = QE_CR_SUBBLOCK_SPI1;
877 break;
878 case 1:
879 mspi->subblock = QE_CR_SUBBLOCK_SPI2;
880 break;
881 }
882 }
883
884 pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi);
885 if (IS_ERR_VALUE(pram_ofs)) {
886 dev_err(dev, "can't allocate spi parameter ram\n");
887 goto err_pram;
888 }
889
890 bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
891 sizeof(*mspi->rx_bd), 8);
892 if (IS_ERR_VALUE(bds_ofs)) {
893 dev_err(dev, "can't allocate bds\n");
894 goto err_bds;
895 }
896
897 mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
898 DMA_TO_DEVICE);
899 if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
900 dev_err(dev, "unable to map dummy tx buffer\n");
901 goto err_dummy_tx;
902 }
903
904 mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR,
905 DMA_FROM_DEVICE);
906 if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
907 dev_err(dev, "unable to map dummy rx buffer\n");
908 goto err_dummy_rx;
909 }
910
911 mspi->pram = cpm_muram_addr(pram_ofs);
912
913 mspi->tx_bd = cpm_muram_addr(bds_ofs);
914 mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
915
916 /* Initialize parameter ram. */
917 out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
918 out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
919 out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
920 out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
921 out_be16(&mspi->pram->mrblr, SPI_MRBLR);
922 out_be32(&mspi->pram->rstate, 0);
923 out_be32(&mspi->pram->rdp, 0);
924 out_be16(&mspi->pram->rbptr, 0);
925 out_be16(&mspi->pram->rbc, 0);
926 out_be32(&mspi->pram->rxtmp, 0);
927 out_be32(&mspi->pram->tstate, 0);
928 out_be32(&mspi->pram->tdp, 0);
929 out_be16(&mspi->pram->tbptr, 0);
930 out_be16(&mspi->pram->tbc, 0);
931 out_be32(&mspi->pram->txtmp, 0);
932
933 return 0;
934
935err_dummy_rx:
936 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
937err_dummy_tx:
938 cpm_muram_free(bds_ofs);
939err_bds:
940 cpm_muram_free(pram_ofs);
941err_pram:
942 mpc8xxx_spi_free_dummy_rx();
943 return -ENOMEM;
944}
945
946static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi)
947{
948 struct device *dev = mspi->dev;
949
950 dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
951 dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
952 cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
953 cpm_muram_free(cpm_muram_offset(mspi->pram));
954 mpc8xxx_spi_free_dummy_rx();
955}
956
957static const char *mpc8xxx_spi_strmode(unsigned int flags)
958{
959 if (flags & SPI_QE_CPU_MODE) {
960 return "QE CPU";
961 } else if (flags & SPI_CPM_MODE) {
962 if (flags & SPI_QE)
963 return "QE";
964 else if (flags & SPI_CPM2)
965 return "CPM2";
966 else
967 return "CPM1";
968 }
969 return "CPU";
970}
971
529static struct spi_master * __devinit 972static struct spi_master * __devinit
530mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) 973mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
531{ 974{
@@ -552,24 +995,29 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
552 master->cleanup = mpc8xxx_spi_cleanup; 995 master->cleanup = mpc8xxx_spi_cleanup;
553 996
554 mpc8xxx_spi = spi_master_get_devdata(master); 997 mpc8xxx_spi = spi_master_get_devdata(master);
555 mpc8xxx_spi->qe_mode = pdata->qe_mode; 998 mpc8xxx_spi->dev = dev;
556 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; 999 mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
557 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; 1000 mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
1001 mpc8xxx_spi->flags = pdata->flags;
558 mpc8xxx_spi->spibrg = pdata->sysclk; 1002 mpc8xxx_spi->spibrg = pdata->sysclk;
559 1003
1004 ret = mpc8xxx_spi_cpm_init(mpc8xxx_spi);
1005 if (ret)
1006 goto err_cpm_init;
1007
560 mpc8xxx_spi->rx_shift = 0; 1008 mpc8xxx_spi->rx_shift = 0;
561 mpc8xxx_spi->tx_shift = 0; 1009 mpc8xxx_spi->tx_shift = 0;
562 if (mpc8xxx_spi->qe_mode) { 1010 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
563 mpc8xxx_spi->rx_shift = 16; 1011 mpc8xxx_spi->rx_shift = 16;
564 mpc8xxx_spi->tx_shift = 24; 1012 mpc8xxx_spi->tx_shift = 24;
565 } 1013 }
566 1014
567 init_completion(&mpc8xxx_spi->done); 1015 init_completion(&mpc8xxx_spi->done);
568 1016
569 mpc8xxx_spi->base = ioremap(mem->start, mem->end - mem->start + 1); 1017 mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem));
570 if (mpc8xxx_spi->base == NULL) { 1018 if (mpc8xxx_spi->base == NULL) {
571 ret = -ENOMEM; 1019 ret = -ENOMEM;
572 goto put_master; 1020 goto err_ioremap;
573 } 1021 }
574 1022
575 mpc8xxx_spi->irq = irq; 1023 mpc8xxx_spi->irq = irq;
@@ -592,7 +1040,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
592 1040
593 /* Enable SPI interface */ 1041 /* Enable SPI interface */
594 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 1042 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
595 if (pdata->qe_mode) 1043 if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
596 regval |= SPMODE_OP; 1044 regval |= SPMODE_OP;
597 1045
598 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); 1046 mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval);
@@ -612,9 +1060,8 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq)
612 if (ret < 0) 1060 if (ret < 0)
613 goto unreg_master; 1061 goto unreg_master;
614 1062
615 printk(KERN_INFO 1063 dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base,
616 "%s: MPC8xxx SPI Controller driver at 0x%p (irq = %d)\n", 1064 mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
617 dev_name(dev), mpc8xxx_spi->base, mpc8xxx_spi->irq);
618 1065
619 return master; 1066 return master;
620 1067
@@ -624,7 +1071,9 @@ free_irq:
624 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 1071 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
625unmap_io: 1072unmap_io:
626 iounmap(mpc8xxx_spi->base); 1073 iounmap(mpc8xxx_spi->base);
627put_master: 1074err_ioremap:
1075 mpc8xxx_spi_cpm_free(mpc8xxx_spi);
1076err_cpm_init:
628 spi_master_put(master); 1077 spi_master_put(master);
629err: 1078err:
630 return ERR_PTR(ret); 1079 return ERR_PTR(ret);
@@ -644,6 +1093,7 @@ static int __devexit mpc8xxx_spi_remove(struct device *dev)
644 1093
645 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); 1094 free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
646 iounmap(mpc8xxx_spi->base); 1095 iounmap(mpc8xxx_spi->base);
1096 mpc8xxx_spi_cpm_free(mpc8xxx_spi);
647 1097
648 return 0; 1098 return 0;
649} 1099}
@@ -709,6 +1159,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev)
709 gpio = of_get_gpio_flags(np, i, &flags); 1159 gpio = of_get_gpio_flags(np, i, &flags);
710 if (!gpio_is_valid(gpio)) { 1160 if (!gpio_is_valid(gpio)) {
711 dev_err(dev, "invalid gpio #%d: %d\n", i, gpio); 1161 dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
1162 ret = gpio;
712 goto err_loop; 1163 goto err_loop;
713 } 1164 }
714 1165
@@ -804,7 +1255,13 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev,
804 1255
805 prop = of_get_property(np, "mode", NULL); 1256 prop = of_get_property(np, "mode", NULL);
806 if (prop && !strcmp(prop, "cpu-qe")) 1257 if (prop && !strcmp(prop, "cpu-qe"))
807 pdata->qe_mode = 1; 1258 pdata->flags = SPI_QE_CPU_MODE;
1259 else if (prop && !strcmp(prop, "qe"))
1260 pdata->flags = SPI_CPM_MODE | SPI_QE;
1261 else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
1262 pdata->flags = SPI_CPM_MODE | SPI_CPM2;
1263 else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
1264 pdata->flags = SPI_CPM_MODE | SPI_CPM1;
808 1265
809 ret = of_mpc8xxx_spi_get_chipselects(dev); 1266 ret = of_mpc8xxx_spi_get_chipselects(dev);
810 if (ret) 1267 if (ret)
@@ -872,7 +1329,7 @@ static struct of_platform_driver of_mpc8xxx_spi_driver = {
872static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) 1329static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
873{ 1330{
874 struct resource *mem; 1331 struct resource *mem;
875 unsigned int irq; 1332 int irq;
876 struct spi_master *master; 1333 struct spi_master *master;
877 1334
878 if (!pdev->dev.platform_data) 1335 if (!pdev->dev.platform_data)
@@ -883,7 +1340,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev)
883 return -EINVAL; 1340 return -EINVAL;
884 1341
885 irq = platform_get_irq(pdev, 0); 1342 irq = platform_get_irq(pdev, 0);
886 if (!irq) 1343 if (irq <= 0)
887 return -EINVAL; 1344 return -EINVAL;
888 1345
889 master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); 1346 master = mpc8xxx_spi_probe(&pdev->dev, mem, irq);
@@ -900,7 +1357,7 @@ static int __devexit plat_mpc8xxx_spi_remove(struct platform_device *pdev)
900MODULE_ALIAS("platform:mpc8xxx_spi"); 1357MODULE_ALIAS("platform:mpc8xxx_spi");
901static struct platform_driver mpc8xxx_spi_driver = { 1358static struct platform_driver mpc8xxx_spi_driver = {
902 .probe = plat_mpc8xxx_spi_probe, 1359 .probe = plat_mpc8xxx_spi_probe,
903 .remove = __exit_p(plat_mpc8xxx_spi_remove), 1360 .remove = __devexit_p(plat_mpc8xxx_spi_remove),
904 .driver = { 1361 .driver = {
905 .name = "mpc8xxx_spi", 1362 .name = "mpc8xxx_spi",
906 .owner = THIS_MODULE, 1363 .owner = THIS_MODULE,
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c
new file mode 100644
index 000000000000..dff63be0d0a8
--- /dev/null
+++ b/drivers/spi/spi_nuc900.c
@@ -0,0 +1,505 @@
1/* linux/drivers/spi/spi_nuc900.c
2 *
3 * Copyright (c) 2009 Nuvoton technology.
4 * Wan ZongShun <mcuos.com@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10*/
11
12#include <linux/init.h>
13#include <linux/spinlock.h>
14#include <linux/workqueue.h>
15#include <linux/interrupt.h>
16#include <linux/delay.h>
17#include <linux/errno.h>
18#include <linux/err.h>
19#include <linux/clk.h>
20#include <linux/device.h>
21#include <linux/platform_device.h>
22#include <linux/gpio.h>
23#include <linux/io.h>
24#include <linux/slab.h>
25
26#include <linux/spi/spi.h>
27#include <linux/spi/spi_bitbang.h>
28
29#include <mach/nuc900_spi.h>
30
31/* usi registers offset */
32#define USI_CNT 0x00
33#define USI_DIV 0x04
34#define USI_SSR 0x08
35#define USI_RX0 0x10
36#define USI_TX0 0x10
37
38/* usi register bit */
39#define ENINT (0x01 << 17)
40#define ENFLG (0x01 << 16)
41#define TXNUM (0x03 << 8)
42#define TXNEG (0x01 << 2)
43#define RXNEG (0x01 << 1)
44#define LSB (0x01 << 10)
45#define SELECTLEV (0x01 << 2)
46#define SELECTPOL (0x01 << 31)
47#define SELECTSLAVE 0x01
48#define GOBUSY 0x01
49
50struct nuc900_spi {
51 struct spi_bitbang bitbang;
52 struct completion done;
53 void __iomem *regs;
54 int irq;
55 int len;
56 int count;
57 const unsigned char *tx;
58 unsigned char *rx;
59 struct clk *clk;
60 struct resource *ioarea;
61 struct spi_master *master;
62 struct spi_device *curdev;
63 struct device *dev;
64 struct nuc900_spi_info *pdata;
65 spinlock_t lock;
66 struct resource *res;
67};
68
69static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
70{
71 return spi_master_get_devdata(sdev->master);
72}
73
74static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr)
75{
76 struct nuc900_spi *hw = to_hw(spi);
77 unsigned int val;
78 unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0;
79 unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0;
80 unsigned long flags;
81
82 spin_lock_irqsave(&hw->lock, flags);
83
84 val = __raw_readl(hw->regs + USI_SSR);
85
86 if (!cs)
87 val &= ~SELECTLEV;
88 else
89 val |= SELECTLEV;
90
91 if (!ssr)
92 val &= ~SELECTSLAVE;
93 else
94 val |= SELECTSLAVE;
95
96 __raw_writel(val, hw->regs + USI_SSR);
97
98 val = __raw_readl(hw->regs + USI_CNT);
99
100 if (!cpol)
101 val &= ~SELECTPOL;
102 else
103 val |= SELECTPOL;
104
105 __raw_writel(val, hw->regs + USI_CNT);
106
107 spin_unlock_irqrestore(&hw->lock, flags);
108}
109
110static void nuc900_spi_chipsel(struct spi_device *spi, int value)
111{
112 switch (value) {
113 case BITBANG_CS_INACTIVE:
114 nuc900_slave_select(spi, 0);
115 break;
116
117 case BITBANG_CS_ACTIVE:
118 nuc900_slave_select(spi, 1);
119 break;
120 }
121}
122
123static void nuc900_spi_setup_txnum(struct nuc900_spi *hw,
124 unsigned int txnum)
125{
126 unsigned int val;
127 unsigned long flags;
128
129 spin_lock_irqsave(&hw->lock, flags);
130
131 val = __raw_readl(hw->regs + USI_CNT);
132
133 if (!txnum)
134 val &= ~TXNUM;
135 else
136 val |= txnum << 0x08;
137
138 __raw_writel(val, hw->regs + USI_CNT);
139
140 spin_unlock_irqrestore(&hw->lock, flags);
141
142}
143
144static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
145 unsigned int txbitlen)
146{
147 unsigned int val;
148 unsigned long flags;
149
150 spin_lock_irqsave(&hw->lock, flags);
151
152 val = __raw_readl(hw->regs + USI_CNT);
153
154 val |= (txbitlen << 0x03);
155
156 __raw_writel(val, hw->regs + USI_CNT);
157
158 spin_unlock_irqrestore(&hw->lock, flags);
159}
160
161static void nuc900_spi_gobusy(struct nuc900_spi *hw)
162{
163 unsigned int val;
164 unsigned long flags;
165
166 spin_lock_irqsave(&hw->lock, flags);
167
168 val = __raw_readl(hw->regs + USI_CNT);
169
170 val |= GOBUSY;
171
172 __raw_writel(val, hw->regs + USI_CNT);
173
174 spin_unlock_irqrestore(&hw->lock, flags);
175}
176
177static int nuc900_spi_setupxfer(struct spi_device *spi,
178 struct spi_transfer *t)
179{
180 return 0;
181}
182
183static int nuc900_spi_setup(struct spi_device *spi)
184{
185 return 0;
186}
187
188static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
189{
190 return hw->tx ? hw->tx[count] : 0;
191}
192
193static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
194{
195 struct nuc900_spi *hw = to_hw(spi);
196
197 hw->tx = t->tx_buf;
198 hw->rx = t->rx_buf;
199 hw->len = t->len;
200 hw->count = 0;
201
202 __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0);
203
204 nuc900_spi_gobusy(hw);
205
206 wait_for_completion(&hw->done);
207
208 return hw->count;
209}
210
211static irqreturn_t nuc900_spi_irq(int irq, void *dev)
212{
213 struct nuc900_spi *hw = dev;
214 unsigned int status;
215 unsigned int count = hw->count;
216
217 status = __raw_readl(hw->regs + USI_CNT);
218 __raw_writel(status, hw->regs + USI_CNT);
219
220 if (status & ENFLG) {
221 hw->count++;
222
223 if (hw->rx)
224 hw->rx[count] = __raw_readl(hw->regs + USI_RX0);
225 count++;
226
227 if (count < hw->len) {
228 __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0);
229 nuc900_spi_gobusy(hw);
230 } else {
231 complete(&hw->done);
232 }
233
234 return IRQ_HANDLED;
235 }
236
237 complete(&hw->done);
238 return IRQ_HANDLED;
239}
240
241static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge)
242{
243 unsigned int val;
244 unsigned long flags;
245
246 spin_lock_irqsave(&hw->lock, flags);
247
248 val = __raw_readl(hw->regs + USI_CNT);
249
250 if (edge)
251 val |= TXNEG;
252 else
253 val &= ~TXNEG;
254 __raw_writel(val, hw->regs + USI_CNT);
255
256 spin_unlock_irqrestore(&hw->lock, flags);
257}
258
259static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge)
260{
261 unsigned int val;
262 unsigned long flags;
263
264 spin_lock_irqsave(&hw->lock, flags);
265
266 val = __raw_readl(hw->regs + USI_CNT);
267
268 if (edge)
269 val |= RXNEG;
270 else
271 val &= ~RXNEG;
272 __raw_writel(val, hw->regs + USI_CNT);
273
274 spin_unlock_irqrestore(&hw->lock, flags);
275}
276
277static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb)
278{
279 unsigned int val;
280 unsigned long flags;
281
282 spin_lock_irqsave(&hw->lock, flags);
283
284 val = __raw_readl(hw->regs + USI_CNT);
285
286 if (lsb)
287 val |= LSB;
288 else
289 val &= ~LSB;
290 __raw_writel(val, hw->regs + USI_CNT);
291
292 spin_unlock_irqrestore(&hw->lock, flags);
293}
294
295static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
296{
297 unsigned int val;
298 unsigned long flags;
299
300 spin_lock_irqsave(&hw->lock, flags);
301
302 val = __raw_readl(hw->regs + USI_CNT);
303
304 if (sleep)
305 val |= (sleep << 12);
306 else
307 val &= ~(0x0f << 12);
308 __raw_writel(val, hw->regs + USI_CNT);
309
310 spin_unlock_irqrestore(&hw->lock, flags);
311}
312
313static void nuc900_enable_int(struct nuc900_spi *hw)
314{
315 unsigned int val;
316 unsigned long flags;
317
318 spin_lock_irqsave(&hw->lock, flags);
319
320 val = __raw_readl(hw->regs + USI_CNT);
321
322 val |= ENINT;
323
324 __raw_writel(val, hw->regs + USI_CNT);
325
326 spin_unlock_irqrestore(&hw->lock, flags);
327}
328
329static void nuc900_set_divider(struct nuc900_spi *hw)
330{
331 __raw_writel(hw->pdata->divider, hw->regs + USI_DIV);
332}
333
334static void nuc900_init_spi(struct nuc900_spi *hw)
335{
336 clk_enable(hw->clk);
337 spin_lock_init(&hw->lock);
338
339 nuc900_tx_edge(hw, hw->pdata->txneg);
340 nuc900_rx_edge(hw, hw->pdata->rxneg);
341 nuc900_send_first(hw, hw->pdata->lsb);
342 nuc900_set_sleep(hw, hw->pdata->sleep);
343 nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen);
344 nuc900_spi_setup_txnum(hw, hw->pdata->txnum);
345 nuc900_set_divider(hw);
346 nuc900_enable_int(hw);
347}
348
349static int __devinit nuc900_spi_probe(struct platform_device *pdev)
350{
351 struct nuc900_spi *hw;
352 struct spi_master *master;
353 int err = 0;
354
355 master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
356 if (master == NULL) {
357 dev_err(&pdev->dev, "No memory for spi_master\n");
358 err = -ENOMEM;
359 goto err_nomem;
360 }
361
362 hw = spi_master_get_devdata(master);
363 memset(hw, 0, sizeof(struct nuc900_spi));
364
365 hw->master = spi_master_get(master);
366 hw->pdata = pdev->dev.platform_data;
367 hw->dev = &pdev->dev;
368
369 if (hw->pdata == NULL) {
370 dev_err(&pdev->dev, "No platform data supplied\n");
371 err = -ENOENT;
372 goto err_pdata;
373 }
374
375 platform_set_drvdata(pdev, hw);
376 init_completion(&hw->done);
377
378 master->mode_bits = SPI_MODE_0;
379 master->num_chipselect = hw->pdata->num_cs;
380 master->bus_num = hw->pdata->bus_num;
381 hw->bitbang.master = hw->master;
382 hw->bitbang.setup_transfer = nuc900_spi_setupxfer;
383 hw->bitbang.chipselect = nuc900_spi_chipsel;
384 hw->bitbang.txrx_bufs = nuc900_spi_txrx;
385 hw->bitbang.master->setup = nuc900_spi_setup;
386
387 hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
388 if (hw->res == NULL) {
389 dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
390 err = -ENOENT;
391 goto err_pdata;
392 }
393
394 hw->ioarea = request_mem_region(hw->res->start,
395 resource_size(hw->res), pdev->name);
396
397 if (hw->ioarea == NULL) {
398 dev_err(&pdev->dev, "Cannot reserve region\n");
399 err = -ENXIO;
400 goto err_pdata;
401 }
402
403 hw->regs = ioremap(hw->res->start, resource_size(hw->res));
404 if (hw->regs == NULL) {
405 dev_err(&pdev->dev, "Cannot map IO\n");
406 err = -ENXIO;
407 goto err_iomap;
408 }
409
410 hw->irq = platform_get_irq(pdev, 0);
411 if (hw->irq < 0) {
412 dev_err(&pdev->dev, "No IRQ specified\n");
413 err = -ENOENT;
414 goto err_irq;
415 }
416
417 err = request_irq(hw->irq, nuc900_spi_irq, 0, pdev->name, hw);
418 if (err) {
419 dev_err(&pdev->dev, "Cannot claim IRQ\n");
420 goto err_irq;
421 }
422
423 hw->clk = clk_get(&pdev->dev, "spi");
424 if (IS_ERR(hw->clk)) {
425 dev_err(&pdev->dev, "No clock for device\n");
426 err = PTR_ERR(hw->clk);
427 goto err_clk;
428 }
429
430 mfp_set_groupg(&pdev->dev);
431 nuc900_init_spi(hw);
432
433 err = spi_bitbang_start(&hw->bitbang);
434 if (err) {
435 dev_err(&pdev->dev, "Failed to register SPI master\n");
436 goto err_register;
437 }
438
439 return 0;
440
441err_register:
442 clk_disable(hw->clk);
443 clk_put(hw->clk);
444err_clk:
445 free_irq(hw->irq, hw);
446err_irq:
447 iounmap(hw->regs);
448err_iomap:
449 release_mem_region(hw->res->start, resource_size(hw->res));
450 kfree(hw->ioarea);
451err_pdata:
452 spi_master_put(hw->master);
453
454err_nomem:
455 return err;
456}
457
458static int __devexit nuc900_spi_remove(struct platform_device *dev)
459{
460 struct nuc900_spi *hw = platform_get_drvdata(dev);
461
462 free_irq(hw->irq, hw);
463
464 platform_set_drvdata(dev, NULL);
465
466 spi_unregister_master(hw->master);
467
468 clk_disable(hw->clk);
469 clk_put(hw->clk);
470
471 iounmap(hw->regs);
472
473 release_mem_region(hw->res->start, resource_size(hw->res));
474 kfree(hw->ioarea);
475
476 spi_master_put(hw->master);
477 return 0;
478}
479
480static struct platform_driver nuc900_spi_driver = {
481 .probe = nuc900_spi_probe,
482 .remove = __devexit_p(nuc900_spi_remove),
483 .driver = {
484 .name = "nuc900-spi",
485 .owner = THIS_MODULE,
486 },
487};
488
489static int __init nuc900_spi_init(void)
490{
491 return platform_driver_register(&nuc900_spi_driver);
492}
493
494static void __exit nuc900_spi_exit(void)
495{
496 platform_driver_unregister(&nuc900_spi_driver);
497}
498
499module_init(nuc900_spi_init);
500module_exit(nuc900_spi_exit);
501
502MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
503MODULE_DESCRIPTION("nuc900 spi driver!");
504MODULE_LICENSE("GPL");
505MODULE_ALIAS("platform:nuc900-spi");
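The new nuc900 driver above takes all of its fixed configuration from struct nuc900_spi_info platform data. The field names in the sketch below are the ones the driver dereferences; the values, the client entry and the board hook are illustrative only:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <mach/nuc900_spi.h>

static struct nuc900_spi_info nuc900_spi0_platdata = {
	.num_cs		= 1,
	.bus_num	= 0,
	.lsb		= 0,	/* MSB first */
	.txneg		= 1,	/* shift out on the falling edge */
	.rxneg		= 0,	/* sample on the rising edge */
	.divider	= 24,	/* hypothetical USI_DIV value */
	.sleep		= 0,	/* no inter-byte idle cycles */
	.txnum		= 0,	/* one transfer word per GOBUSY trigger */
	.txbitlen	= 8,	/* 8-bit transfer words */
};

static struct spi_board_info nuc900_spi0_board_info[] __initdata = {
	{
		.modalias	= "spidev",	/* example client */
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};

/* In board init code one would attach the platform data to the "nuc900-spi"
 * platform device and register the client(s), roughly:
 *
 *	nuc900_spi_device.dev.platform_data = &nuc900_spi0_platdata;
 *	spi_register_board_info(nuc900_spi0_board_info,
 *				ARRAY_SIZE(nuc900_spi0_board_info));
 *
 * (the platform device symbol name above is a placeholder)
 */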
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c
index 140a18d6cf3e..7cb5ff37f6e2 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi_ppc4xx.c
@@ -26,6 +26,7 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/slab.h>
29#include <linux/errno.h> 30#include <linux/errno.h>
30#include <linux/wait.h> 31#include <linux/wait.h>
31#include <linux/of_platform.h> 32#include <linux/of_platform.h>
@@ -578,7 +579,7 @@ static int __exit spi_ppc4xx_of_remove(struct of_device *op)
578 return 0; 579 return 0;
579} 580}
580 581
581static struct of_device_id spi_ppc4xx_of_match[] = { 582static const struct of_device_id spi_ppc4xx_of_match[] = {
582 { .compatible = "ibm,ppc4xx-spi", }, 583 { .compatible = "ibm,ppc4xx-spi", },
583 {}, 584 {},
584}; 585};
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c
index 33d94f76b9ef..151a95e40653 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi_s3c24xx.c
@@ -1,7 +1,7 @@
1/* linux/drivers/spi/spi_s3c24xx.c 1/* linux/drivers/spi/spi_s3c24xx.c
2 * 2 *
3 * Copyright (c) 2006 Ben Dooks 3 * Copyright (c) 2006 Ben Dooks
4 * Copyright (c) 2006 Simtec Electronics 4 * Copyright 2006-2009 Simtec Electronics
5 * Ben Dooks <ben@simtec.co.uk> 5 * Ben Dooks <ben@simtec.co.uk>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -21,6 +21,7 @@
21#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/slab.h>
24 25
25#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
26#include <linux/spi/spi_bitbang.h> 27#include <linux/spi/spi_bitbang.h>
@@ -28,6 +29,11 @@
28#include <plat/regs-spi.h> 29#include <plat/regs-spi.h>
29#include <mach/spi.h> 30#include <mach/spi.h>
30 31
32#include <plat/fiq.h>
33#include <asm/fiq.h>
34
35#include "spi_s3c24xx_fiq.h"
36
31/** 37/**
32 * s3c24xx_spi_devstate - per device data 38 * s3c24xx_spi_devstate - per device data
33 * @hz: Last frequency calculated for @sppre field. 39 * @hz: Last frequency calculated for @sppre field.
@@ -42,6 +48,13 @@ struct s3c24xx_spi_devstate {
42 u8 sppre; 48 u8 sppre;
43}; 49};
44 50
51enum spi_fiq_mode {
52 FIQ_MODE_NONE = 0,
53 FIQ_MODE_TX = 1,
54 FIQ_MODE_RX = 2,
55 FIQ_MODE_TXRX = 3,
56};
57
45struct s3c24xx_spi { 58struct s3c24xx_spi {
46 /* bitbang has to be first */ 59 /* bitbang has to be first */
47 struct spi_bitbang bitbang; 60 struct spi_bitbang bitbang;
@@ -52,6 +65,11 @@ struct s3c24xx_spi {
52 int len; 65 int len;
53 int count; 66 int count;
54 67
68 struct fiq_handler fiq_handler;
69 enum spi_fiq_mode fiq_mode;
70 unsigned char fiq_inuse;
71 unsigned char fiq_claimed;
72
55 void (*set_cs)(struct s3c2410_spi_info *spi, 73 void (*set_cs)(struct s3c2410_spi_info *spi,
56 int cs, int pol); 74 int cs, int pol);
57 75
@@ -67,6 +85,7 @@ struct s3c24xx_spi {
67 struct s3c2410_spi_info *pdata; 85 struct s3c2410_spi_info *pdata;
68}; 86};
69 87
88
70#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) 89#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
71#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) 90#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
72 91
@@ -127,7 +146,7 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
127 } 146 }
128 147
129 if (spi->mode != cs->mode) { 148 if (spi->mode != cs->mode) {
130 u8 spcon = SPCON_DEFAULT; 149 u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
131 150
132 if (spi->mode & SPI_CPHA) 151 if (spi->mode & SPI_CPHA)
133 spcon |= S3C2410_SPCON_CPHA_FMTB; 152 spcon |= S3C2410_SPCON_CPHA_FMTB;
@@ -214,13 +233,196 @@ static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
214 return hw->tx ? hw->tx[count] : 0; 233 return hw->tx ? hw->tx[count] : 0;
215} 234}
216 235
236#ifdef CONFIG_SPI_S3C24XX_FIQ
237/* Support for FIQ based pseudo-DMA to improve the transfer speed.
238 *
239 * This code uses the assembly helper in spi_s3c24xx_fiq.S which is
240 * used by the FIQ core to move data between main memory and the peripheral
241 * block. Since this is code running on the processor, there is no problem
242 * with cache coherency of the buffers, so we can use any buffer we like.
243 */
244
245/**
246 * struct spi_fiq_code - FIQ code and header
247 * @length: The length of the code fragment, excluding this header.
248 * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
249 * @data: The code itself to install as a FIQ handler.
250 */
251struct spi_fiq_code {
252 u32 length;
253 u32 ack_offset;
254 u8 data[0];
255};
256
257extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
258extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
259extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
260
261/**
262 * ack_bit - turn IRQ into IRQ acknowledgement bit
263 * @irq: The interrupt number
264 *
265 * Returns the bit to write to the interrupt acknowledge register.
266 */
267static inline u32 ack_bit(unsigned int irq)
268{
269 return 1 << (irq - IRQ_EINT0);
270}
271
272/**
273 * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
274 * @hw: The hardware state.
275 *
276 * Claim the FIQ handler (only one can be active at any one time) and
277 * then setup the correct transfer code for this transfer.
278 *
279 * This call updates all the necessary state information if successful,
280 * so the caller does not need to do anything more than start the transfer
281 * as normal, since the IRQ will have been re-routed to the FIQ handler.
282*/
283void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
284{
285 struct pt_regs regs;
286 enum spi_fiq_mode mode;
287 struct spi_fiq_code *code;
288 int ret;
289
290 if (!hw->fiq_claimed) {
291 /* try and claim fiq if we haven't got it, and if not
292 * then return and simply use another transfer method */
293
294 ret = claim_fiq(&hw->fiq_handler);
295 if (ret)
296 return;
297 }
298
299 if (hw->tx && !hw->rx)
300 mode = FIQ_MODE_TX;
301 else if (hw->rx && !hw->tx)
302 mode = FIQ_MODE_RX;
303 else
304 mode = FIQ_MODE_TXRX;
305
306 regs.uregs[fiq_rspi] = (long)hw->regs;
307 regs.uregs[fiq_rrx] = (long)hw->rx;
308 regs.uregs[fiq_rtx] = (long)hw->tx + 1;
309 regs.uregs[fiq_rcount] = hw->len - 1;
310 regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
311
312 set_fiq_regs(&regs);
313
314 if (hw->fiq_mode != mode) {
315 u32 *ack_ptr;
316
317 hw->fiq_mode = mode;
318
319 switch (mode) {
320 case FIQ_MODE_TX:
321 code = &s3c24xx_spi_fiq_tx;
322 break;
323 case FIQ_MODE_RX:
324 code = &s3c24xx_spi_fiq_rx;
325 break;
326 case FIQ_MODE_TXRX:
327 code = &s3c24xx_spi_fiq_txrx;
328 break;
329 default:
330 code = NULL;
331 }
332
333 BUG_ON(!code);
334
335 ack_ptr = (u32 *)&code->data[code->ack_offset];
336 *ack_ptr = ack_bit(hw->irq);
337
338 set_fiq_handler(&code->data, code->length);
339 }
340
341 s3c24xx_set_fiq(hw->irq, true);
342
343 hw->fiq_mode = mode;
344 hw->fiq_inuse = 1;
345}
346
347/**
348 * s3c24xx_spi_fiqop - FIQ core code callback
349 * @pw: Data registered with the handler
350 * @release: Whether this is a release or a return.
351 *
352 * Called by the FIQ code when another module wants to use the FIQ, so
353 * return whether we are currently using this or not and then update our
354 * internal state.
355 */
356static int s3c24xx_spi_fiqop(void *pw, int release)
357{
358 struct s3c24xx_spi *hw = pw;
359 int ret = 0;
360
361 if (release) {
362 if (hw->fiq_inuse)
363 ret = -EBUSY;
364
365 /* note, we do not need to unroute the FIQ, as the FIQ
366 * vector code de-routes it to signal the end of transfer */
367
368 hw->fiq_mode = FIQ_MODE_NONE;
369 hw->fiq_claimed = 0;
370 } else {
371 hw->fiq_claimed = 1;
372 }
373
374 return ret;
375}
376
377/**
378 * s3c24xx_spi_initfiq - setup the information for the FIQ core
379 * @hw: The hardware state.
380 *
381 * Setup the fiq_handler block to pass to the FIQ core.
382 */
383static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
384{
385 hw->fiq_handler.dev_id = hw;
386 hw->fiq_handler.name = dev_name(hw->dev);
387 hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
388}
389
390/**
391 * s3c24xx_spi_usefiq - return if we should be using FIQ.
392 * @hw: The hardware state.
393 *
394 * Return true if the platform data specifies whether this channel is
395 * allowed to use the FIQ.
396 */
397static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
398{
399 return hw->pdata->use_fiq;
400}
401
402/**
403 * s3c24xx_spi_usingfiq - return if channel is using FIQ
404 * @spi: The hardware state.
405 *
406 * Return whether the channel is currently using the FIQ (separate from
407 * whether the FIQ is claimed).
408 */
409static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
410{
411 return spi->fiq_inuse;
412}
413#else
414
415static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
416static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
417static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
418static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
419
420#endif /* CONFIG_SPI_S3C24XX_FIQ */
421
217static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) 422static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
218{ 423{
219 struct s3c24xx_spi *hw = to_hw(spi); 424 struct s3c24xx_spi *hw = to_hw(spi);
220 425
221 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
222 t->tx_buf, t->rx_buf, t->len);
223
224 hw->tx = t->tx_buf; 426 hw->tx = t->tx_buf;
225 hw->rx = t->rx_buf; 427 hw->rx = t->rx_buf;
226 hw->len = t->len; 428 hw->len = t->len;
@@ -228,11 +430,14 @@ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
228 430
229 init_completion(&hw->done); 431 init_completion(&hw->done);
230 432
433 hw->fiq_inuse = 0;
434 if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
435 s3c24xx_spi_tryfiq(hw);
436
231 /* send the first byte */ 437 /* send the first byte */
232 writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); 438 writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
233 439
234 wait_for_completion(&hw->done); 440 wait_for_completion(&hw->done);
235
236 return hw->count; 441 return hw->count;
237} 442}
238 443
@@ -254,17 +459,27 @@ static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
254 goto irq_done; 459 goto irq_done;
255 } 460 }
256 461
257 hw->count++; 462 if (!s3c24xx_spi_usingfiq(hw)) {
463 hw->count++;
258 464
259 if (hw->rx) 465 if (hw->rx)
260 hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); 466 hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
261 467
262 count++; 468 count++;
469
470 if (count < hw->len)
471 writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
472 else
473 complete(&hw->done);
474 } else {
475 hw->count = hw->len;
476 hw->fiq_inuse = 0;
477
478 if (hw->rx)
479 hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
263 480
264 if (count < hw->len)
265 writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
266 else
267 complete(&hw->done); 481 complete(&hw->done);
482 }
268 483
269 irq_done: 484 irq_done:
270 return IRQ_HANDLED; 485 return IRQ_HANDLED;
@@ -322,6 +537,10 @@ static int __init s3c24xx_spi_probe(struct platform_device *pdev)
322 platform_set_drvdata(pdev, hw); 537 platform_set_drvdata(pdev, hw);
323 init_completion(&hw->done); 538 init_completion(&hw->done);
324 539
540 /* initialise fiq handler */
541
542 s3c24xx_spi_initfiq(hw);
543
325 /* setup the master state. */ 544 /* setup the master state. */
326 545
327 /* the spi->mode bits understood by this driver: */ 546 /* the spi->mode bits understood by this driver: */
@@ -489,7 +708,7 @@ static int s3c24xx_spi_resume(struct device *dev)
489 return 0; 708 return 0;
490} 709}
491 710
492static struct dev_pm_ops s3c24xx_spi_pmops = { 711static const struct dev_pm_ops s3c24xx_spi_pmops = {
493 .suspend = s3c24xx_spi_suspend, 712 .suspend = s3c24xx_spi_suspend,
494 .resume = s3c24xx_spi_resume, 713 .resume = s3c24xx_spi_resume,
495}; 714};
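The FIQ pseudo-DMA path added above is strictly opt-in: s3c24xx_spi_usefiq() only consults the use_fiq flag in the controller's platform data, and s3c24xx_spi_tryfiq() silently falls back when the FIQ cannot be claimed. A hedged board-file sketch of enabling it (only use_fiq appears in the hunks above; the other fields and the board name are assumptions):

#include <linux/platform_device.h>
#include <mach/spi.h>

static struct s3c2410_spi_info example_spi0_platdata = {
	.num_cs		= 1,
	.bus_num	= 0,
	.use_fiq	= 1,	/* allow s3c24xx_spi_tryfiq() for len >= 3 */
};

/* Attached to the SPI0 platform device during board setup, e.g.:
 *
 *	s3c_device_spi0.dev.platform_data = &example_spi0_platdata;
 */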
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi_s3c24xx_fiq.S
new file mode 100644
index 000000000000..3793cae361db
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.S
@@ -0,0 +1,116 @@
1/* linux/drivers/spi/spi_s3c24xx_fiq.S
2 *
3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX SPI - FIQ pseudo-DMA transfer code
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/linkage.h>
14#include <asm/assembler.h>
15
16#include <mach/map.h>
17#include <mach/regs-irq.h>
18#include <plat/regs-spi.h>
19
20#include "spi_s3c24xx_fiq.h"
21
22 .text
23
24 @ entry to these routines is as follows, with the register names
25 @ defined in fiq.h so that they can be shared with the C files which
26 @ setup the calling registers.
27 @
28 @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
29 @ fiq_rtmp Temporary register to hold tx/rx data
30 @ fiq_rspi The base of the SPI register block
31 @ fiq_rtx The tx buffer pointer
32 @ fiq_rrx The rx buffer pointer
33 @ fiq_rcount The number of bytes to move
34
35 @ each entry starts with a word entry of how long it is
36 @ and an offset to the irq acknowledgment word
37
38ENTRY(s3c24xx_spi_fiq_rx)
39s3c24xx_spi_fix_rx:
40 .word fiq_rx_end - fiq_rx_start
41 .word fiq_rx_irq_ack - fiq_rx_start
42fiq_rx_start:
43 ldr fiq_rtmp, fiq_rx_irq_ack
44 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
45
46 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
47 strb fiq_rtmp, [ fiq_rrx ], #1
48
49 mov fiq_rtmp, #0xff
50 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
51
52 subs fiq_rcount, fiq_rcount, #1
53 subnes pc, lr, #4 @@ return, still have work to do
54
55 @@ set IRQ controller so that next op will trigger IRQ
56 mov fiq_rtmp, #0
57 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
58 subs pc, lr, #4
59
60fiq_rx_irq_ack:
61 .word 0
62fiq_rx_end:
63
64ENTRY(s3c24xx_spi_fiq_txrx)
65s3c24xx_spi_fix_txrx:
66 .word fiq_txrx_end - fiq_txrx_start
67 .word fiq_txrx_irq_ack - fiq_txrx_start
68fiq_txrx_start:
69
70 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
71 strb fiq_rtmp, [ fiq_rrx ], #1
72
73 ldr fiq_rtmp, fiq_txrx_irq_ack
74 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
75
76 ldrb fiq_rtmp, [ fiq_rtx ], #1
77 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
78
79 subs fiq_rcount, fiq_rcount, #1
80 subnes pc, lr, #4 @@ return, still have work to do
81
82 mov fiq_rtmp, #0
83 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
84 subs pc, lr, #4
85
86fiq_txrx_irq_ack:
87 .word 0
88
89fiq_txrx_end:
90
91ENTRY(s3c24xx_spi_fiq_tx)
92s3c24xx_spi_fix_tx:
93 .word fiq_tx_end - fiq_tx_start
94 .word fiq_tx_irq_ack - fiq_tx_start
95fiq_tx_start:
96 ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
97
98 ldr fiq_rtmp, fiq_tx_irq_ack
99 str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
100
101 ldrb fiq_rtmp, [ fiq_rtx ], #1
102 strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
103
104 subs fiq_rcount, fiq_rcount, #1
105 subnes pc, lr, #4 @@ return, still have work to do
106
107 mov fiq_rtmp, #0
108 str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
109 subs pc, lr, #4
110
111fiq_tx_irq_ack:
112 .word 0
113
114fiq_tx_end:
115
116 .end
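Every fragment above starts with two header words, its length and the offset of the IRQ acknowledge word, matching struct spi_fiq_code declared in spi_s3c24xx.c. A kernel-context sketch of how the C side consumes that header before installing a fragment (the helper, its pr_debug and the local struct copy are illustrative):

#include <linux/kernel.h>
#include <asm/fiq.h>		/* set_fiq_handler() */
#include <mach/irqs.h>		/* IRQ_EINT0 */

/* mirrors the layout emitted by the two .word entries above */
struct spi_fiq_code {
	u32 length;
	u32 ack_offset;
	u8 data[0];
};

static void spi_fiq_install(struct spi_fiq_code *code, unsigned int irq)
{
	u32 *ack_ptr = (u32 *)&code->data[code->ack_offset];

	pr_debug("fiq fragment: %u bytes, ack word at offset %u\n",
		 code->length, code->ack_offset);

	/* patch in the SRCPND acknowledge bit for this channel's IRQ,
	 * then hand the fragment to the FIQ core */
	*ack_ptr = 1 << (irq - IRQ_EINT0);
	set_fiq_handler(&code->data, code->length);
}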
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi_s3c24xx_fiq.h
new file mode 100644
index 000000000000..a5950bb25b51
--- /dev/null
+++ b/drivers/spi/spi_s3c24xx_fiq.h
@@ -0,0 +1,26 @@
1/* linux/drivers/spi/spi_s3c24xx_fiq.h
2 *
3 * Copyright 2009 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX SPI - FIQ pseudo-DMA transfer support
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13/* We have R8 through R13 to play with */
14
15#ifdef __ASSEMBLY__
16#define __REG_NR(x) r##x
17#else
18#define __REG_NR(x) (x)
19#endif
20
21#define fiq_rspi __REG_NR(8)
22#define fiq_rtmp __REG_NR(9)
23#define fiq_rrx __REG_NR(10)
24#define fiq_rtx __REG_NR(11)
25#define fiq_rcount __REG_NR(12)
26#define fiq_rirq __REG_NR(13)
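The fiq_r* names above are deliberately usable from both sides: assembled into the .S file they expand to the banked FIQ registers r8..r13, while in C they are plain indices into pt_regs.uregs passed to set_fiq_regs(), so the two halves agree on which register carries which value. A small kernel-context sketch of the C half (the buffer pointers and length are placeholders):

#include <linux/kernel.h>
#include <asm/fiq.h>			/* set_fiq_regs() */
#include <asm/ptrace.h>			/* struct pt_regs */

#include "spi_s3c24xx_fiq.h"		/* fiq_rspi, fiq_rrx, ... as indices */

static void spi_fiq_load_regs(void __iomem *spi_regs, u8 *rx, const u8 *tx,
			      int len, void __iomem *irq_base)
{
	struct pt_regs regs;

	regs.uregs[fiq_rspi]   = (long)spi_regs;	/* r8:  SPI register block */
	regs.uregs[fiq_rrx]    = (long)rx;		/* r10: rx buffer pointer */
	regs.uregs[fiq_rtx]    = (long)tx + 1;		/* r11: tx pointer (byte 0 is sent from C) */
	regs.uregs[fiq_rcount] = len - 1;		/* r12: bytes still to move */
	regs.uregs[fiq_rirq]   = (long)irq_base;	/* r13: IRQ controller base */

	set_fiq_regs(&regs);	/* copies these into the banked FIQ registers */
}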
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
new file mode 100644
index 000000000000..97365815a729
--- /dev/null
+++ b/drivers/spi/spi_s3c64xx.c
@@ -0,0 +1,1183 @@
1/* linux/drivers/spi/spi_s3c64xx.c
2 *
3 * Copyright (C) 2009 Samsung Electronics Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/clk.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spi/spi.h>
29
30#include <mach/dma.h>
31#include <plat/s3c64xx-spi.h>
32
33/* Registers and bit-fields */
34
35#define S3C64XX_SPI_CH_CFG 0x00
36#define S3C64XX_SPI_CLK_CFG 0x04
37#define S3C64XX_SPI_MODE_CFG 0x08
38#define S3C64XX_SPI_SLAVE_SEL 0x0C
39#define S3C64XX_SPI_INT_EN 0x10
40#define S3C64XX_SPI_STATUS 0x14
41#define S3C64XX_SPI_TX_DATA 0x18
42#define S3C64XX_SPI_RX_DATA 0x1C
43#define S3C64XX_SPI_PACKET_CNT 0x20
44#define S3C64XX_SPI_PENDING_CLR 0x24
45#define S3C64XX_SPI_SWAP_CFG 0x28
46#define S3C64XX_SPI_FB_CLK 0x2C
47
48#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
49#define S3C64XX_SPI_CH_SW_RST (1<<5)
50#define S3C64XX_SPI_CH_SLAVE (1<<4)
51#define S3C64XX_SPI_CPOL_L (1<<3)
52#define S3C64XX_SPI_CPHA_B (1<<2)
53#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
54#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
55
56#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
57#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
58#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
59#define S3C64XX_SPI_PSR_MASK 0xff
60
61#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
62#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
63#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
64#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
65#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
66#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
67#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
68#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
69#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
70#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
71#define S3C64XX_SPI_MODE_4BURST (1<<0)
72
73#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
74#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
75
76#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
77
78#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
79 (c)->regs + S3C64XX_SPI_SLAVE_SEL)
80
81#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
82#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
83#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
84#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
85#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
86#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
87#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
88
89#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
90#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
91#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
92#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
93#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
94#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
95
96#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
97
98#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
99#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
100#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
101#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
102#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
103
104#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
105#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
106#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
107#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
108#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
109#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
110#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
111#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
112
113#define S3C64XX_SPI_FBCLK_MSK (3<<0)
114
115#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
116 (((i)->fifo_lvl_mask + 1))) \
117 ? 1 : 0)
118
119#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
120 (((i)->fifo_lvl_mask + 1) << 1)) \
121 ? 1 : 0)
122#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
123#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
124
125#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
126#define S3C64XX_SPI_TRAILCNT_OFF 19
127
128#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
129
130#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
131
132#define SUSPND (1<<0)
133#define SPIBUSY (1<<1)
134#define RXBUSY (1<<2)
135#define TXBUSY (1<<3)
136
137/**
138 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
139 * @clk: Pointer to the spi clock.
140 * @src_clk: Pointer to the clock used to generate SPI signals.
141 * @master: Pointer to the SPI Protocol master.
142 * @workqueue: Work queue for the SPI xfer requests.
143 * @cntrlr_info: Platform specific data for the controller this driver manages.
144 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
145 * @work: Work item that runs the queued SPI messages.
146 * @queue: To log SPI xfer requests.
147 * @lock: Controller specific lock.
148 * @state: Set of FLAGS to indicate status.
149 * @rx_dmach: Controller's DMA channel for Rx.
150 * @tx_dmach: Controller's DMA channel for Tx.
151 * @sfr_start: BUS address of SPI controller regs.
152 * @regs: Pointer to ioremap'ed controller registers.
153 * @xfer_completion: To indicate completion of xfer task.
154 * @cur_mode: Stores the active configuration of the controller.
155 * @cur_bpw: Stores the active bits per word settings.
156 * @cur_speed: Stores the active xfer clock speed.
157 */
158struct s3c64xx_spi_driver_data {
159 void __iomem *regs;
160 struct clk *clk;
161 struct clk *src_clk;
162 struct platform_device *pdev;
163 struct spi_master *master;
164 struct workqueue_struct *workqueue;
165 struct s3c64xx_spi_info *cntrlr_info;
166 struct spi_device *tgl_spi;
167 struct work_struct work;
168 struct list_head queue;
169 spinlock_t lock;
170 enum dma_ch rx_dmach;
171 enum dma_ch tx_dmach;
172 unsigned long sfr_start;
173 struct completion xfer_completion;
174 unsigned state;
175 unsigned cur_mode, cur_bpw;
176 unsigned cur_speed;
177};
178
179static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
180 .name = "samsung-spi-dma",
181};
182
183static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
184{
185 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
186 void __iomem *regs = sdd->regs;
187 unsigned long loops;
188 u32 val;
189
190 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
191
192 val = readl(regs + S3C64XX_SPI_CH_CFG);
193 val |= S3C64XX_SPI_CH_SW_RST;
194 val &= ~S3C64XX_SPI_CH_HS_EN;
195 writel(val, regs + S3C64XX_SPI_CH_CFG);
196
197 /* Flush TxFIFO*/
198 loops = msecs_to_loops(1);
199 do {
200 val = readl(regs + S3C64XX_SPI_STATUS);
201 } while (TX_FIFO_LVL(val, sci) && loops--);
202
203 /* Flush RxFIFO*/
204 loops = msecs_to_loops(1);
205 do {
206 val = readl(regs + S3C64XX_SPI_STATUS);
207 if (RX_FIFO_LVL(val, sci))
208 readl(regs + S3C64XX_SPI_RX_DATA);
209 else
210 break;
211 } while (loops--);
212
213 val = readl(regs + S3C64XX_SPI_CH_CFG);
214 val &= ~S3C64XX_SPI_CH_SW_RST;
215 writel(val, regs + S3C64XX_SPI_CH_CFG);
216
217 val = readl(regs + S3C64XX_SPI_MODE_CFG);
218 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
219 writel(val, regs + S3C64XX_SPI_MODE_CFG);
220
221 val = readl(regs + S3C64XX_SPI_CH_CFG);
222 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
223 writel(val, regs + S3C64XX_SPI_CH_CFG);
224}
225
226static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
227 struct spi_device *spi,
228 struct spi_transfer *xfer, int dma_mode)
229{
230 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
231 void __iomem *regs = sdd->regs;
232 u32 modecfg, chcfg;
233
234 modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
235 modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
236
237 chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
238 chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
239
240 if (dma_mode) {
241 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
242 } else {
243 			/* Always shift data into the FIFO, even if the xfer is Tx only;
244 			 * this lets PCKT_CNT be programmed so that exactly the required
245 			 * number of clocks is generated.
246 			 */
247 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
248 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
249 | S3C64XX_SPI_PACKET_CNT_EN,
250 regs + S3C64XX_SPI_PACKET_CNT);
251 }
252
253 if (xfer->tx_buf != NULL) {
254 sdd->state |= TXBUSY;
255 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
256 if (dma_mode) {
257 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
258 s3c2410_dma_config(sdd->tx_dmach, 1);
259 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
260 xfer->tx_dma, xfer->len);
261 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
262 } else {
263 unsigned char *buf = (unsigned char *) xfer->tx_buf;
264 int i = 0;
265 while (i < xfer->len)
266 writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
267 }
268 }
269
270 if (xfer->rx_buf != NULL) {
271 sdd->state |= RXBUSY;
272
273 if (sci->high_speed && sdd->cur_speed >= 30000000UL
274 && !(sdd->cur_mode & SPI_CPHA))
275 chcfg |= S3C64XX_SPI_CH_HS_EN;
276
277 if (dma_mode) {
278 modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
279 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
280 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
281 | S3C64XX_SPI_PACKET_CNT_EN,
282 regs + S3C64XX_SPI_PACKET_CNT);
283 s3c2410_dma_config(sdd->rx_dmach, 1);
284 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
285 xfer->rx_dma, xfer->len);
286 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
287 }
288 }
289
290 writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
291 writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
292}
293
294static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
295 struct spi_device *spi)
296{
297 struct s3c64xx_spi_csinfo *cs;
298
299 if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
300 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
301 /* Deselect the last toggled device */
302 cs = sdd->tgl_spi->controller_data;
303 cs->set_level(cs->line,
304 spi->mode & SPI_CS_HIGH ? 0 : 1);
305 }
306 sdd->tgl_spi = NULL;
307 }
308
309 cs = spi->controller_data;
310 cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
311}
312
313static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
314 struct spi_transfer *xfer, int dma_mode)
315{
316 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
317 void __iomem *regs = sdd->regs;
318 unsigned long val;
319 int ms;
320
321 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
322 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
323 ms += 5; /* some tolerance */
324
325 if (dma_mode) {
326 val = msecs_to_jiffies(ms) + 10;
327 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
328 } else {
329 val = msecs_to_loops(ms);
330 do {
331 val = readl(regs + S3C64XX_SPI_STATUS);
332 } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
333 }
334
335 if (!val)
336 return -EIO;
337
338 if (dma_mode) {
339 u32 status;
340
341 		/*
342 		 * DmaTx completes as soon as the data has been written into
343 		 * the FIFO, without waiting for the bus transmission to end.
344 		 * DmaRx completes only after the DMA has drained the FIFO,
345 		 * which requires the bus transmission to finish, so no extra
346 		 * wait is needed when the xfer involves Rx (with or without Tx).
347 		 */
348 if (xfer->rx_buf == NULL) {
349 val = msecs_to_loops(10);
350 status = readl(regs + S3C64XX_SPI_STATUS);
351 while ((TX_FIFO_LVL(status, sci)
352 || !S3C64XX_SPI_ST_TX_DONE(status, sci))
353 && --val) {
354 cpu_relax();
355 status = readl(regs + S3C64XX_SPI_STATUS);
356 }
357
358 if (!val)
359 return -EIO;
360 }
361 } else {
362 unsigned char *buf;
363 int i;
364
365 /* If it was only Tx */
366 if (xfer->rx_buf == NULL) {
367 sdd->state &= ~TXBUSY;
368 return 0;
369 }
370
371 i = 0;
372 buf = xfer->rx_buf;
373 while (i < xfer->len)
374 buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
375
376 sdd->state &= ~RXBUSY;
377 }
378
379 return 0;
380}
381
382static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
383 struct spi_device *spi)
384{
385 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
386
387 if (sdd->tgl_spi == spi)
388 sdd->tgl_spi = NULL;
389
390 cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
391}
392
393static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
394{
395 void __iomem *regs = sdd->regs;
396 u32 val;
397
398 /* Disable Clock */
399 val = readl(regs + S3C64XX_SPI_CLK_CFG);
400 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
401 writel(val, regs + S3C64XX_SPI_CLK_CFG);
402
403 /* Set Polarity and Phase */
404 val = readl(regs + S3C64XX_SPI_CH_CFG);
405 val &= ~(S3C64XX_SPI_CH_SLAVE |
406 S3C64XX_SPI_CPOL_L |
407 S3C64XX_SPI_CPHA_B);
408
409 if (sdd->cur_mode & SPI_CPOL)
410 val |= S3C64XX_SPI_CPOL_L;
411
412 if (sdd->cur_mode & SPI_CPHA)
413 val |= S3C64XX_SPI_CPHA_B;
414
415 writel(val, regs + S3C64XX_SPI_CH_CFG);
416
417 /* Set Channel & DMA Mode */
418 val = readl(regs + S3C64XX_SPI_MODE_CFG);
419 val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
420 | S3C64XX_SPI_MODE_CH_TSZ_MASK);
421
422 switch (sdd->cur_bpw) {
423 case 32:
424 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
425 break;
426 case 16:
427 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
428 break;
429 default:
430 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
431 break;
432 }
433 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
434
435 writel(val, regs + S3C64XX_SPI_MODE_CFG);
436
437 /* Configure Clock */
438 val = readl(regs + S3C64XX_SPI_CLK_CFG);
439 val &= ~S3C64XX_SPI_PSR_MASK;
440 val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
441 & S3C64XX_SPI_PSR_MASK);
442 writel(val, regs + S3C64XX_SPI_CLK_CFG);
443
444 /* Enable Clock */
445 val = readl(regs + S3C64XX_SPI_CLK_CFG);
446 val |= S3C64XX_SPI_ENCLK_ENABLE;
447 writel(val, regs + S3C64XX_SPI_CLK_CFG);
448}
449
450void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
451 int size, enum s3c2410_dma_buffresult res)
452{
453 struct s3c64xx_spi_driver_data *sdd = buf_id;
454 unsigned long flags;
455
456 spin_lock_irqsave(&sdd->lock, flags);
457
458 if (res == S3C2410_RES_OK)
459 sdd->state &= ~RXBUSY;
460 else
461 dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
462
463 	/* If the other channel is also done */
464 if (!(sdd->state & TXBUSY))
465 complete(&sdd->xfer_completion);
466
467 spin_unlock_irqrestore(&sdd->lock, flags);
468}
469
470void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
471 int size, enum s3c2410_dma_buffresult res)
472{
473 struct s3c64xx_spi_driver_data *sdd = buf_id;
474 unsigned long flags;
475
476 spin_lock_irqsave(&sdd->lock, flags);
477
478 if (res == S3C2410_RES_OK)
479 sdd->state &= ~TXBUSY;
480 else
481 		dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d\n", size);
482
483 	/* If the other channel is also done */
484 if (!(sdd->state & RXBUSY))
485 complete(&sdd->xfer_completion);
486
487 spin_unlock_irqrestore(&sdd->lock, flags);
488}
489
490#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
491
492static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
493 struct spi_message *msg)
494{
495 struct device *dev = &sdd->pdev->dev;
496 struct spi_transfer *xfer;
497
498 if (msg->is_dma_mapped)
499 return 0;
500
501 /* First mark all xfer unmapped */
502 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
503 xfer->rx_dma = XFER_DMAADDR_INVALID;
504 xfer->tx_dma = XFER_DMAADDR_INVALID;
505 }
506
507 /* Map until end or first fail */
508 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
509
510 if (xfer->tx_buf != NULL) {
511 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
512 xfer->len, DMA_TO_DEVICE);
513 if (dma_mapping_error(dev, xfer->tx_dma)) {
514 dev_err(dev, "dma_map_single Tx failed\n");
515 xfer->tx_dma = XFER_DMAADDR_INVALID;
516 return -ENOMEM;
517 }
518 }
519
520 if (xfer->rx_buf != NULL) {
521 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
522 xfer->len, DMA_FROM_DEVICE);
523 if (dma_mapping_error(dev, xfer->rx_dma)) {
524 dev_err(dev, "dma_map_single Rx failed\n");
525 dma_unmap_single(dev, xfer->tx_dma,
526 xfer->len, DMA_TO_DEVICE);
527 xfer->tx_dma = XFER_DMAADDR_INVALID;
528 xfer->rx_dma = XFER_DMAADDR_INVALID;
529 return -ENOMEM;
530 }
531 }
532 }
533
534 return 0;
535}
536
537static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
538 struct spi_message *msg)
539{
540 struct device *dev = &sdd->pdev->dev;
541 struct spi_transfer *xfer;
542
543 if (msg->is_dma_mapped)
544 return;
545
546 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
547
548 if (xfer->rx_buf != NULL
549 && xfer->rx_dma != XFER_DMAADDR_INVALID)
550 dma_unmap_single(dev, xfer->rx_dma,
551 xfer->len, DMA_FROM_DEVICE);
552
553 if (xfer->tx_buf != NULL
554 && xfer->tx_dma != XFER_DMAADDR_INVALID)
555 dma_unmap_single(dev, xfer->tx_dma,
556 xfer->len, DMA_TO_DEVICE);
557 }
558}
559
560static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
561 struct spi_message *msg)
562{
563 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
564 struct spi_device *spi = msg->spi;
565 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
566 struct spi_transfer *xfer;
567 int status = 0, cs_toggle = 0;
568 u32 speed;
569 u8 bpw;
570
571 	/* If the master's (controller) state differs from what the slave needs */
572 if (sdd->cur_speed != spi->max_speed_hz
573 || sdd->cur_mode != spi->mode
574 || sdd->cur_bpw != spi->bits_per_word) {
575 sdd->cur_bpw = spi->bits_per_word;
576 sdd->cur_speed = spi->max_speed_hz;
577 sdd->cur_mode = spi->mode;
578 s3c64xx_spi_config(sdd);
579 }
580
581 /* Map all the transfers if needed */
582 if (s3c64xx_spi_map_mssg(sdd, msg)) {
583 dev_err(&spi->dev,
584 "Xfer: Unable to map message buffers!\n");
585 status = -ENOMEM;
586 goto out;
587 }
588
589 /* Configure feedback delay */
590 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
591
592 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
593
594 unsigned long flags;
595 int use_dma;
596
597 INIT_COMPLETION(sdd->xfer_completion);
598
599 /* Only BPW and Speed may change across transfers */
600 bpw = xfer->bits_per_word ? : spi->bits_per_word;
601 speed = xfer->speed_hz ? : spi->max_speed_hz;
602
603 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
604 sdd->cur_bpw = bpw;
605 sdd->cur_speed = speed;
606 s3c64xx_spi_config(sdd);
607 }
608
609 /* Polling method for xfers not bigger than FIFO capacity */
610 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
611 use_dma = 0;
612 else
613 use_dma = 1;
614
615 spin_lock_irqsave(&sdd->lock, flags);
616
617 		/* Clear the busy flags; enable_datapath() sets only what this xfer needs */
618 sdd->state &= ~RXBUSY;
619 sdd->state &= ~TXBUSY;
620
621 enable_datapath(sdd, spi, xfer, use_dma);
622
623 /* Slave Select */
624 enable_cs(sdd, spi);
625
626 /* Start the signals */
627 S3C64XX_SPI_ACT(sdd);
628
629 spin_unlock_irqrestore(&sdd->lock, flags);
630
631 status = wait_for_xfer(sdd, xfer, use_dma);
632
633 		/* Quiesce the signals */
634 S3C64XX_SPI_DEACT(sdd);
635
636 if (status) {
637 dev_err(&spi->dev, "I/O Error: "
638 "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
639 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
640 (sdd->state & RXBUSY) ? 'f' : 'p',
641 (sdd->state & TXBUSY) ? 'f' : 'p',
642 xfer->len);
643
644 if (use_dma) {
645 if (xfer->tx_buf != NULL
646 && (sdd->state & TXBUSY))
647 s3c2410_dma_ctrl(sdd->tx_dmach,
648 S3C2410_DMAOP_FLUSH);
649 if (xfer->rx_buf != NULL
650 && (sdd->state & RXBUSY))
651 s3c2410_dma_ctrl(sdd->rx_dmach,
652 S3C2410_DMAOP_FLUSH);
653 }
654
655 goto out;
656 }
657
658 if (xfer->delay_usecs)
659 udelay(xfer->delay_usecs);
660
661 if (xfer->cs_change) {
662 			/* Hint that the next mssg is going to be
663 			   for the same device */
664 if (list_is_last(&xfer->transfer_list,
665 &msg->transfers))
666 cs_toggle = 1;
667 else
668 disable_cs(sdd, spi);
669 }
670
671 msg->actual_length += xfer->len;
672
673 flush_fifo(sdd);
674 }
675
676out:
677 if (!cs_toggle || status)
678 disable_cs(sdd, spi);
679 else
680 sdd->tgl_spi = spi;
681
682 s3c64xx_spi_unmap_mssg(sdd, msg);
683
684 msg->status = status;
685
686 if (msg->complete)
687 msg->complete(msg->context);
688}
689
690static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
691{
692 if (s3c2410_dma_request(sdd->rx_dmach,
693 &s3c64xx_spi_dma_client, NULL) < 0) {
694 dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
695 return 0;
696 }
697 s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
698 s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
699 sdd->sfr_start + S3C64XX_SPI_RX_DATA);
700
701 if (s3c2410_dma_request(sdd->tx_dmach,
702 &s3c64xx_spi_dma_client, NULL) < 0) {
703 dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
704 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
705 return 0;
706 }
707 s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
708 s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
709 sdd->sfr_start + S3C64XX_SPI_TX_DATA);
710
711 return 1;
712}
713
714static void s3c64xx_spi_work(struct work_struct *work)
715{
716 struct s3c64xx_spi_driver_data *sdd = container_of(work,
717 struct s3c64xx_spi_driver_data, work);
718 unsigned long flags;
719
720 /* Acquire DMA channels */
721 while (!acquire_dma(sdd))
722 msleep(10);
723
724 spin_lock_irqsave(&sdd->lock, flags);
725
726 while (!list_empty(&sdd->queue)
727 && !(sdd->state & SUSPND)) {
728
729 struct spi_message *msg;
730
731 msg = container_of(sdd->queue.next, struct spi_message, queue);
732
733 list_del_init(&msg->queue);
734
735 /* Set Xfer busy flag */
736 sdd->state |= SPIBUSY;
737
738 spin_unlock_irqrestore(&sdd->lock, flags);
739
740 handle_msg(sdd, msg);
741
742 spin_lock_irqsave(&sdd->lock, flags);
743
744 sdd->state &= ~SPIBUSY;
745 }
746
747 spin_unlock_irqrestore(&sdd->lock, flags);
748
749 /* Free DMA channels */
750 s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
751 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
752}
753
754static int s3c64xx_spi_transfer(struct spi_device *spi,
755 struct spi_message *msg)
756{
757 struct s3c64xx_spi_driver_data *sdd;
758 unsigned long flags;
759
760 sdd = spi_master_get_devdata(spi->master);
761
762 spin_lock_irqsave(&sdd->lock, flags);
763
764 if (sdd->state & SUSPND) {
765 spin_unlock_irqrestore(&sdd->lock, flags);
766 return -ESHUTDOWN;
767 }
768
769 msg->status = -EINPROGRESS;
770 msg->actual_length = 0;
771
772 list_add_tail(&msg->queue, &sdd->queue);
773
774 queue_work(sdd->workqueue, &sdd->work);
775
776 spin_unlock_irqrestore(&sdd->lock, flags);
777
778 return 0;
779}
780
781/*
782 * Here we only check the validity of requested configuration
783 * and save the configuration in a local data-structure.
784 * The controller is actually configured only just before we
785 * get a message to transfer.
786 */
787static int s3c64xx_spi_setup(struct spi_device *spi)
788{
789 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
790 struct s3c64xx_spi_driver_data *sdd;
791 struct s3c64xx_spi_info *sci;
792 struct spi_message *msg;
793 u32 psr, speed;
794 unsigned long flags;
795 int err = 0;
796
797 if (cs == NULL || cs->set_level == NULL) {
798 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
799 return -ENODEV;
800 }
801
802 sdd = spi_master_get_devdata(spi->master);
803 sci = sdd->cntrlr_info;
804
805 spin_lock_irqsave(&sdd->lock, flags);
806
807 list_for_each_entry(msg, &sdd->queue, queue) {
808 		/* Is some mssg already queued for this device? */
809 if (msg->spi == spi) {
810 dev_err(&spi->dev,
811 "setup: attempt while mssg in queue!\n");
812 spin_unlock_irqrestore(&sdd->lock, flags);
813 return -EBUSY;
814 }
815 }
816
817 if (sdd->state & SUSPND) {
818 spin_unlock_irqrestore(&sdd->lock, flags);
819 dev_err(&spi->dev,
820 "setup: SPI-%d not active!\n", spi->master->bus_num);
821 return -ESHUTDOWN;
822 }
823
824 spin_unlock_irqrestore(&sdd->lock, flags);
825
826 if (spi->bits_per_word != 8
827 && spi->bits_per_word != 16
828 && spi->bits_per_word != 32) {
829 dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
830 spi->bits_per_word);
831 err = -EINVAL;
832 goto setup_exit;
833 }
834
835 /* Check if we can provide the requested rate */
836 speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */
837
838 if (spi->max_speed_hz > speed)
839 spi->max_speed_hz = speed;
840
841 psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
842 psr &= S3C64XX_SPI_PSR_MASK;
843 if (psr == S3C64XX_SPI_PSR_MASK)
844 psr--;
845
846 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
847 if (spi->max_speed_hz < speed) {
848 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
849 psr++;
850 } else {
851 err = -EINVAL;
852 goto setup_exit;
853 }
854 }
855
856 speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
857 if (spi->max_speed_hz >= speed)
858 spi->max_speed_hz = speed;
859 else
860 err = -EINVAL;
861
862setup_exit:
863
864 /* setup() returns with device de-selected */
865 disable_cs(sdd, spi);
866
867 return err;
868}
869
870static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
871{
872 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
873 void __iomem *regs = sdd->regs;
874 unsigned int val;
875
876 sdd->cur_speed = 0;
877
878 S3C64XX_SPI_DEACT(sdd);
879
880 /* Disable Interrupts - we use Polling if not DMA mode */
881 writel(0, regs + S3C64XX_SPI_INT_EN);
882
883 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
884 regs + S3C64XX_SPI_CLK_CFG);
885 writel(0, regs + S3C64XX_SPI_MODE_CFG);
886 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
887
888 /* Clear any irq pending bits */
889 writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
890 regs + S3C64XX_SPI_PENDING_CLR);
891
892 writel(0, regs + S3C64XX_SPI_SWAP_CFG);
893
894 val = readl(regs + S3C64XX_SPI_MODE_CFG);
895 val &= ~S3C64XX_SPI_MODE_4BURST;
896 val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
897 val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
898 writel(val, regs + S3C64XX_SPI_MODE_CFG);
899
900 flush_fifo(sdd);
901}
902
903static int __init s3c64xx_spi_probe(struct platform_device *pdev)
904{
905 struct resource *mem_res, *dmatx_res, *dmarx_res;
906 struct s3c64xx_spi_driver_data *sdd;
907 struct s3c64xx_spi_info *sci;
908 struct spi_master *master;
909 int ret;
910
911 if (pdev->id < 0) {
912 dev_err(&pdev->dev,
913 "Invalid platform device id-%d\n", pdev->id);
914 return -ENODEV;
915 }
916
917 if (pdev->dev.platform_data == NULL) {
918 dev_err(&pdev->dev, "platform_data missing!\n");
919 return -ENODEV;
920 }
921
922 	/* Check for availability of the necessary resources */
923
924 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
925 if (dmatx_res == NULL) {
926 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
927 return -ENXIO;
928 }
929
930 dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
931 if (dmarx_res == NULL) {
932 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
933 return -ENXIO;
934 }
935
936 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
937 if (mem_res == NULL) {
938 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
939 return -ENXIO;
940 }
941
942 master = spi_alloc_master(&pdev->dev,
943 sizeof(struct s3c64xx_spi_driver_data));
944 if (master == NULL) {
945 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
946 return -ENOMEM;
947 }
948
949 sci = pdev->dev.platform_data;
950
951 platform_set_drvdata(pdev, master);
952
953 sdd = spi_master_get_devdata(master);
954 sdd->master = master;
955 sdd->cntrlr_info = sci;
956 sdd->pdev = pdev;
957 sdd->sfr_start = mem_res->start;
958 sdd->tx_dmach = dmatx_res->start;
959 sdd->rx_dmach = dmarx_res->start;
960
961 sdd->cur_bpw = 8;
962
963 master->bus_num = pdev->id;
964 master->setup = s3c64xx_spi_setup;
965 master->transfer = s3c64xx_spi_transfer;
966 master->num_chipselect = sci->num_cs;
967 master->dma_alignment = 8;
968 /* the spi->mode bits understood by this driver: */
969 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
970
971 if (request_mem_region(mem_res->start,
972 resource_size(mem_res), pdev->name) == NULL) {
973 dev_err(&pdev->dev, "Req mem region failed\n");
974 ret = -ENXIO;
975 goto err0;
976 }
977
978 sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
979 if (sdd->regs == NULL) {
980 dev_err(&pdev->dev, "Unable to remap IO\n");
981 ret = -ENXIO;
982 goto err1;
983 }
984
985 if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
986 dev_err(&pdev->dev, "Unable to config gpio\n");
987 ret = -EBUSY;
988 goto err2;
989 }
990
991 /* Setup clocks */
992 sdd->clk = clk_get(&pdev->dev, "spi");
993 if (IS_ERR(sdd->clk)) {
994 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
995 ret = PTR_ERR(sdd->clk);
996 goto err3;
997 }
998
999 if (clk_enable(sdd->clk)) {
1000 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
1001 ret = -EBUSY;
1002 goto err4;
1003 }
1004
1005 sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
1006 if (IS_ERR(sdd->src_clk)) {
1007 dev_err(&pdev->dev,
1008 "Unable to acquire clock '%s'\n", sci->src_clk_name);
1009 ret = PTR_ERR(sdd->src_clk);
1010 goto err5;
1011 }
1012
1013 if (clk_enable(sdd->src_clk)) {
1014 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
1015 sci->src_clk_name);
1016 ret = -EBUSY;
1017 goto err6;
1018 }
1019
1020 sdd->workqueue = create_singlethread_workqueue(
1021 dev_name(master->dev.parent));
1022 if (sdd->workqueue == NULL) {
1023 dev_err(&pdev->dev, "Unable to create workqueue\n");
1024 ret = -ENOMEM;
1025 goto err7;
1026 }
1027
1028 	/* Set up default mode */
1029 s3c64xx_spi_hwinit(sdd, pdev->id);
1030
1031 spin_lock_init(&sdd->lock);
1032 init_completion(&sdd->xfer_completion);
1033 INIT_WORK(&sdd->work, s3c64xx_spi_work);
1034 INIT_LIST_HEAD(&sdd->queue);
1035
1036 if (spi_register_master(master)) {
1037 dev_err(&pdev->dev, "cannot register SPI master\n");
1038 ret = -EBUSY;
1039 goto err8;
1040 }
1041
1042 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
1043 "with %d Slaves attached\n",
1044 pdev->id, master->num_chipselect);
1045 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
1046 					mem_res->start, mem_res->end,
1047 sdd->rx_dmach, sdd->tx_dmach);
1048
1049 return 0;
1050
1051err8:
1052 destroy_workqueue(sdd->workqueue);
1053err7:
1054 clk_disable(sdd->src_clk);
1055err6:
1056 clk_put(sdd->src_clk);
1057err5:
1058 clk_disable(sdd->clk);
1059err4:
1060 clk_put(sdd->clk);
1061err3:
1062err2:
1063 iounmap((void *) sdd->regs);
1064err1:
1065 release_mem_region(mem_res->start, resource_size(mem_res));
1066err0:
1067 platform_set_drvdata(pdev, NULL);
1068 spi_master_put(master);
1069
1070 return ret;
1071}
1072
1073static int s3c64xx_spi_remove(struct platform_device *pdev)
1074{
1075 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1076 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1077 struct resource *mem_res;
1078 unsigned long flags;
1079
1080 spin_lock_irqsave(&sdd->lock, flags);
1081 sdd->state |= SUSPND;
1082 spin_unlock_irqrestore(&sdd->lock, flags);
1083
1084 while (sdd->state & SPIBUSY)
1085 msleep(10);
1086
1087 spi_unregister_master(master);
1088
1089 destroy_workqueue(sdd->workqueue);
1090
1091 clk_disable(sdd->src_clk);
1092 clk_put(sdd->src_clk);
1093
1094 clk_disable(sdd->clk);
1095 clk_put(sdd->clk);
1096
1097 iounmap((void *) sdd->regs);
1098
1099 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1100 if (mem_res != NULL)
1101 release_mem_region(mem_res->start, resource_size(mem_res));
1102
1103 platform_set_drvdata(pdev, NULL);
1104 spi_master_put(master);
1105
1106 return 0;
1107}
1108
1109#ifdef CONFIG_PM
1110static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1111{
1112 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1113 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1114 unsigned long flags;
1115
1116 spin_lock_irqsave(&sdd->lock, flags);
1117 sdd->state |= SUSPND;
1118 spin_unlock_irqrestore(&sdd->lock, flags);
1119
1120 while (sdd->state & SPIBUSY)
1121 msleep(10);
1122
1123 /* Disable the clock */
1124 clk_disable(sdd->src_clk);
1125 clk_disable(sdd->clk);
1126
1127 sdd->cur_speed = 0; /* Output Clock is stopped */
1128
1129 return 0;
1130}
1131
1132static int s3c64xx_spi_resume(struct platform_device *pdev)
1133{
1134 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1135 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1136 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1137 unsigned long flags;
1138
1139 sci->cfg_gpio(pdev);
1140
1141 /* Enable the clock */
1142 clk_enable(sdd->src_clk);
1143 clk_enable(sdd->clk);
1144
1145 s3c64xx_spi_hwinit(sdd, pdev->id);
1146
1147 spin_lock_irqsave(&sdd->lock, flags);
1148 sdd->state &= ~SUSPND;
1149 spin_unlock_irqrestore(&sdd->lock, flags);
1150
1151 return 0;
1152}
1153#else
1154#define s3c64xx_spi_suspend NULL
1155#define s3c64xx_spi_resume NULL
1156#endif /* CONFIG_PM */
1157
1158static struct platform_driver s3c64xx_spi_driver = {
1159 .driver = {
1160 .name = "s3c64xx-spi",
1161 .owner = THIS_MODULE,
1162 },
1163 .remove = s3c64xx_spi_remove,
1164 .suspend = s3c64xx_spi_suspend,
1165 .resume = s3c64xx_spi_resume,
1166};
1167MODULE_ALIAS("platform:s3c64xx-spi");
1168
1169static int __init s3c64xx_spi_init(void)
1170{
1171 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1172}
1173module_init(s3c64xx_spi_init);
1174
1175static void __exit s3c64xx_spi_exit(void)
1176{
1177 platform_driver_unregister(&s3c64xx_spi_driver);
1178}
1179module_exit(s3c64xx_spi_exit);
1180
1181MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1182MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1183MODULE_LICENSE("GPL");
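A note on the clock arithmetic used by s3c64xx_spi_setup() and s3c64xx_spi_config(): the bit clock is derived as src_clk / 2 / (PSR + 1), where PSR is the 8-bit prescaler masked by S3C64XX_SPI_PSR_MASK, and the driver bumps PSR until the result no longer exceeds the requested rate. A minimal sketch with assumed example numbers follows; the helper is illustrative, not a driver API.

/* Effective bit clock for a given prescaler value (sketch only). */
static unsigned long s3c64xx_actual_hz(unsigned long src_rate, unsigned int psr)
{
	return src_rate / 2 / (psr + 1);
}

/*
 * Worked example with assumed figures: a 66 MHz source clock and a request
 * for 10 MHz. psr = 66000000 / 2 / 10000000 - 1 = 2 gives 11 MHz, which
 * overshoots the request, so setup() increments psr to 3 and the device
 * ends up clocked at 66000000 / 2 / 4 = 8.25 MHz.
 */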
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi_sh_msiof.c
new file mode 100644
index 000000000000..d93b66743ba7
--- /dev/null
+++ b/drivers/spi/spi_sh_msiof.c
@@ -0,0 +1,688 @@
1/*
2 * SuperH MSIOF SPI Master Interface
3 *
4 * Copyright (c) 2009 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/delay.h>
15#include <linux/interrupt.h>
16#include <linux/platform_device.h>
17#include <linux/completion.h>
18#include <linux/pm_runtime.h>
19#include <linux/gpio.h>
20#include <linux/bitmap.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/err.h>
24
25#include <linux/spi/spi.h>
26#include <linux/spi/spi_bitbang.h>
27#include <linux/spi/sh_msiof.h>
28
29#include <asm/unaligned.h>
30
31struct sh_msiof_spi_priv {
32 struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */
33 void __iomem *mapbase;
34 struct clk *clk;
35 struct platform_device *pdev;
36 struct sh_msiof_spi_info *info;
37 struct completion done;
38 unsigned long flags;
39 int tx_fifo_size;
40 int rx_fifo_size;
41};
42
43#define TMDR1 0x00
44#define TMDR2 0x04
45#define TMDR3 0x08
46#define RMDR1 0x10
47#define RMDR2 0x14
48#define RMDR3 0x18
49#define TSCR 0x20
50#define RSCR 0x22
51#define CTR 0x28
52#define FCTR 0x30
53#define STR 0x40
54#define IER 0x44
55#define TDR1 0x48
56#define TDR2 0x4c
57#define TFDR 0x50
58#define RDR1 0x58
59#define RDR2 0x5c
60#define RFDR 0x60
61
62#define CTR_TSCKE (1 << 15)
63#define CTR_TFSE (1 << 14)
64#define CTR_TXE (1 << 9)
65#define CTR_RXE (1 << 8)
66
67#define STR_TEOF (1 << 23)
68#define STR_REOF (1 << 7)
69
70static unsigned long sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
71{
72 switch (reg_offs) {
73 case TSCR:
74 case RSCR:
75 return ioread16(p->mapbase + reg_offs);
76 default:
77 return ioread32(p->mapbase + reg_offs);
78 }
79}
80
81static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
82 unsigned long value)
83{
84 switch (reg_offs) {
85 case TSCR:
86 case RSCR:
87 iowrite16(value, p->mapbase + reg_offs);
88 break;
89 default:
90 iowrite32(value, p->mapbase + reg_offs);
91 break;
92 }
93}
94
95static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
96 unsigned long clr, unsigned long set)
97{
98 unsigned long mask = clr | set;
99 unsigned long data;
100 int k;
101
102 data = sh_msiof_read(p, CTR);
103 data &= ~clr;
104 data |= set;
105 sh_msiof_write(p, CTR, data);
106
107 for (k = 100; k > 0; k--) {
108 if ((sh_msiof_read(p, CTR) & mask) == set)
109 break;
110
111 udelay(10);
112 }
113
114 return k > 0 ? 0 : -ETIMEDOUT;
115}
116
117static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
118{
119 struct sh_msiof_spi_priv *p = data;
120
121 /* just disable the interrupt and wake up */
122 sh_msiof_write(p, IER, 0);
123 complete(&p->done);
124
125 return IRQ_HANDLED;
126}
127
128static struct {
129 unsigned short div;
130 unsigned short scr;
131} const sh_msiof_spi_clk_table[] = {
132 { 1, 0x0007 },
133 { 2, 0x0000 },
134 { 4, 0x0001 },
135 { 8, 0x0002 },
136 { 16, 0x0003 },
137 { 32, 0x0004 },
138 { 64, 0x1f00 },
139 { 128, 0x1f01 },
140 { 256, 0x1f02 },
141 { 512, 0x1f03 },
142 { 1024, 0x1f04 },
143};
144
145static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
146 unsigned long parent_rate,
147 unsigned long spi_hz)
148{
149 unsigned long div = 1024;
150 size_t k;
151
152 if (!WARN_ON(!spi_hz || !parent_rate))
153 div = parent_rate / spi_hz;
154
155 /* TODO: make more fine grained */
156
157 for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_clk_table); k++) {
158 if (sh_msiof_spi_clk_table[k].div >= div)
159 break;
160 }
161
162 k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
163
164 sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
165 sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
166}
167
168static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
169 int cpol, int cpha,
170 int tx_hi_z, int lsb_first)
171{
172 unsigned long tmp;
173 int edge;
174
175 /*
176 * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
177 * 0 0 10 10 1 1
178 * 0 1 10 10 0 0
179 * 1 0 11 11 0 0
180 * 1 1 11 11 1 1
181 */
182 sh_msiof_write(p, FCTR, 0);
183 sh_msiof_write(p, TMDR1, 0xe2000005 | (lsb_first << 24));
184 sh_msiof_write(p, RMDR1, 0x22000005 | (lsb_first << 24));
185
186 tmp = 0xa0000000;
187 tmp |= cpol << 30; /* TSCKIZ */
188 tmp |= cpol << 28; /* RSCKIZ */
189
190 edge = cpol ? cpha : !cpha;
191
192 tmp |= edge << 27; /* TEDG */
193 tmp |= edge << 26; /* REDG */
194 tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
195 sh_msiof_write(p, CTR, tmp);
196}
197
198static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
199 const void *tx_buf, void *rx_buf,
200 int bits, int words)
201{
202 unsigned long dr2;
203
204 dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
205
206 if (tx_buf)
207 sh_msiof_write(p, TMDR2, dr2);
208 else
209 sh_msiof_write(p, TMDR2, dr2 | 1);
210
211 if (rx_buf)
212 sh_msiof_write(p, RMDR2, dr2);
213
214 sh_msiof_write(p, IER, STR_TEOF | STR_REOF);
215}
216
217static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
218{
219 sh_msiof_write(p, STR, sh_msiof_read(p, STR));
220}
221
222static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
223 const void *tx_buf, int words, int fs)
224{
225 const unsigned char *buf_8 = tx_buf;
226 int k;
227
228 for (k = 0; k < words; k++)
229 sh_msiof_write(p, TFDR, buf_8[k] << fs);
230}
231
232static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
233 const void *tx_buf, int words, int fs)
234{
235 const unsigned short *buf_16 = tx_buf;
236 int k;
237
238 for (k = 0; k < words; k++)
239 sh_msiof_write(p, TFDR, buf_16[k] << fs);
240}
241
242static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
243 const void *tx_buf, int words, int fs)
244{
245 const unsigned short *buf_16 = tx_buf;
246 int k;
247
248 for (k = 0; k < words; k++)
249 sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
250}
251
252static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
253 const void *tx_buf, int words, int fs)
254{
255 const unsigned int *buf_32 = tx_buf;
256 int k;
257
258 for (k = 0; k < words; k++)
259 sh_msiof_write(p, TFDR, buf_32[k] << fs);
260}
261
262static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
263 const void *tx_buf, int words, int fs)
264{
265 const unsigned int *buf_32 = tx_buf;
266 int k;
267
268 for (k = 0; k < words; k++)
269 sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
270}
271
272static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
273 void *rx_buf, int words, int fs)
274{
275 unsigned char *buf_8 = rx_buf;
276 int k;
277
278 for (k = 0; k < words; k++)
279 buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
280}
281
282static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
283 void *rx_buf, int words, int fs)
284{
285 unsigned short *buf_16 = rx_buf;
286 int k;
287
288 for (k = 0; k < words; k++)
289 buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
290}
291
292static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
293 void *rx_buf, int words, int fs)
294{
295 unsigned short *buf_16 = rx_buf;
296 int k;
297
298 for (k = 0; k < words; k++)
299 put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
300}
301
302static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
303 void *rx_buf, int words, int fs)
304{
305 unsigned int *buf_32 = rx_buf;
306 int k;
307
308 for (k = 0; k < words; k++)
309 buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
310}
311
312static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
313 void *rx_buf, int words, int fs)
314{
315 unsigned int *buf_32 = rx_buf;
316 int k;
317
318 for (k = 0; k < words; k++)
319 put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
320}
321
322static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
323{
324 int bits;
325
326 bits = t ? t->bits_per_word : 0;
327 bits = bits ? bits : spi->bits_per_word;
328 return bits;
329}
330
331static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
332 struct spi_transfer *t)
333{
334 unsigned long hz;
335
336 hz = t ? t->speed_hz : 0;
337 hz = hz ? hz : spi->max_speed_hz;
338 return hz;
339}
340
341static int sh_msiof_spi_setup_transfer(struct spi_device *spi,
342 struct spi_transfer *t)
343{
344 int bits;
345
346 	/* nothing to check hz values against since the parent clock is disabled */
347
348 bits = sh_msiof_spi_bits(spi, t);
349 if (bits < 8)
350 return -EINVAL;
351 if (bits > 32)
352 return -EINVAL;
353
354 return spi_bitbang_setup_transfer(spi, t);
355}
356
357static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
358{
359 struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
360 int value;
361
362 /* chip select is active low unless SPI_CS_HIGH is set */
363 if (spi->mode & SPI_CS_HIGH)
364 value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0;
365 else
366 value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1;
367
368 if (is_on == BITBANG_CS_ACTIVE) {
369 if (!test_and_set_bit(0, &p->flags)) {
370 pm_runtime_get_sync(&p->pdev->dev);
371 clk_enable(p->clk);
372 }
373
374 /* Configure pins before asserting CS */
375 sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
376 !!(spi->mode & SPI_CPHA),
377 !!(spi->mode & SPI_3WIRE),
378 !!(spi->mode & SPI_LSB_FIRST));
379 }
380
381 /* use spi->controller data for CS (same strategy as spi_gpio) */
382 gpio_set_value((unsigned)spi->controller_data, value);
383
384 if (is_on == BITBANG_CS_INACTIVE) {
385 if (test_and_clear_bit(0, &p->flags)) {
386 clk_disable(p->clk);
387 pm_runtime_put(&p->pdev->dev);
388 }
389 }
390}
391
392static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
393 void (*tx_fifo)(struct sh_msiof_spi_priv *,
394 const void *, int, int),
395 void (*rx_fifo)(struct sh_msiof_spi_priv *,
396 void *, int, int),
397 const void *tx_buf, void *rx_buf,
398 int words, int bits)
399{
400 int fifo_shift;
401 int ret;
402
403 /* limit maximum word transfer to rx/tx fifo size */
404 if (tx_buf)
405 words = min_t(int, words, p->tx_fifo_size);
406 if (rx_buf)
407 words = min_t(int, words, p->rx_fifo_size);
408
409 /* the fifo contents need shifting */
410 fifo_shift = 32 - bits;
411
412 /* setup msiof transfer mode registers */
413 sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
414
415 /* write tx fifo */
416 if (tx_buf)
417 tx_fifo(p, tx_buf, words, fifo_shift);
418
419 /* setup clock and rx/tx signals */
420 ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
421 if (rx_buf)
422 ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
423 ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
424
425 /* start by setting frame bit */
426 INIT_COMPLETION(p->done);
427 ret = ret ? ret : sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
428 if (ret) {
429 dev_err(&p->pdev->dev, "failed to start hardware\n");
430 goto err;
431 }
432
433 /* wait for tx fifo to be emptied / rx fifo to be filled */
434 wait_for_completion(&p->done);
435
436 /* read rx fifo */
437 if (rx_buf)
438 rx_fifo(p, rx_buf, words, fifo_shift);
439
440 /* clear status bits */
441 sh_msiof_reset_str(p);
442
443 	/* shut down frame, tx/rx and clock signals */
444 ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
445 ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
446 if (rx_buf)
447 ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
448 ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
449 if (ret) {
450 dev_err(&p->pdev->dev, "failed to shut down hardware\n");
451 goto err;
452 }
453
454 return words;
455
456 err:
457 sh_msiof_write(p, IER, 0);
458 return ret;
459}
460
461static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
462{
463 struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
464 void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
465 void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
466 int bits;
467 int bytes_per_word;
468 int bytes_done;
469 int words;
470 int n;
471
472 bits = sh_msiof_spi_bits(spi, t);
473
474 /* setup bytes per word and fifo read/write functions */
475 if (bits <= 8) {
476 bytes_per_word = 1;
477 tx_fifo = sh_msiof_spi_write_fifo_8;
478 rx_fifo = sh_msiof_spi_read_fifo_8;
479 } else if (bits <= 16) {
480 bytes_per_word = 2;
481 if ((unsigned long)t->tx_buf & 0x01)
482 tx_fifo = sh_msiof_spi_write_fifo_16u;
483 else
484 tx_fifo = sh_msiof_spi_write_fifo_16;
485
486 if ((unsigned long)t->rx_buf & 0x01)
487 rx_fifo = sh_msiof_spi_read_fifo_16u;
488 else
489 rx_fifo = sh_msiof_spi_read_fifo_16;
490 } else {
491 bytes_per_word = 4;
492 if ((unsigned long)t->tx_buf & 0x03)
493 tx_fifo = sh_msiof_spi_write_fifo_32u;
494 else
495 tx_fifo = sh_msiof_spi_write_fifo_32;
496
497 if ((unsigned long)t->rx_buf & 0x03)
498 rx_fifo = sh_msiof_spi_read_fifo_32u;
499 else
500 rx_fifo = sh_msiof_spi_read_fifo_32;
501 }
502
503 /* setup clocks (clock already enabled in chipselect()) */
504 sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk),
505 sh_msiof_spi_hz(spi, t));
506
507 /* transfer in fifo sized chunks */
508 words = t->len / bytes_per_word;
509 bytes_done = 0;
510
511 while (bytes_done < t->len) {
512 n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo,
513 t->tx_buf + bytes_done,
514 t->rx_buf + bytes_done,
515 words, bits);
516 if (n < 0)
517 break;
518
519 bytes_done += n * bytes_per_word;
520 words -= n;
521 }
522
523 return bytes_done;
524}
525
526static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
527 u32 word, u8 bits)
528{
529 BUG(); /* unused but needed by bitbang code */
530 return 0;
531}
532
533static int sh_msiof_spi_probe(struct platform_device *pdev)
534{
535 struct resource *r;
536 struct spi_master *master;
537 struct sh_msiof_spi_priv *p;
538 char clk_name[16];
539 int i;
540 int ret;
541
542 master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
543 if (master == NULL) {
544 dev_err(&pdev->dev, "failed to allocate spi master\n");
545 ret = -ENOMEM;
546 goto err0;
547 }
548
549 p = spi_master_get_devdata(master);
550
551 platform_set_drvdata(pdev, p);
552 p->info = pdev->dev.platform_data;
553 init_completion(&p->done);
554
555 snprintf(clk_name, sizeof(clk_name), "msiof%d", pdev->id);
556 p->clk = clk_get(&pdev->dev, clk_name);
557 if (IS_ERR(p->clk)) {
558 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
559 ret = PTR_ERR(p->clk);
560 goto err1;
561 }
562
563 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
564 i = platform_get_irq(pdev, 0);
565 if (!r || i < 0) {
566 dev_err(&pdev->dev, "cannot get platform resources\n");
567 ret = -ENOENT;
568 goto err2;
569 }
570 p->mapbase = ioremap_nocache(r->start, resource_size(r));
571 if (!p->mapbase) {
572 dev_err(&pdev->dev, "unable to ioremap\n");
573 ret = -ENXIO;
574 goto err2;
575 }
576
577 ret = request_irq(i, sh_msiof_spi_irq, IRQF_DISABLED,
578 dev_name(&pdev->dev), p);
579 if (ret) {
580 dev_err(&pdev->dev, "unable to request irq\n");
581 goto err3;
582 }
583
584 p->pdev = pdev;
585 pm_runtime_enable(&pdev->dev);
586
587 	/* The standard version of MSIOF uses 64-word FIFOs */
588 p->tx_fifo_size = 64;
589 p->rx_fifo_size = 64;
590
591 /* Platform data may override FIFO sizes */
592 if (p->info->tx_fifo_override)
593 p->tx_fifo_size = p->info->tx_fifo_override;
594 if (p->info->rx_fifo_override)
595 p->rx_fifo_size = p->info->rx_fifo_override;
596
597 /* init master and bitbang code */
598 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
599 master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
600 master->flags = 0;
601 master->bus_num = pdev->id;
602 master->num_chipselect = p->info->num_chipselect;
603 master->setup = spi_bitbang_setup;
604 master->cleanup = spi_bitbang_cleanup;
605
606 p->bitbang.master = master;
607 p->bitbang.chipselect = sh_msiof_spi_chipselect;
608 p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer;
609 p->bitbang.txrx_bufs = sh_msiof_spi_txrx;
610 p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word;
611 p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word;
612 p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word;
613 p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word;
614
615 ret = spi_bitbang_start(&p->bitbang);
616 if (ret == 0)
617 return 0;
618
619 pm_runtime_disable(&pdev->dev);
620 err3:
621 iounmap(p->mapbase);
622 err2:
623 clk_put(p->clk);
624 err1:
625 spi_master_put(master);
626 err0:
627 return ret;
628}
629
630static int sh_msiof_spi_remove(struct platform_device *pdev)
631{
632 struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
633 int ret;
634
635 ret = spi_bitbang_stop(&p->bitbang);
636 if (!ret) {
637 pm_runtime_disable(&pdev->dev);
638 		free_irq(platform_get_irq(pdev, 0), p);
639 iounmap(p->mapbase);
640 clk_put(p->clk);
641 spi_master_put(p->bitbang.master);
642 }
643 return ret;
644}
645
646static int sh_msiof_spi_runtime_nop(struct device *dev)
647{
648 /* Runtime PM callback shared between ->runtime_suspend()
649 * and ->runtime_resume(). Simply returns success.
650 *
651 * This driver re-initializes all registers after
652 * pm_runtime_get_sync() anyway so there is no need
653 * to save and restore registers here.
654 */
655 return 0;
656}
657
658static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
659 .runtime_suspend = sh_msiof_spi_runtime_nop,
660 .runtime_resume = sh_msiof_spi_runtime_nop,
661};
662
663static struct platform_driver sh_msiof_spi_drv = {
664 .probe = sh_msiof_spi_probe,
665 .remove = sh_msiof_spi_remove,
666 .driver = {
667 .name = "spi_sh_msiof",
668 .owner = THIS_MODULE,
669 .pm = &sh_msiof_spi_dev_pm_ops,
670 },
671};
672
673static int __init sh_msiof_spi_init(void)
674{
675 return platform_driver_register(&sh_msiof_spi_drv);
676}
677module_init(sh_msiof_spi_init);
678
679static void __exit sh_msiof_spi_exit(void)
680{
681 platform_driver_unregister(&sh_msiof_spi_drv);
682}
683module_exit(sh_msiof_spi_exit);
684
685MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
686MODULE_AUTHOR("Magnus Damm");
687MODULE_LICENSE("GPL v2");
688MODULE_ALIAS("platform:spi_sh_msiof");
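For the clock setup above, sh_msiof_spi_set_clk_regs() computes the integer ratio parent_rate / spi_hz and then walks sh_msiof_spi_clk_table for the first divider that is at least that ratio, clamping to the largest entry. A hedged sketch of the same selection; the helper is illustrative, and the divider list simply mirrors the table's div column.

/* Sketch of the divider selection done in sh_msiof_spi_set_clk_regs(). */
static unsigned short pick_msiof_div(unsigned long parent_rate,
				     unsigned long spi_hz)
{
	static const unsigned short divs[] = {
		1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024
	};
	unsigned long want = parent_rate / spi_hz;	/* required division ratio */
	unsigned int k;

	for (k = 0; k < sizeof(divs) / sizeof(divs[0]) - 1; k++)
		if (divs[k] >= want)
			break;

	return divs[k];	/* e.g. a 48 MHz parent and a 5 MHz request select 16 */
}

The value actually written to TSCR/RSCR is the scr field paired with that divider in the table, not the divider itself.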
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi_sh_sci.c
index 7d36720eb982..a65c12ffa733 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi_sh_sci.c
@@ -148,7 +148,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
148 ret = -ENOENT; 148 ret = -ENOENT;
149 goto err1; 149 goto err1;
150 } 150 }
151 sp->membase = ioremap(r->start, r->end - r->start + 1); 151 sp->membase = ioremap(r->start, resource_size(r));
152 if (!sp->membase) { 152 if (!sp->membase) {
153 ret = -ENXIO; 153 ret = -ENXIO;
154 goto err1; 154 goto err1;
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi_stmp.c
index 2552bb364005..fadff76eb7e0 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi_stmp.c
@@ -76,7 +76,7 @@ struct stmp_spi {
76 break; \ 76 break; \
77 } \ 77 } \
78 cpu_relax(); \ 78 cpu_relax(); \
79 } while (time_before(end_jiffies, jiffies)); \ 79 } while (time_before(jiffies, end_jiffies)); \
80 succeeded; \ 80 succeeded; \
81 }) 81 })
82 82
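The one-line change to the busy-wait macro above fixes an inverted jiffies comparison: with the arguments swapped, time_before(end_jiffies, jiffies) was false on entry, so the do/while gave up after a single pass instead of polling until the deadline. For reference, a minimal sketch of the corrected pattern; the helper name and parameters are illustrative, not part of the driver.

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <asm/processor.h>	/* cpu_relax() */

/* Poll a condition until it holds or a millisecond deadline passes (sketch). */
static int poll_until(bool (*done)(void *ctx), void *ctx, unsigned int timeout_ms)
{
	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (done(ctx))
			return 0;	/* condition met within the window */
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));	/* not yet past the deadline */

	return -ETIMEDOUT;
}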
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c
index 19f75627c3de..dfa024b633e1 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi_txx9.c
@@ -375,12 +375,10 @@ static int __init txx9spi_probe(struct platform_device *dev)
375 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 375 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
376 if (!res) 376 if (!res)
377 goto exit_busy; 377 goto exit_busy;
378 if (!devm_request_mem_region(&dev->dev, 378 if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res),
379 res->start, res->end - res->start + 1,
380 "spi_txx9")) 379 "spi_txx9"))
381 goto exit_busy; 380 goto exit_busy;
382 c->membase = devm_ioremap(&dev->dev, 381 c->membase = devm_ioremap(&dev->dev, res->start, resource_size(res));
383 res->start, res->end - res->start + 1);
384 if (!c->membase) 382 if (!c->membase)
385 goto exit_busy; 383 goto exit_busy;
386 384
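Both this hunk and the spi_sh_sci.c one above replace the open-coded res->end - res->start + 1 with resource_size(), which computes the same inclusive-bounds length. A small hedged sketch of the idiom; the helper and the "example" region name are illustrative.

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/io.h>

/* Claim and map a memory resource using resource_size() (sketch only). */
static void __iomem *map_example_resource(struct device *dev,
					  struct resource *res)
{
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     "example"))
		return NULL;	/* region already claimed by someone else */

	return devm_ioremap(dev, res->start, resource_size(res));
}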
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 5d23983f02fc..ea1bec3c9a13 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -30,7 +30,6 @@
30#include <linux/errno.h> 30#include <linux/errno.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/smp_lock.h>
34 33
35#include <linux/spi/spi.h> 34#include <linux/spi/spi.h>
36#include <linux/spi/spidev.h> 35#include <linux/spi/spidev.h>
@@ -42,7 +41,7 @@
42 * This supports access to SPI devices using normal userspace I/O calls. 41 * This supports access to SPI devices using normal userspace I/O calls.
43 * Note that while traditional UNIX/POSIX I/O semantics are half duplex, 42 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
44 * and often mask message boundaries, full SPI support requires full duplex 43 * and often mask message boundaries, full SPI support requires full duplex
45 * transfers. There are several kinds of of internal message boundaries to 44 * transfers. There are several kinds of internal message boundaries to
46 * handle chipselect management and other protocol options. 45 * handle chipselect management and other protocol options.
47 * 46 *
48 * SPI has a character major number assigned. We allocate minor numbers 47 * SPI has a character major number assigned. We allocate minor numbers
@@ -54,7 +53,7 @@
54#define SPIDEV_MAJOR 153 /* assigned */ 53#define SPIDEV_MAJOR 153 /* assigned */
55#define N_SPI_MINORS 32 /* ... up to 256 */ 54#define N_SPI_MINORS 32 /* ... up to 256 */
56 55
57static unsigned long minors[N_SPI_MINORS / BITS_PER_LONG]; 56static DECLARE_BITMAP(minors, N_SPI_MINORS);
58 57
59 58
60/* Bit masks for spi_device.mode management. Note that incorrect 59/* Bit masks for spi_device.mode management. Note that incorrect
@@ -267,15 +266,15 @@ static int spidev_message(struct spidev_data *spidev,
267 k_tmp->delay_usecs = u_tmp->delay_usecs; 266 k_tmp->delay_usecs = u_tmp->delay_usecs;
268 k_tmp->speed_hz = u_tmp->speed_hz; 267 k_tmp->speed_hz = u_tmp->speed_hz;
269#ifdef VERBOSE 268#ifdef VERBOSE
270 dev_dbg(&spi->dev, 269 dev_dbg(&spidev->spi->dev,
271 " xfer len %zd %s%s%s%dbits %u usec %uHz\n", 270 " xfer len %zd %s%s%s%dbits %u usec %uHz\n",
272 u_tmp->len, 271 u_tmp->len,
273 u_tmp->rx_buf ? "rx " : "", 272 u_tmp->rx_buf ? "rx " : "",
274 u_tmp->tx_buf ? "tx " : "", 273 u_tmp->tx_buf ? "tx " : "",
275 u_tmp->cs_change ? "cs " : "", 274 u_tmp->cs_change ? "cs " : "",
276 u_tmp->bits_per_word ? : spi->bits_per_word, 275 u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
277 u_tmp->delay_usecs, 276 u_tmp->delay_usecs,
278 u_tmp->speed_hz ? : spi->max_speed_hz); 277 u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
279#endif 278#endif
280 spi_message_add_tail(k_tmp, &msg); 279 spi_message_add_tail(k_tmp, &msg);
281 } 280 }
@@ -477,7 +476,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
477 struct spidev_data *spidev; 476 struct spidev_data *spidev;
478 int status = -ENXIO; 477 int status = -ENXIO;
479 478
480 lock_kernel();
481 mutex_lock(&device_list_lock); 479 mutex_lock(&device_list_lock);
482 480
483 list_for_each_entry(spidev, &device_list, device_entry) { 481 list_for_each_entry(spidev, &device_list, device_entry) {
@@ -503,7 +501,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
503 pr_debug("spidev: nothing for minor %d\n", iminor(inode)); 501 pr_debug("spidev: nothing for minor %d\n", iminor(inode));
504 502
505 mutex_unlock(&device_list_lock); 503 mutex_unlock(&device_list_lock);
506 unlock_kernel();
507 return status; 504 return status;
508} 505}
509 506
@@ -561,7 +558,7 @@ static struct class *spidev_class;
561 558
562/*-------------------------------------------------------------------------*/ 559/*-------------------------------------------------------------------------*/
563 560
564static int spidev_probe(struct spi_device *spi) 561static int __devinit spidev_probe(struct spi_device *spi)
565{ 562{
566 struct spidev_data *spidev; 563 struct spidev_data *spidev;
567 int status; 564 int status;
@@ -610,7 +607,7 @@ static int spidev_probe(struct spi_device *spi)
610 return status; 607 return status;
611} 608}
612 609
613static int spidev_remove(struct spi_device *spi) 610static int __devexit spidev_remove(struct spi_device *spi)
614{ 611{
615 struct spidev_data *spidev = spi_get_drvdata(spi); 612 struct spidev_data *spidev = spi_get_drvdata(spi);
616 613
@@ -632,7 +629,7 @@ static int spidev_remove(struct spi_device *spi)
632 return 0; 629 return 0;
633} 630}
634 631
635static struct spi_driver spidev_spi = { 632static struct spi_driver spidev_spi_driver = {
636 .driver = { 633 .driver = {
637 .name = "spidev", 634 .name = "spidev",
638 .owner = THIS_MODULE, 635 .owner = THIS_MODULE,
@@ -664,14 +661,14 @@ static int __init spidev_init(void)
664 661
665 spidev_class = class_create(THIS_MODULE, "spidev"); 662 spidev_class = class_create(THIS_MODULE, "spidev");
666 if (IS_ERR(spidev_class)) { 663 if (IS_ERR(spidev_class)) {
667 unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); 664 unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
668 return PTR_ERR(spidev_class); 665 return PTR_ERR(spidev_class);
669 } 666 }
670 667
671 status = spi_register_driver(&spidev_spi); 668 status = spi_register_driver(&spidev_spi_driver);
672 if (status < 0) { 669 if (status < 0) {
673 class_destroy(spidev_class); 670 class_destroy(spidev_class);
674 unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); 671 unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
675 } 672 }
676 return status; 673 return status;
677} 674}
@@ -679,9 +676,9 @@ module_init(spidev_init);
679 676
680static void __exit spidev_exit(void) 677static void __exit spidev_exit(void)
681{ 678{
682 spi_unregister_driver(&spidev_spi); 679 spi_unregister_driver(&spidev_spi_driver);
683 class_destroy(spidev_class); 680 class_destroy(spidev_class);
684 unregister_chrdev(SPIDEV_MAJOR, spidev_spi.driver.name); 681 unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
685} 682}
686module_exit(spidev_exit); 683module_exit(spidev_exit);
687 684
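The minors bookkeeping in spidev now goes through DECLARE_BITMAP() and the generic bitops instead of a hand-sized unsigned long array. A reduced sketch of that allocation pattern, with the device-list locking that spidev_probe()/spidev_remove() hold omitted for brevity:

/* Sketch only: minor-number allocation from a static bitmap.
 * Callers are assumed to serialize access (spidev uses a mutex).
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

#define N_MINORS	32

static DECLARE_BITMAP(minors, N_MINORS);

static int alloc_minor(void)
{
	unsigned long minor = find_first_zero_bit(minors, N_MINORS);

	if (minor >= N_MINORS)
		return -ENODEV;		/* every minor is in use */
	set_bit(minor, minors);
	return minor;
}

static void free_minor(unsigned long minor)
{
	clear_bit(minor, minors);
}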
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c
index bf9540f5fb98..a3938958147c 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/tle62x0.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h>
14 15
15#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
16#include <linux/spi/tle62x0.h> 17#include <linux/spi/tle62x0.h>
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 46b8c5c2f45e..1b47363cb73f 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -14,22 +14,20 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18
19#include <linux/of_platform.h>
20#include <linux/of_device.h>
21#include <linux/of_spi.h>
22 17
23#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
24#include <linux/spi/spi_bitbang.h> 19#include <linux/spi/spi_bitbang.h>
25#include <linux/io.h> 20#include <linux/io.h>
26 21
22#include "xilinx_spi.h"
23#include <linux/spi/xilinx_spi.h>
24
27#define XILINX_SPI_NAME "xilinx_spi" 25#define XILINX_SPI_NAME "xilinx_spi"
28 26
29/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e) 27/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
30 * Product Specification", DS464 28 * Product Specification", DS464
31 */ 29 */
32#define XSPI_CR_OFFSET 0x62 /* 16-bit Control Register */ 30#define XSPI_CR_OFFSET 0x60 /* Control Register */
33 31
34#define XSPI_CR_ENABLE 0x02 32#define XSPI_CR_ENABLE 0x02
35#define XSPI_CR_MASTER_MODE 0x04 33#define XSPI_CR_MASTER_MODE 0x04
@@ -40,8 +38,9 @@
40#define XSPI_CR_RXFIFO_RESET 0x40 38#define XSPI_CR_RXFIFO_RESET 0x40
41#define XSPI_CR_MANUAL_SSELECT 0x80 39#define XSPI_CR_MANUAL_SSELECT 0x80
42#define XSPI_CR_TRANS_INHIBIT 0x100 40#define XSPI_CR_TRANS_INHIBIT 0x100
41#define XSPI_CR_LSB_FIRST 0x200
43 42
44#define XSPI_SR_OFFSET 0x67 /* 8-bit Status Register */ 43#define XSPI_SR_OFFSET 0x64 /* Status Register */
45 44
46#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */ 45#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
47#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */ 46#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
@@ -49,8 +48,8 @@
49#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */ 48#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
50#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */ 49#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
51 50
52#define XSPI_TXD_OFFSET 0x6b /* 8-bit Data Transmit Register */ 51#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
53#define XSPI_RXD_OFFSET 0x6f /* 8-bit Data Receive Register */ 52#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
54 53
55#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */ 54#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
56 55
@@ -70,6 +69,7 @@
70#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */ 69#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
71#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */ 70#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
72#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */ 71#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
72#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
73 73
74#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */ 74#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
75#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */ 75#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
@@ -78,35 +78,105 @@ struct xilinx_spi {
78 /* bitbang has to be first */ 78 /* bitbang has to be first */
79 struct spi_bitbang bitbang; 79 struct spi_bitbang bitbang;
80 struct completion done; 80 struct completion done;
81 81 struct resource mem; /* phys mem */
82 void __iomem *regs; /* virt. address of the control registers */ 82 void __iomem *regs; /* virt. address of the control registers */
83 83
84 u32 irq; 84 u32 irq;
85 85
86 u32 speed_hz; /* SCK has a fixed frequency of speed_hz Hz */
87
 88 u8 *rx_ptr; /* pointer in the Rx buffer */ 86 u8 *rx_ptr; /* pointer in the Rx buffer */
 89 const u8 *tx_ptr; /* pointer in the Tx buffer */ 87 const u8 *tx_ptr; /* pointer in the Tx buffer */
90 int remaining_bytes; /* the number of bytes left to transfer */ 88 int remaining_bytes; /* the number of bytes left to transfer */
89 u8 bits_per_word;
90 unsigned int (*read_fn) (void __iomem *);
91 void (*write_fn) (u32, void __iomem *);
92 void (*tx_fn) (struct xilinx_spi *);
93 void (*rx_fn) (struct xilinx_spi *);
91}; 94};
92 95
93static void xspi_init_hw(void __iomem *regs_base) 96static void xspi_write32(u32 val, void __iomem *addr)
97{
98 iowrite32(val, addr);
99}
100
101static unsigned int xspi_read32(void __iomem *addr)
102{
103 return ioread32(addr);
104}
105
106static void xspi_write32_be(u32 val, void __iomem *addr)
94{ 107{
108 iowrite32be(val, addr);
109}
110
111static unsigned int xspi_read32_be(void __iomem *addr)
112{
113 return ioread32be(addr);
114}
115
116static void xspi_tx8(struct xilinx_spi *xspi)
117{
118 xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
119 xspi->tx_ptr++;
120}
121
122static void xspi_tx16(struct xilinx_spi *xspi)
123{
124 xspi->write_fn(*(u16 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
125 xspi->tx_ptr += 2;
126}
127
128static void xspi_tx32(struct xilinx_spi *xspi)
129{
130 xspi->write_fn(*(u32 *)(xspi->tx_ptr), xspi->regs + XSPI_TXD_OFFSET);
131 xspi->tx_ptr += 4;
132}
133
134static void xspi_rx8(struct xilinx_spi *xspi)
135{
136 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
137 if (xspi->rx_ptr) {
138 *xspi->rx_ptr = data & 0xff;
139 xspi->rx_ptr++;
140 }
141}
142
143static void xspi_rx16(struct xilinx_spi *xspi)
144{
145 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
146 if (xspi->rx_ptr) {
147 *(u16 *)(xspi->rx_ptr) = data & 0xffff;
148 xspi->rx_ptr += 2;
149 }
150}
151
152static void xspi_rx32(struct xilinx_spi *xspi)
153{
154 u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
155 if (xspi->rx_ptr) {
156 *(u32 *)(xspi->rx_ptr) = data;
157 xspi->rx_ptr += 4;
158 }
159}
160
161static void xspi_init_hw(struct xilinx_spi *xspi)
162{
163 void __iomem *regs_base = xspi->regs;
164
95 /* Reset the SPI device */ 165 /* Reset the SPI device */
96 out_be32(regs_base + XIPIF_V123B_RESETR_OFFSET, 166 xspi->write_fn(XIPIF_V123B_RESET_MASK,
97 XIPIF_V123B_RESET_MASK); 167 regs_base + XIPIF_V123B_RESETR_OFFSET);
98 /* Disable all the interrupts just in case */ 168 /* Disable all the interrupts just in case */
99 out_be32(regs_base + XIPIF_V123B_IIER_OFFSET, 0); 169 xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
100 /* Enable the global IPIF interrupt */ 170 /* Enable the global IPIF interrupt */
101 out_be32(regs_base + XIPIF_V123B_DGIER_OFFSET, 171 xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
102 XIPIF_V123B_GINTR_ENABLE); 172 regs_base + XIPIF_V123B_DGIER_OFFSET);
103 /* Deselect the slave on the SPI bus */ 173 /* Deselect the slave on the SPI bus */
104 out_be32(regs_base + XSPI_SSR_OFFSET, 0xffff); 174 xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
105 /* Disable the transmitter, enable Manual Slave Select Assertion, 175 /* Disable the transmitter, enable Manual Slave Select Assertion,
106 * put SPI controller into master mode, and enable it */ 176 * put SPI controller into master mode, and enable it */
107 out_be16(regs_base + XSPI_CR_OFFSET, 177 xspi->write_fn(XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT |
108 XSPI_CR_TRANS_INHIBIT | XSPI_CR_MANUAL_SSELECT 178 XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET |
109 | XSPI_CR_MASTER_MODE | XSPI_CR_ENABLE); 179 XSPI_CR_RXFIFO_RESET, regs_base + XSPI_CR_OFFSET);
110} 180}
111 181
112static void xilinx_spi_chipselect(struct spi_device *spi, int is_on) 182static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
@@ -115,16 +185,16 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
115 185
116 if (is_on == BITBANG_CS_INACTIVE) { 186 if (is_on == BITBANG_CS_INACTIVE) {
117 /* Deselect the slave on the SPI bus */ 187 /* Deselect the slave on the SPI bus */
118 out_be32(xspi->regs + XSPI_SSR_OFFSET, 0xffff); 188 xspi->write_fn(0xffff, xspi->regs + XSPI_SSR_OFFSET);
119 } else if (is_on == BITBANG_CS_ACTIVE) { 189 } else if (is_on == BITBANG_CS_ACTIVE) {
120 /* Set the SPI clock phase and polarity */ 190 /* Set the SPI clock phase and polarity */
121 u16 cr = in_be16(xspi->regs + XSPI_CR_OFFSET) 191 u16 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET)
122 & ~XSPI_CR_MODE_MASK; 192 & ~XSPI_CR_MODE_MASK;
123 if (spi->mode & SPI_CPHA) 193 if (spi->mode & SPI_CPHA)
124 cr |= XSPI_CR_CPHA; 194 cr |= XSPI_CR_CPHA;
125 if (spi->mode & SPI_CPOL) 195 if (spi->mode & SPI_CPOL)
126 cr |= XSPI_CR_CPOL; 196 cr |= XSPI_CR_CPOL;
127 out_be16(xspi->regs + XSPI_CR_OFFSET, cr); 197 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
128 198
129 /* We do not check spi->max_speed_hz here as the SPI clock 199 /* We do not check spi->max_speed_hz here as the SPI clock
130 * frequency is not software programmable (the IP block design 200 * frequency is not software programmable (the IP block design
@@ -132,24 +202,27 @@ static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
132 */ 202 */
133 203
134 /* Activate the chip select */ 204 /* Activate the chip select */
135 out_be32(xspi->regs + XSPI_SSR_OFFSET, 205 xspi->write_fn(~(0x0001 << spi->chip_select),
136 ~(0x0001 << spi->chip_select)); 206 xspi->regs + XSPI_SSR_OFFSET);
137 } 207 }
138} 208}
139 209
140/* spi_bitbang requires custom setup_transfer() to be defined if there is a 210/* spi_bitbang requires custom setup_transfer() to be defined if there is a
141 * custom txrx_bufs(). We have nothing to setup here as the SPI IP block 211 * custom txrx_bufs(). We have nothing to setup here as the SPI IP block
142 * supports just 8 bits per word, and SPI clock can't be changed in software. 212 * supports 8 or 16 bits per word which cannot be changed in software.
143 * Check for 8 bits per word. Chip select delay calculations could be 213 * SPI clock can't be changed in software either.
214 * Check for correct bits per word. Chip select delay calculations could be
144 * added here as soon as bitbang_work() can be made aware of the delay value. 215 * added here as soon as bitbang_work() can be made aware of the delay value.
145 */ 216 */
146static int xilinx_spi_setup_transfer(struct spi_device *spi, 217static int xilinx_spi_setup_transfer(struct spi_device *spi,
147 struct spi_transfer *t) 218 struct spi_transfer *t)
148{ 219{
220 struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
149 u8 bits_per_word; 221 u8 bits_per_word;
150 222
151 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; 223 bits_per_word = (t && t->bits_per_word)
152 if (bits_per_word != 8) { 224 ? t->bits_per_word : spi->bits_per_word;
225 if (bits_per_word != xspi->bits_per_word) {
153 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", 226 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
154 __func__, bits_per_word); 227 __func__, bits_per_word);
155 return -EINVAL; 228 return -EINVAL;
@@ -160,17 +233,16 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
160 233
161static int xilinx_spi_setup(struct spi_device *spi) 234static int xilinx_spi_setup(struct spi_device *spi)
162{ 235{
163 struct spi_bitbang *bitbang; 236 /* always return 0, we can not check the number of bits.
164 struct xilinx_spi *xspi; 237 * There are cases when SPI setup is called before any driver is
165 int retval; 238 * there, in that case the SPI core defaults to 8 bits, which we
166 239 * do not support in some cases. But if we return an error, the
167 xspi = spi_master_get_devdata(spi->master); 240 * SPI device would not be registered and no driver can get hold of it
168 bitbang = &xspi->bitbang; 241 * When the driver is there, it will call SPI setup again with the
169 242 * correct number of bits per transfer.
170 retval = xilinx_spi_setup_transfer(spi, NULL); 243 * If a driver setups with the wrong bit number, it will fail when
171 if (retval < 0) 244 * it tries to do a transfer
172 return retval; 245 */
173
174 return 0; 246 return 0;
175} 247}
176 248
@@ -179,15 +251,14 @@ static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
179 u8 sr; 251 u8 sr;
180 252
181 /* Fill the Tx FIFO with as many bytes as possible */ 253 /* Fill the Tx FIFO with as many bytes as possible */
182 sr = in_8(xspi->regs + XSPI_SR_OFFSET); 254 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
183 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) { 255 while ((sr & XSPI_SR_TX_FULL_MASK) == 0 && xspi->remaining_bytes > 0) {
184 if (xspi->tx_ptr) { 256 if (xspi->tx_ptr)
185 out_8(xspi->regs + XSPI_TXD_OFFSET, *xspi->tx_ptr++); 257 xspi->tx_fn(xspi);
186 } else { 258 else
187 out_8(xspi->regs + XSPI_TXD_OFFSET, 0); 259 xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
188 } 260 xspi->remaining_bytes -= xspi->bits_per_word / 8;
189 xspi->remaining_bytes--; 261 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
190 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
191 } 262 }
192} 263}
193 264
@@ -209,18 +280,19 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
209 /* Enable the transmit empty interrupt, which we use to determine 280 /* Enable the transmit empty interrupt, which we use to determine
210 * progress on the transmission. 281 * progress on the transmission.
211 */ 282 */
212 ipif_ier = in_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET); 283 ipif_ier = xspi->read_fn(xspi->regs + XIPIF_V123B_IIER_OFFSET);
213 out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, 284 xspi->write_fn(ipif_ier | XSPI_INTR_TX_EMPTY,
214 ipif_ier | XSPI_INTR_TX_EMPTY); 285 xspi->regs + XIPIF_V123B_IIER_OFFSET);
215 286
216 /* Start the transfer by not inhibiting the transmitter any longer */ 287 /* Start the transfer by not inhibiting the transmitter any longer */
217 cr = in_be16(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_TRANS_INHIBIT; 288 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) &
218 out_be16(xspi->regs + XSPI_CR_OFFSET, cr); 289 ~XSPI_CR_TRANS_INHIBIT;
290 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
219 291
220 wait_for_completion(&xspi->done); 292 wait_for_completion(&xspi->done);
221 293
222 /* Disable the transmit empty interrupt */ 294 /* Disable the transmit empty interrupt */
223 out_be32(xspi->regs + XIPIF_V123B_IIER_OFFSET, ipif_ier); 295 xspi->write_fn(ipif_ier, xspi->regs + XIPIF_V123B_IIER_OFFSET);
224 296
225 return t->len - xspi->remaining_bytes; 297 return t->len - xspi->remaining_bytes;
226} 298}
@@ -237,8 +309,8 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
237 u32 ipif_isr; 309 u32 ipif_isr;
238 310
239 /* Get the IPIF interrupts, and clear them immediately */ 311 /* Get the IPIF interrupts, and clear them immediately */
240 ipif_isr = in_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET); 312 ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
241 out_be32(xspi->regs + XIPIF_V123B_IISR_OFFSET, ipif_isr); 313 xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
242 314
243 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */ 315 if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
244 u16 cr; 316 u16 cr;
@@ -249,20 +321,15 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
249 * transmitter while the Isr refills the transmit register/FIFO, 321 * transmitter while the Isr refills the transmit register/FIFO,
250 * or make sure it is stopped if we're done. 322 * or make sure it is stopped if we're done.
251 */ 323 */
252 cr = in_be16(xspi->regs + XSPI_CR_OFFSET); 324 cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
253 out_be16(xspi->regs + XSPI_CR_OFFSET, 325 xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
254 cr | XSPI_CR_TRANS_INHIBIT); 326 xspi->regs + XSPI_CR_OFFSET);
255 327
256 /* Read out all the data from the Rx FIFO */ 328 /* Read out all the data from the Rx FIFO */
257 sr = in_8(xspi->regs + XSPI_SR_OFFSET); 329 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
258 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) { 330 while ((sr & XSPI_SR_RX_EMPTY_MASK) == 0) {
259 u8 data; 331 xspi->rx_fn(xspi);
260 332 sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
261 data = in_8(xspi->regs + XSPI_RXD_OFFSET);
262 if (xspi->rx_ptr) {
263 *xspi->rx_ptr++ = data;
264 }
265 sr = in_8(xspi->regs + XSPI_SR_OFFSET);
266 } 333 }
267 334
268 /* See if there is more data to send */ 335 /* See if there is more data to send */
@@ -271,7 +338,7 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
271 /* Start the transfer by not inhibiting the 338 /* Start the transfer by not inhibiting the
272 * transmitter any longer 339 * transmitter any longer
273 */ 340 */
274 out_be16(xspi->regs + XSPI_CR_OFFSET, cr); 341 xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
275 } else { 342 } else {
276 /* No more data to send. 343 /* No more data to send.
277 * Indicate the transfer is completed. 344 * Indicate the transfer is completed.
@@ -283,40 +350,22 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
283 return IRQ_HANDLED; 350 return IRQ_HANDLED;
284} 351}
285 352
286static int __init xilinx_spi_of_probe(struct of_device *ofdev, 353struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
287 const struct of_device_id *match) 354 u32 irq, s16 bus_num)
288{ 355{
289 struct spi_master *master; 356 struct spi_master *master;
290 struct xilinx_spi *xspi; 357 struct xilinx_spi *xspi;
291 struct resource r_irq_struct; 358 struct xspi_platform_data *pdata = dev->platform_data;
292 struct resource r_mem_struct; 359 int ret;
293
294 struct resource *r_irq = &r_irq_struct;
295 struct resource *r_mem = &r_mem_struct;
296 int rc = 0;
297 const u32 *prop;
298 int len;
299
300 /* Get resources(memory, IRQ) associated with the device */
301 master = spi_alloc_master(&ofdev->dev, sizeof(struct xilinx_spi));
302 360
303 if (master == NULL) { 361 if (!pdata) {
304 return -ENOMEM; 362 dev_err(dev, "No platform data attached\n");
363 return NULL;
305 } 364 }
306 365
307 dev_set_drvdata(&ofdev->dev, master); 366 master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
308 367 if (!master)
309 rc = of_address_to_resource(ofdev->node, 0, r_mem); 368 return NULL;
310 if (rc) {
311 dev_warn(&ofdev->dev, "invalid address\n");
312 goto put_master;
313 }
314
315 rc = of_irq_to_resource(ofdev->node, 0, r_irq);
316 if (rc == NO_IRQ) {
317 dev_warn(&ofdev->dev, "no IRQ found\n");
318 goto put_master;
319 }
320 369
321 /* the spi->mode bits understood by this driver: */ 370 /* the spi->mode bits understood by this driver: */
322 master->mode_bits = SPI_CPOL | SPI_CPHA; 371 master->mode_bits = SPI_CPOL | SPI_CPHA;
@@ -329,128 +378,87 @@ static int __init xilinx_spi_of_probe(struct of_device *ofdev,
329 xspi->bitbang.master->setup = xilinx_spi_setup; 378 xspi->bitbang.master->setup = xilinx_spi_setup;
330 init_completion(&xspi->done); 379 init_completion(&xspi->done);
331 380
332 xspi->irq = r_irq->start; 381 if (!request_mem_region(mem->start, resource_size(mem),
333 382 XILINX_SPI_NAME))
334 if (!request_mem_region(r_mem->start,
335 r_mem->end - r_mem->start + 1, XILINX_SPI_NAME)) {
336 rc = -ENXIO;
337 dev_warn(&ofdev->dev, "memory request failure\n");
338 goto put_master; 383 goto put_master;
339 }
340 384
341 xspi->regs = ioremap(r_mem->start, r_mem->end - r_mem->start + 1); 385 xspi->regs = ioremap(mem->start, resource_size(mem));
342 if (xspi->regs == NULL) { 386 if (xspi->regs == NULL) {
343 rc = -ENOMEM; 387 dev_warn(dev, "ioremap failure\n");
344 dev_warn(&ofdev->dev, "ioremap failure\n"); 388 goto map_failed;
345 goto release_mem;
346 } 389 }
347 xspi->irq = r_irq->start;
348
349 /* dynamic bus assignment */
350 master->bus_num = -1;
351 390
352 /* number of slave select bits is required */ 391 master->bus_num = bus_num;
353 prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len); 392 master->num_chipselect = pdata->num_chipselect;
354 if (!prop || len < sizeof(*prop)) { 393
355 dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n"); 394 xspi->mem = *mem;
356 goto unmap_io; 395 xspi->irq = irq;
396 if (pdata->little_endian) {
397 xspi->read_fn = xspi_read32;
398 xspi->write_fn = xspi_write32;
399 } else {
400 xspi->read_fn = xspi_read32_be;
401 xspi->write_fn = xspi_write32_be;
357 } 402 }
358 master->num_chipselect = *prop; 403 xspi->bits_per_word = pdata->bits_per_word;
404 if (xspi->bits_per_word == 8) {
405 xspi->tx_fn = xspi_tx8;
406 xspi->rx_fn = xspi_rx8;
407 } else if (xspi->bits_per_word == 16) {
408 xspi->tx_fn = xspi_tx16;
409 xspi->rx_fn = xspi_rx16;
410 } else if (xspi->bits_per_word == 32) {
411 xspi->tx_fn = xspi_tx32;
412 xspi->rx_fn = xspi_rx32;
413 } else
414 goto unmap_io;
415
359 416
360 /* SPI controller initializations */ 417 /* SPI controller initializations */
361 xspi_init_hw(xspi->regs); 418 xspi_init_hw(xspi);
362 419
363 /* Register for SPI Interrupt */ 420 /* Register for SPI Interrupt */
364 rc = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi); 421 ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
365 if (rc != 0) { 422 if (ret)
366 dev_warn(&ofdev->dev, "irq request failure: %d\n", xspi->irq);
367 goto unmap_io; 423 goto unmap_io;
368 }
369 424
370 rc = spi_bitbang_start(&xspi->bitbang); 425 ret = spi_bitbang_start(&xspi->bitbang);
371 if (rc != 0) { 426 if (ret) {
372 dev_err(&ofdev->dev, "spi_bitbang_start FAILED\n"); 427 dev_err(dev, "spi_bitbang_start FAILED\n");
373 goto free_irq; 428 goto free_irq;
374 } 429 }
375 430
376 dev_info(&ofdev->dev, "at 0x%08X mapped to 0x%08X, irq=%d\n", 431 dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
377 (unsigned int)r_mem->start, (u32)xspi->regs, xspi->irq); 432 (unsigned long long)mem->start, xspi->regs, xspi->irq);
378 433 return master;
379 /* Add any subnodes on the SPI bus */
380 of_register_spi_devices(master, ofdev->node);
381
382 return rc;
383 434
384free_irq: 435free_irq:
385 free_irq(xspi->irq, xspi); 436 free_irq(xspi->irq, xspi);
386unmap_io: 437unmap_io:
387 iounmap(xspi->regs); 438 iounmap(xspi->regs);
388release_mem: 439map_failed:
389 release_mem_region(r_mem->start, resource_size(r_mem)); 440 release_mem_region(mem->start, resource_size(mem));
390put_master: 441put_master:
391 spi_master_put(master); 442 spi_master_put(master);
392 return rc; 443 return NULL;
393} 444}
445EXPORT_SYMBOL(xilinx_spi_init);
394 446
395static int __devexit xilinx_spi_remove(struct of_device *ofdev) 447void xilinx_spi_deinit(struct spi_master *master)
396{ 448{
397 struct xilinx_spi *xspi; 449 struct xilinx_spi *xspi;
398 struct spi_master *master;
399 struct resource r_mem;
400 450
401 master = platform_get_drvdata(ofdev);
402 xspi = spi_master_get_devdata(master); 451 xspi = spi_master_get_devdata(master);
403 452
404 spi_bitbang_stop(&xspi->bitbang); 453 spi_bitbang_stop(&xspi->bitbang);
405 free_irq(xspi->irq, xspi); 454 free_irq(xspi->irq, xspi);
406 iounmap(xspi->regs); 455 iounmap(xspi->regs);
407 if (!of_address_to_resource(ofdev->node, 0, &r_mem))
408 release_mem_region(r_mem.start, resource_size(&r_mem));
409 dev_set_drvdata(&ofdev->dev, 0);
410 spi_master_put(xspi->bitbang.master);
411
412 return 0;
413}
414
415/* work with hotplug and coldplug */
416MODULE_ALIAS("platform:" XILINX_SPI_NAME);
417
418static int __exit xilinx_spi_of_remove(struct of_device *op)
419{
420 return xilinx_spi_remove(op);
421}
422
423static struct of_device_id xilinx_spi_of_match[] = {
424 { .compatible = "xlnx,xps-spi-2.00.a", },
425 { .compatible = "xlnx,xps-spi-2.00.b", },
426 {}
427};
428
429MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
430
431static struct of_platform_driver xilinx_spi_of_driver = {
432 .owner = THIS_MODULE,
433 .name = "xilinx-xps-spi",
434 .match_table = xilinx_spi_of_match,
435 .probe = xilinx_spi_of_probe,
436 .remove = __exit_p(xilinx_spi_of_remove),
437 .driver = {
438 .name = "xilinx-xps-spi",
439 .owner = THIS_MODULE,
440 },
441};
442 456
443static int __init xilinx_spi_init(void) 457 release_mem_region(xspi->mem.start, resource_size(&xspi->mem));
444{ 458 spi_master_put(xspi->bitbang.master);
445 return of_register_platform_driver(&xilinx_spi_of_driver);
446} 459}
447module_init(xilinx_spi_init); 460EXPORT_SYMBOL(xilinx_spi_deinit);
448 461
449static void __exit xilinx_spi_exit(void)
450{
451 of_unregister_platform_driver(&xilinx_spi_of_driver);
452}
453module_exit(xilinx_spi_exit);
454MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); 462MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
455MODULE_DESCRIPTION("Xilinx SPI driver"); 463MODULE_DESCRIPTION("Xilinx SPI driver");
456MODULE_LICENSE("GPL"); 464MODULE_LICENSE("GPL");
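After this rework the word width of a controller instance is fixed at init time (pdata->bits_per_word selects the 8/16/32-bit tx/rx helpers), and xilinx_spi_setup_transfer() returns -EINVAL for anything else. A protocol driver talking to, say, a 16-bit instance therefore has to request that width on each transfer. A hypothetical sketch against the generic SPI API; buffers are kept on the stack only for brevity (real code should use DMA-safe memory):

/* Sketch only: one 16-bit word, full duplex, through the generic API.
 * The spi_device comes from the protocol driver's probe(); names and
 * the command value are made up.
 */
#include <linux/spi/spi.h>

static int xspi_xfer_one_word(struct spi_device *spi, u16 cmd, u16 *resp)
{
	struct spi_transfer t = {
		.tx_buf		= &cmd,
		.rx_buf		= resp,
		.len		= 2,	/* bytes, i.e. one 16-bit word */
		.bits_per_word	= 16,	/* must match the instance's fixed width */
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	return spi_sync(spi, &m);	/* -EINVAL if the width does not match */
}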
diff --git a/drivers/spi/xilinx_spi.h b/drivers/spi/xilinx_spi.h
new file mode 100644
index 000000000000..d211accf68d2
--- /dev/null
+++ b/drivers/spi/xilinx_spi.h
@@ -0,0 +1,32 @@
1/*
2 * Xilinx SPI device driver API and platform data header file
3 *
4 * Copyright (c) 2009 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#ifndef _XILINX_SPI_H_
21#define _XILINX_SPI_H_
22
23#include <linux/spi/spi.h>
24#include <linux/spi/spi_bitbang.h>
25
26#define XILINX_SPI_NAME "xilinx_spi"
27
28struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
29 u32 irq, s16 bus_num);
30
31void xilinx_spi_deinit(struct spi_master *master);
32#endif
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
new file mode 100644
index 000000000000..748d33a76d29
--- /dev/null
+++ b/drivers/spi/xilinx_spi_of.c
@@ -0,0 +1,135 @@
1/*
2 * Xilinx SPI OF device driver
3 *
4 * Copyright (c) 2009 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20/* Supports:
21 * Xilinx SPI devices as OF devices
22 *
23 * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/io.h>
30#include <linux/slab.h>
31
32#include <linux/of_platform.h>
33#include <linux/of_device.h>
34#include <linux/of_spi.h>
35
36#include <linux/spi/xilinx_spi.h>
37#include "xilinx_spi.h"
38
39
40static int __devinit xilinx_spi_of_probe(struct of_device *ofdev,
41 const struct of_device_id *match)
42{
43 struct spi_master *master;
44 struct xspi_platform_data *pdata;
45 struct resource r_mem;
46 struct resource r_irq;
47 int rc = 0;
48 const u32 *prop;
49 int len;
50
51 rc = of_address_to_resource(ofdev->node, 0, &r_mem);
52 if (rc) {
53 dev_warn(&ofdev->dev, "invalid address\n");
54 return rc;
55 }
56
57 rc = of_irq_to_resource(ofdev->node, 0, &r_irq);
58 if (rc == NO_IRQ) {
59 dev_warn(&ofdev->dev, "no IRQ found\n");
60 return -ENODEV;
61 }
62
63 ofdev->dev.platform_data =
64 kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL);
65 pdata = ofdev->dev.platform_data;
66 if (!pdata)
67 return -ENOMEM;
68
69 /* number of slave select bits is required */
70 prop = of_get_property(ofdev->node, "xlnx,num-ss-bits", &len);
71 if (!prop || len < sizeof(*prop)) {
72 dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
73 return -EINVAL;
74 }
75 pdata->num_chipselect = *prop;
76 pdata->bits_per_word = 8;
77 master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1);
78 if (!master)
79 return -ENODEV;
80
81 dev_set_drvdata(&ofdev->dev, master);
82
83 /* Add any subnodes on the SPI bus */
84 of_register_spi_devices(master, ofdev->node);
85
86 return 0;
87}
88
89static int __devexit xilinx_spi_remove(struct of_device *ofdev)
90{
91 xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
92 dev_set_drvdata(&ofdev->dev, 0);
93 kfree(ofdev->dev.platform_data);
94 ofdev->dev.platform_data = NULL;
95 return 0;
96}
97
98static int __exit xilinx_spi_of_remove(struct of_device *op)
99{
100 return xilinx_spi_remove(op);
101}
102
103static const struct of_device_id xilinx_spi_of_match[] = {
104 { .compatible = "xlnx,xps-spi-2.00.a", },
105 { .compatible = "xlnx,xps-spi-2.00.b", },
106 {}
107};
108
109MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
110
111static struct of_platform_driver xilinx_spi_of_driver = {
112 .match_table = xilinx_spi_of_match,
113 .probe = xilinx_spi_of_probe,
114 .remove = __exit_p(xilinx_spi_of_remove),
115 .driver = {
116 .name = "xilinx-xps-spi",
117 .owner = THIS_MODULE,
118 },
119};
120
121static int __init xilinx_spi_of_init(void)
122{
123 return of_register_platform_driver(&xilinx_spi_of_driver);
124}
125module_init(xilinx_spi_of_init);
126
127static void __exit xilinx_spi_of_exit(void)
128{
129 of_unregister_platform_driver(&xilinx_spi_of_driver);
130}
131module_exit(xilinx_spi_of_exit);
132
133MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
134MODULE_DESCRIPTION("Xilinx SPI platform driver");
135MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi_pltfm.c b/drivers/spi/xilinx_spi_pltfm.c
new file mode 100644
index 000000000000..24debac646a9
--- /dev/null
+++ b/drivers/spi/xilinx_spi_pltfm.c
@@ -0,0 +1,102 @@
1/*
2 * Support for Xilinx SPI platform devices
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * Xilinx SPI devices as platform devices
21 *
22 * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/platform_device.h>
30
31#include <linux/spi/spi.h>
32#include <linux/spi/spi_bitbang.h>
33#include <linux/spi/xilinx_spi.h>
34
35#include "xilinx_spi.h"
36
37static int __devinit xilinx_spi_probe(struct platform_device *dev)
38{
39 struct xspi_platform_data *pdata;
40 struct resource *r;
41 int irq;
42 struct spi_master *master;
43 u8 i;
44
45 pdata = dev->dev.platform_data;
46 if (!pdata)
47 return -ENODEV;
48
49 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
50 if (!r)
51 return -ENODEV;
52
53 irq = platform_get_irq(dev, 0);
54 if (irq < 0)
55 return -ENXIO;
56
57 master = xilinx_spi_init(&dev->dev, r, irq, dev->id);
58 if (!master)
59 return -ENODEV;
60
61 for (i = 0; i < pdata->num_devices; i++)
62 spi_new_device(master, pdata->devices + i);
63
64 platform_set_drvdata(dev, master);
65 return 0;
66}
67
68static int __devexit xilinx_spi_remove(struct platform_device *dev)
69{
70 xilinx_spi_deinit(platform_get_drvdata(dev));
71 platform_set_drvdata(dev, 0);
72
73 return 0;
74}
75
76/* work with hotplug and coldplug */
77MODULE_ALIAS("platform:" XILINX_SPI_NAME);
78
79static struct platform_driver xilinx_spi_driver = {
80 .probe = xilinx_spi_probe,
81 .remove = __devexit_p(xilinx_spi_remove),
82 .driver = {
83 .name = XILINX_SPI_NAME,
84 .owner = THIS_MODULE,
85 },
86};
87
88static int __init xilinx_spi_pltfm_init(void)
89{
90 return platform_driver_register(&xilinx_spi_driver);
91}
92module_init(xilinx_spi_pltfm_init);
93
94static void __exit xilinx_spi_pltfm_exit(void)
95{
96 platform_driver_unregister(&xilinx_spi_driver);
97}
98module_exit(xilinx_spi_pltfm_exit);
99
100MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
101MODULE_DESCRIPTION("Xilinx SPI platform driver");
102MODULE_LICENSE("GPL v2");
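For the platform binding above, everything the driver needs arrives through struct xspi_platform_data and the usual MEM/IRQ resources. A hedged board-file sketch, with a made-up base address, IRQ number and slave device, wired the way xilinx_spi_probe() expects:

/* Sketch only: board code feeding xilinx_spi_pltfm.c.  All addresses,
 * IRQ numbers and the "some-codec" slave are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>

static struct spi_board_info board_spi_slaves[] = {
	{
		.modalias	= "some-codec",	/* hypothetical slave driver */
		.max_speed_hz	= 10000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data board_xspi_pdata = {
	.num_chipselect	= 1,
	.bits_per_word	= 8,
	.little_endian	= false,	/* core instantiated big-endian */
	.devices	= board_spi_slaves,
	.num_devices	= ARRAY_SIZE(board_spi_slaves),
};

static struct resource board_xspi_resources[] = {
	{
		.start	= 0x83e00000,	/* hypothetical register base */
		.end	= 0x83e0ffff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 30,		/* hypothetical interrupt line */
		.end	= 30,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_xspi_device = {
	.name		= "xilinx_spi",	/* XILINX_SPI_NAME */
	.id		= 0,
	.dev		= {
		.platform_data	= &board_xspi_pdata,
	},
	.resource	= board_xspi_resources,
	.num_resources	= ARRAY_SIZE(board_xspi_resources),
};

/* Board init code would then call platform_device_register(&board_xspi_device). */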