aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/spi
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-01-07 20:08:46 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-07 20:08:46 -0500
commit021db8e2bde53024a163fb4417a185de46fe77aa (patch)
tree098a28bd2414ea2622493a1736a677dab5085dfc /drivers/spi
parent72eb6a791459c87a0340318840bb3bd9252b627b (diff)
parent07fe0351702b6f0c9749e80cdbcb758686b0fe9b (diff)
Merge branch 'next-spi' of git://git.secretlab.ca/git/linux-2.6
* 'next-spi' of git://git.secretlab.ca/git/linux-2.6: (77 commits) spi/omap: Fix DMA API usage in OMAP MCSPI driver spi/imx: correct the test on platform_get_irq() return value spi/topcliff: Typo fix threhold to threshold spi/dw_spi Typo change diable to disable. spi/fsl_espi: change the read behaviour of the SPIRF spi/mpc52xx-psc-spi: move probe/remove to proper sections spi/dw_spi: add DMA support spi/dw_spi: change to EXPORT_SYMBOL_GPL for exported APIs spi/dw_spi: Fix too short timeout in spi polling loop spi/pl022: convert running variable spi/pl022: convert busy flag to a bool spi/pl022: pass the returned sglen to the DMA engine spi/pl022: map the buffers on the DMA engine spi/topcliff_pch: Fix data transfer issue spi/imx: remove autodetection spi/pxa2xx: pass of_node to spi device and set a parent device spi/pxa2xx: Modify RX-Tresh instead of busy-loop for the remaining RX bytes. spi/pxa2xx: Add chipselect support for Sodaville spi/pxa2xx: Consider CE4100's FIFO depth spi/pxa2xx: Add CE4100 support ...
Diffstat (limited to 'drivers/spi')
-rw-r--r--drivers/spi/Kconfig38
-rw-r--r--drivers/spi/Makefile6
-rw-r--r--drivers/spi/amba-pl022.c56
-rw-r--r--drivers/spi/davinci_spi.c1314
-rw-r--r--drivers/spi/dw_spi.c52
-rw-r--r--drivers/spi/dw_spi_mid.c223
-rw-r--r--drivers/spi/dw_spi_pci.c20
-rw-r--r--drivers/spi/mpc52xx_psc_spi.c35
-rw-r--r--drivers/spi/omap2_mcspi.c11
-rw-r--r--drivers/spi/pxa2xx_spi.c190
-rw-r--r--drivers/spi/pxa2xx_spi_pci.c201
-rw-r--r--drivers/spi/spi_imx.c32
-rw-r--r--drivers/spi/spi_nuc900.c2
-rw-r--r--drivers/spi/spi_topcliff_pch.c12
-rw-r--r--drivers/spi/xilinx_spi.c133
-rw-r--r--drivers/spi/xilinx_spi.h32
-rw-r--r--drivers/spi/xilinx_spi_of.c133
-rw-r--r--drivers/spi/xilinx_spi_pltfm.c102
18 files changed, 1327 insertions, 1265 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 78f9fd02c1b2..1906840c1113 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -111,11 +111,14 @@ config SPI_COLDFIRE_QSPI
111 will be called coldfire_qspi. 111 will be called coldfire_qspi.
112 112
113config SPI_DAVINCI 113config SPI_DAVINCI
114 tristate "SPI controller driver for DaVinci/DA8xx SoC's" 114 tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
115 depends on SPI_MASTER && ARCH_DAVINCI 115 depends on SPI_MASTER && ARCH_DAVINCI
116 select SPI_BITBANG 116 select SPI_BITBANG
117 help 117 help
118 SPI master controller for DaVinci and DA8xx SPI modules. 118 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
119
120 This driver can also be built as a module. The module will be called
121 davinci_spi.
119 122
120config SPI_EP93XX 123config SPI_EP93XX
121 tristate "Cirrus Logic EP93xx SPI controller" 124 tristate "Cirrus Logic EP93xx SPI controller"
@@ -267,12 +270,15 @@ config SPI_PPC4xx
267 270
268config SPI_PXA2XX 271config SPI_PXA2XX
269 tristate "PXA2xx SSP SPI master" 272 tristate "PXA2xx SSP SPI master"
270 depends on ARCH_PXA && EXPERIMENTAL 273 depends on (ARCH_PXA || (X86_32 && PCI)) && EXPERIMENTAL
271 select PXA_SSP 274 select PXA_SSP if ARCH_PXA
272 help 275 help
273 This enables using a PXA2xx SSP port as a SPI master controller. 276 This enables using a PXA2xx or Sodaville SSP port as a SPI master
274 The driver can be configured to use any SSP port and additional 277 controller. The driver can be configured to use any SSP port and
275 documentation can be found a Documentation/spi/pxa2xx. 278 additional documentation can be found a Documentation/spi/pxa2xx.
279
280config SPI_PXA2XX_PCI
281 def_bool SPI_PXA2XX && X86_32 && PCI
276 282
277config SPI_S3C24XX 283config SPI_S3C24XX
278 tristate "Samsung S3C24XX series SPI" 284 tristate "Samsung S3C24XX series SPI"
@@ -353,7 +359,6 @@ config SPI_XILINX
353 tristate "Xilinx SPI controller common module" 359 tristate "Xilinx SPI controller common module"
354 depends on HAS_IOMEM && EXPERIMENTAL 360 depends on HAS_IOMEM && EXPERIMENTAL
355 select SPI_BITBANG 361 select SPI_BITBANG
356 select SPI_XILINX_OF if (XILINX_VIRTEX || MICROBLAZE)
357 help 362 help
358 This exposes the SPI controller IP from the Xilinx EDK. 363 This exposes the SPI controller IP from the Xilinx EDK.
359 364
@@ -362,19 +367,6 @@ config SPI_XILINX
362 367
363 Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)" 368 Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
364 369
365config SPI_XILINX_OF
366 tristate "Xilinx SPI controller OF device"
367 depends on SPI_XILINX && (XILINX_VIRTEX || MICROBLAZE)
368 help
369 This is the OF driver for the SPI controller IP from the Xilinx EDK.
370
371config SPI_XILINX_PLTFM
372 tristate "Xilinx SPI controller platform device"
373 depends on SPI_XILINX
374 help
375 This is the platform driver for the SPI controller IP
376 from the Xilinx EDK.
377
378config SPI_NUC900 370config SPI_NUC900
379 tristate "Nuvoton NUC900 series SPI" 371 tristate "Nuvoton NUC900 series SPI"
380 depends on ARCH_W90X900 && EXPERIMENTAL 372 depends on ARCH_W90X900 && EXPERIMENTAL
@@ -396,6 +388,10 @@ config SPI_DW_PCI
396 tristate "PCI interface driver for DW SPI core" 388 tristate "PCI interface driver for DW SPI core"
397 depends on SPI_DESIGNWARE && PCI 389 depends on SPI_DESIGNWARE && PCI
398 390
391config SPI_DW_MID_DMA
392 bool "DMA support for DW SPI controller on Intel Moorestown platform"
393 depends on SPI_DW_PCI && INTEL_MID_DMAC
394
399config SPI_DW_MMIO 395config SPI_DW_MMIO
400 tristate "Memory-mapped io interface driver for DW SPI core" 396 tristate "Memory-mapped io interface driver for DW SPI core"
401 depends on SPI_DESIGNWARE && HAVE_CLK 397 depends on SPI_DESIGNWARE && HAVE_CLK
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8bc1a5abac1f..3a42463c92a4 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,13 +17,15 @@ obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
17obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o 17obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o
18obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o 18obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
19obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o 19obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
20obj-$(CONFIG_SPI_DW_PCI) += dw_spi_pci.o 20obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o
21dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o
21obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o 22obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
22obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o 23obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o
23obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 24obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
24obj-$(CONFIG_SPI_IMX) += spi_imx.o 25obj-$(CONFIG_SPI_IMX) += spi_imx.o
25obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 26obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
26obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 27obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
28obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o
27obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 29obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
28obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o 30obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
29obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o 31obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
@@ -43,8 +45,6 @@ obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
43obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o 45obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
44obj-$(CONFIG_SPI_TXX9) += spi_txx9.o 46obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
45obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o 47obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
46obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o
47obj-$(CONFIG_SPI_XILINX_PLTFM) += xilinx_spi_pltfm.o
48obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o 48obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
49obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o 49obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
50obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o 50obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index fb3d1b31772d..a2a5921c730a 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -253,11 +253,6 @@
253#define STATE_ERROR ((void *) -1) 253#define STATE_ERROR ((void *) -1)
254 254
255/* 255/*
256 * Queue State
257 */
258#define QUEUE_RUNNING (0)
259#define QUEUE_STOPPED (1)
260/*
261 * SSP State - Whether Enabled or Disabled 256 * SSP State - Whether Enabled or Disabled
262 */ 257 */
263#define SSP_DISABLED (0) 258#define SSP_DISABLED (0)
@@ -344,7 +339,7 @@ struct vendor_data {
344 * @lock: spinlock to syncronise access to driver data 339 * @lock: spinlock to syncronise access to driver data
345 * @workqueue: a workqueue on which any spi_message request is queued 340 * @workqueue: a workqueue on which any spi_message request is queued
346 * @busy: workqueue is busy 341 * @busy: workqueue is busy
347 * @run: workqueue is running 342 * @running: workqueue is running
348 * @pump_transfers: Tasklet used in Interrupt Transfer mode 343 * @pump_transfers: Tasklet used in Interrupt Transfer mode
349 * @cur_msg: Pointer to current spi_message being processed 344 * @cur_msg: Pointer to current spi_message being processed
350 * @cur_transfer: Pointer to current spi_transfer 345 * @cur_transfer: Pointer to current spi_transfer
@@ -369,8 +364,8 @@ struct pl022 {
369 struct work_struct pump_messages; 364 struct work_struct pump_messages;
370 spinlock_t queue_lock; 365 spinlock_t queue_lock;
371 struct list_head queue; 366 struct list_head queue;
372 int busy; 367 bool busy;
373 int run; 368 bool running;
374 /* Message transfer pump */ 369 /* Message transfer pump */
375 struct tasklet_struct pump_transfers; 370 struct tasklet_struct pump_transfers;
376 struct spi_message *cur_msg; 371 struct spi_message *cur_msg;
@@ -782,9 +777,9 @@ static void *next_transfer(struct pl022 *pl022)
782static void unmap_free_dma_scatter(struct pl022 *pl022) 777static void unmap_free_dma_scatter(struct pl022 *pl022)
783{ 778{
784 /* Unmap and free the SG tables */ 779 /* Unmap and free the SG tables */
785 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, 780 dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
786 pl022->sgt_tx.nents, DMA_TO_DEVICE); 781 pl022->sgt_tx.nents, DMA_TO_DEVICE);
787 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, 782 dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
788 pl022->sgt_rx.nents, DMA_FROM_DEVICE); 783 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
789 sg_free_table(&pl022->sgt_rx); 784 sg_free_table(&pl022->sgt_rx);
790 sg_free_table(&pl022->sgt_tx); 785 sg_free_table(&pl022->sgt_tx);
@@ -917,7 +912,7 @@ static int configure_dma(struct pl022 *pl022)
917 }; 912 };
918 unsigned int pages; 913 unsigned int pages;
919 int ret; 914 int ret;
920 int sglen; 915 int rx_sglen, tx_sglen;
921 struct dma_chan *rxchan = pl022->dma_rx_channel; 916 struct dma_chan *rxchan = pl022->dma_rx_channel;
922 struct dma_chan *txchan = pl022->dma_tx_channel; 917 struct dma_chan *txchan = pl022->dma_tx_channel;
923 struct dma_async_tx_descriptor *rxdesc; 918 struct dma_async_tx_descriptor *rxdesc;
@@ -956,7 +951,7 @@ static int configure_dma(struct pl022 *pl022)
956 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 951 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
957 break; 952 break;
958 case WRITING_U32: 953 case WRITING_U32:
959 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;; 954 tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
960 break; 955 break;
961 } 956 }
962 957
@@ -991,20 +986,20 @@ static int configure_dma(struct pl022 *pl022)
991 pl022->cur_transfer->len, &pl022->sgt_tx); 986 pl022->cur_transfer->len, &pl022->sgt_tx);
992 987
993 /* Map DMA buffers */ 988 /* Map DMA buffers */
994 sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, 989 rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
995 pl022->sgt_rx.nents, DMA_FROM_DEVICE); 990 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
996 if (!sglen) 991 if (!rx_sglen)
997 goto err_rx_sgmap; 992 goto err_rx_sgmap;
998 993
999 sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, 994 tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
1000 pl022->sgt_tx.nents, DMA_TO_DEVICE); 995 pl022->sgt_tx.nents, DMA_TO_DEVICE);
1001 if (!sglen) 996 if (!tx_sglen)
1002 goto err_tx_sgmap; 997 goto err_tx_sgmap;
1003 998
1004 /* Send both scatterlists */ 999 /* Send both scatterlists */
1005 rxdesc = rxchan->device->device_prep_slave_sg(rxchan, 1000 rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
1006 pl022->sgt_rx.sgl, 1001 pl022->sgt_rx.sgl,
1007 pl022->sgt_rx.nents, 1002 rx_sglen,
1008 DMA_FROM_DEVICE, 1003 DMA_FROM_DEVICE,
1009 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1004 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1010 if (!rxdesc) 1005 if (!rxdesc)
@@ -1012,7 +1007,7 @@ static int configure_dma(struct pl022 *pl022)
1012 1007
1013 txdesc = txchan->device->device_prep_slave_sg(txchan, 1008 txdesc = txchan->device->device_prep_slave_sg(txchan,
1014 pl022->sgt_tx.sgl, 1009 pl022->sgt_tx.sgl,
1015 pl022->sgt_tx.nents, 1010 tx_sglen,
1016 DMA_TO_DEVICE, 1011 DMA_TO_DEVICE,
1017 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1012 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1018 if (!txdesc) 1013 if (!txdesc)
@@ -1040,10 +1035,10 @@ err_txdesc:
1040 txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); 1035 txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0);
1041err_rxdesc: 1036err_rxdesc:
1042 rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); 1037 rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0);
1043 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, 1038 dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
1044 pl022->sgt_tx.nents, DMA_TO_DEVICE); 1039 pl022->sgt_tx.nents, DMA_TO_DEVICE);
1045err_tx_sgmap: 1040err_tx_sgmap:
1046 dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, 1041 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
1047 pl022->sgt_tx.nents, DMA_FROM_DEVICE); 1042 pl022->sgt_tx.nents, DMA_FROM_DEVICE);
1048err_rx_sgmap: 1043err_rx_sgmap:
1049 sg_free_table(&pl022->sgt_tx); 1044 sg_free_table(&pl022->sgt_tx);
@@ -1460,8 +1455,8 @@ static void pump_messages(struct work_struct *work)
1460 1455
1461 /* Lock queue and check for queue work */ 1456 /* Lock queue and check for queue work */
1462 spin_lock_irqsave(&pl022->queue_lock, flags); 1457 spin_lock_irqsave(&pl022->queue_lock, flags);
1463 if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) { 1458 if (list_empty(&pl022->queue) || !pl022->running) {
1464 pl022->busy = 0; 1459 pl022->busy = false;
1465 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1460 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1466 return; 1461 return;
1467 } 1462 }
@@ -1475,7 +1470,7 @@ static void pump_messages(struct work_struct *work)
1475 list_entry(pl022->queue.next, struct spi_message, queue); 1470 list_entry(pl022->queue.next, struct spi_message, queue);
1476 1471
1477 list_del_init(&pl022->cur_msg->queue); 1472 list_del_init(&pl022->cur_msg->queue);
1478 pl022->busy = 1; 1473 pl022->busy = true;
1479 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1474 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1480 1475
1481 /* Initial message state */ 1476 /* Initial message state */
@@ -1507,8 +1502,8 @@ static int __init init_queue(struct pl022 *pl022)
1507 INIT_LIST_HEAD(&pl022->queue); 1502 INIT_LIST_HEAD(&pl022->queue);
1508 spin_lock_init(&pl022->queue_lock); 1503 spin_lock_init(&pl022->queue_lock);
1509 1504
1510 pl022->run = QUEUE_STOPPED; 1505 pl022->running = false;
1511 pl022->busy = 0; 1506 pl022->busy = false;
1512 1507
1513 tasklet_init(&pl022->pump_transfers, 1508 tasklet_init(&pl022->pump_transfers,
1514 pump_transfers, (unsigned long)pl022); 1509 pump_transfers, (unsigned long)pl022);
@@ -1529,12 +1524,12 @@ static int start_queue(struct pl022 *pl022)
1529 1524
1530 spin_lock_irqsave(&pl022->queue_lock, flags); 1525 spin_lock_irqsave(&pl022->queue_lock, flags);
1531 1526
1532 if (pl022->run == QUEUE_RUNNING || pl022->busy) { 1527 if (pl022->running || pl022->busy) {
1533 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1528 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1534 return -EBUSY; 1529 return -EBUSY;
1535 } 1530 }
1536 1531
1537 pl022->run = QUEUE_RUNNING; 1532 pl022->running = true;
1538 pl022->cur_msg = NULL; 1533 pl022->cur_msg = NULL;
1539 pl022->cur_transfer = NULL; 1534 pl022->cur_transfer = NULL;
1540 pl022->cur_chip = NULL; 1535 pl022->cur_chip = NULL;
@@ -1566,7 +1561,8 @@ static int stop_queue(struct pl022 *pl022)
1566 1561
1567 if (!list_empty(&pl022->queue) || pl022->busy) 1562 if (!list_empty(&pl022->queue) || pl022->busy)
1568 status = -EBUSY; 1563 status = -EBUSY;
1569 else pl022->run = QUEUE_STOPPED; 1564 else
1565 pl022->running = false;
1570 1566
1571 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1567 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1572 1568
@@ -1684,7 +1680,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1684 1680
1685 spin_lock_irqsave(&pl022->queue_lock, flags); 1681 spin_lock_irqsave(&pl022->queue_lock, flags);
1686 1682
1687 if (pl022->run == QUEUE_STOPPED) { 1683 if (!pl022->running) {
1688 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1684 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1689 return -ESHUTDOWN; 1685 return -ESHUTDOWN;
1690 } 1686 }
@@ -1693,7 +1689,7 @@ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1693 msg->state = STATE_START; 1689 msg->state = STATE_START;
1694 1690
1695 list_add_tail(&msg->queue, &pl022->queue); 1691 list_add_tail(&msg->queue, &pl022->queue);
1696 if (pl022->run == QUEUE_RUNNING && !pl022->busy) 1692 if (pl022->running && !pl022->busy)
1697 queue_work(pl022->workqueue, &pl022->pump_messages); 1693 queue_work(pl022->workqueue, &pl022->pump_messages);
1698 1694
1699 spin_unlock_irqrestore(&pl022->queue_lock, flags); 1695 spin_unlock_irqrestore(&pl022->queue_lock, flags);
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index b85090caf7cf..6beab99bf95b 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2009 Texas Instruments. 2 * Copyright (C) 2009 Texas Instruments.
3 * Copyright (C) 2010 EF Johnson Technologies
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -38,11 +39,6 @@
38 39
39#define CS_DEFAULT 0xFF 40#define CS_DEFAULT 0xFF
40 41
41#define SPI_BUFSIZ (SMP_CACHE_BYTES + 1)
42#define DAVINCI_DMA_DATA_TYPE_S8 0x01
43#define DAVINCI_DMA_DATA_TYPE_S16 0x02
44#define DAVINCI_DMA_DATA_TYPE_S32 0x04
45
46#define SPIFMT_PHASE_MASK BIT(16) 42#define SPIFMT_PHASE_MASK BIT(16)
47#define SPIFMT_POLARITY_MASK BIT(17) 43#define SPIFMT_POLARITY_MASK BIT(17)
48#define SPIFMT_DISTIMER_MASK BIT(18) 44#define SPIFMT_DISTIMER_MASK BIT(18)
@@ -52,34 +48,43 @@
52#define SPIFMT_ODD_PARITY_MASK BIT(23) 48#define SPIFMT_ODD_PARITY_MASK BIT(23)
53#define SPIFMT_WDELAY_MASK 0x3f000000u 49#define SPIFMT_WDELAY_MASK 0x3f000000u
54#define SPIFMT_WDELAY_SHIFT 24 50#define SPIFMT_WDELAY_SHIFT 24
55#define SPIFMT_CHARLEN_MASK 0x0000001Fu 51#define SPIFMT_PRESCALE_SHIFT 8
56
57/* SPIGCR1 */
58#define SPIGCR1_SPIENA_MASK 0x01000000u
59 52
60/* SPIPC0 */ 53/* SPIPC0 */
61#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ 54#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
62#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ 55#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
63#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ 56#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
64#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ 57#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
65#define SPIPC0_EN1FUN_MASK BIT(1)
66#define SPIPC0_EN0FUN_MASK BIT(0)
67 58
68#define SPIINT_MASKALL 0x0101035F 59#define SPIINT_MASKALL 0x0101035F
69#define SPI_INTLVL_1 0x000001FFu 60#define SPIINT_MASKINT 0x0000015F
70#define SPI_INTLVL_0 0x00000000u 61#define SPI_INTLVL_1 0x000001FF
62#define SPI_INTLVL_0 0x00000000
71 63
72/* SPIDAT1 */ 64/* SPIDAT1 (upper 16 bit defines) */
73#define SPIDAT1_CSHOLD_SHIFT 28 65#define SPIDAT1_CSHOLD_MASK BIT(12)
74#define SPIDAT1_CSNR_SHIFT 16 66
67/* SPIGCR1 */
75#define SPIGCR1_CLKMOD_MASK BIT(1) 68#define SPIGCR1_CLKMOD_MASK BIT(1)
76#define SPIGCR1_MASTER_MASK BIT(0) 69#define SPIGCR1_MASTER_MASK BIT(0)
70#define SPIGCR1_POWERDOWN_MASK BIT(8)
77#define SPIGCR1_LOOPBACK_MASK BIT(16) 71#define SPIGCR1_LOOPBACK_MASK BIT(16)
72#define SPIGCR1_SPIENA_MASK BIT(24)
78 73
79/* SPIBUF */ 74/* SPIBUF */
80#define SPIBUF_TXFULL_MASK BIT(29) 75#define SPIBUF_TXFULL_MASK BIT(29)
81#define SPIBUF_RXEMPTY_MASK BIT(31) 76#define SPIBUF_RXEMPTY_MASK BIT(31)
82 77
78/* SPIDELAY */
79#define SPIDELAY_C2TDELAY_SHIFT 24
80#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT)
81#define SPIDELAY_T2CDELAY_SHIFT 16
82#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT)
83#define SPIDELAY_T2EDELAY_SHIFT 8
84#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT)
85#define SPIDELAY_C2EDELAY_SHIFT 0
86#define SPIDELAY_C2EDELAY_MASK 0xFF
87
83/* Error Masks */ 88/* Error Masks */
84#define SPIFLG_DLEN_ERR_MASK BIT(0) 89#define SPIFLG_DLEN_ERR_MASK BIT(0)
85#define SPIFLG_TIMEOUT_MASK BIT(1) 90#define SPIFLG_TIMEOUT_MASK BIT(1)
@@ -87,29 +92,13 @@
87#define SPIFLG_DESYNC_MASK BIT(3) 92#define SPIFLG_DESYNC_MASK BIT(3)
88#define SPIFLG_BITERR_MASK BIT(4) 93#define SPIFLG_BITERR_MASK BIT(4)
89#define SPIFLG_OVRRUN_MASK BIT(6) 94#define SPIFLG_OVRRUN_MASK BIT(6)
90#define SPIFLG_RX_INTR_MASK BIT(8)
91#define SPIFLG_TX_INTR_MASK BIT(9)
92#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) 95#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
93#define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ 96#define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \
94 | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ 97 | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
95 | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ 98 | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
96 | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \ 99 | SPIFLG_OVRRUN_MASK)
97 | SPIFLG_TX_INTR_MASK \
98 | SPIFLG_BUF_INIT_ACTIVE_MASK)
99
100#define SPIINT_DLEN_ERR_INTR BIT(0)
101#define SPIINT_TIMEOUT_INTR BIT(1)
102#define SPIINT_PARERR_INTR BIT(2)
103#define SPIINT_DESYNC_INTR BIT(3)
104#define SPIINT_BITERR_INTR BIT(4)
105#define SPIINT_OVRRUN_INTR BIT(6)
106#define SPIINT_RX_INTR BIT(8)
107#define SPIINT_TX_INTR BIT(9)
108#define SPIINT_DMA_REQ_EN BIT(16)
109#define SPIINT_ENABLE_HIGHZ BIT(24)
110 100
111#define SPI_T2CDELAY_SHIFT 16 101#define SPIINT_DMA_REQ_EN BIT(16)
112#define SPI_C2TDELAY_SHIFT 24
113 102
114/* SPI Controller registers */ 103/* SPI Controller registers */
115#define SPIGCR0 0x00 104#define SPIGCR0 0x00
@@ -118,44 +107,18 @@
118#define SPILVL 0x0c 107#define SPILVL 0x0c
119#define SPIFLG 0x10 108#define SPIFLG 0x10
120#define SPIPC0 0x14 109#define SPIPC0 0x14
121#define SPIPC1 0x18
122#define SPIPC2 0x1c
123#define SPIPC3 0x20
124#define SPIPC4 0x24
125#define SPIPC5 0x28
126#define SPIPC6 0x2c
127#define SPIPC7 0x30
128#define SPIPC8 0x34
129#define SPIDAT0 0x38
130#define SPIDAT1 0x3c 110#define SPIDAT1 0x3c
131#define SPIBUF 0x40 111#define SPIBUF 0x40
132#define SPIEMU 0x44
133#define SPIDELAY 0x48 112#define SPIDELAY 0x48
134#define SPIDEF 0x4c 113#define SPIDEF 0x4c
135#define SPIFMT0 0x50 114#define SPIFMT0 0x50
136#define SPIFMT1 0x54
137#define SPIFMT2 0x58
138#define SPIFMT3 0x5c
139#define TGINTVEC0 0x60
140#define TGINTVEC1 0x64
141
142struct davinci_spi_slave {
143 u32 cmd_to_write;
144 u32 clk_ctrl_to_write;
145 u32 bytes_per_word;
146 u8 active_cs;
147};
148 115
149/* We have 2 DMA channels per CS, one for RX and one for TX */ 116/* We have 2 DMA channels per CS, one for RX and one for TX */
150struct davinci_spi_dma { 117struct davinci_spi_dma {
151 int dma_tx_channel; 118 int tx_channel;
152 int dma_rx_channel; 119 int rx_channel;
153 int dma_tx_sync_dev; 120 int dummy_param_slot;
154 int dma_rx_sync_dev;
155 enum dma_event_q eventq; 121 enum dma_event_q eventq;
156
157 struct completion dma_tx_completion;
158 struct completion dma_rx_completion;
159}; 122};
160 123
161/* SPI Controller driver's private data. */ 124/* SPI Controller driver's private data. */
@@ -166,58 +129,63 @@ struct davinci_spi {
166 u8 version; 129 u8 version;
167 resource_size_t pbase; 130 resource_size_t pbase;
168 void __iomem *base; 131 void __iomem *base;
169 size_t region_size;
170 u32 irq; 132 u32 irq;
171 struct completion done; 133 struct completion done;
172 134
173 const void *tx; 135 const void *tx;
174 void *rx; 136 void *rx;
175 u8 *tmp_buf; 137#define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1)
176 int count; 138 u8 rx_tmp_buf[SPI_TMP_BUFSZ];
177 struct davinci_spi_dma *dma_channels; 139 int rcount;
178 struct davinci_spi_platform_data *pdata; 140 int wcount;
141 struct davinci_spi_dma dma;
142 struct davinci_spi_platform_data *pdata;
179 143
180 void (*get_rx)(u32 rx_data, struct davinci_spi *); 144 void (*get_rx)(u32 rx_data, struct davinci_spi *);
181 u32 (*get_tx)(struct davinci_spi *); 145 u32 (*get_tx)(struct davinci_spi *);
182 146
183 struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; 147 u8 bytes_per_word[SPI_MAX_CHIPSELECT];
184}; 148};
185 149
186static unsigned use_dma; 150static struct davinci_spi_config davinci_spi_default_cfg;
187 151
188static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) 152static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
189{ 153{
190 u8 *rx = davinci_spi->rx; 154 if (dspi->rx) {
191 155 u8 *rx = dspi->rx;
192 *rx++ = (u8)data; 156 *rx++ = (u8)data;
193 davinci_spi->rx = rx; 157 dspi->rx = rx;
158 }
194} 159}
195 160
196static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) 161static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
197{ 162{
198 u16 *rx = davinci_spi->rx; 163 if (dspi->rx) {
199 164 u16 *rx = dspi->rx;
200 *rx++ = (u16)data; 165 *rx++ = (u16)data;
201 davinci_spi->rx = rx; 166 dspi->rx = rx;
167 }
202} 168}
203 169
204static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) 170static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
205{ 171{
206 u32 data; 172 u32 data = 0;
207 const u8 *tx = davinci_spi->tx; 173 if (dspi->tx) {
208 174 const u8 *tx = dspi->tx;
209 data = *tx++; 175 data = *tx++;
210 davinci_spi->tx = tx; 176 dspi->tx = tx;
177 }
211 return data; 178 return data;
212} 179}
213 180
214static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) 181static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
215{ 182{
216 u32 data; 183 u32 data = 0;
217 const u16 *tx = davinci_spi->tx; 184 if (dspi->tx) {
218 185 const u16 *tx = dspi->tx;
219 data = *tx++; 186 data = *tx++;
220 davinci_spi->tx = tx; 187 dspi->tx = tx;
188 }
221 return data; 189 return data;
222} 190}
223 191
@@ -237,55 +205,67 @@ static inline void clear_io_bits(void __iomem *addr, u32 bits)
237 iowrite32(v, addr); 205 iowrite32(v, addr);
238} 206}
239 207
240static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
241{
242 set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
243}
244
245static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num)
246{
247 clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits);
248}
249
250static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable)
251{
252 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
253
254 if (enable)
255 set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
256 else
257 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN);
258}
259
260/* 208/*
261 * Interface to control the chip select signal 209 * Interface to control the chip select signal
262 */ 210 */
263static void davinci_spi_chipselect(struct spi_device *spi, int value) 211static void davinci_spi_chipselect(struct spi_device *spi, int value)
264{ 212{
265 struct davinci_spi *davinci_spi; 213 struct davinci_spi *dspi;
266 struct davinci_spi_platform_data *pdata; 214 struct davinci_spi_platform_data *pdata;
267 u32 data1_reg_val = 0; 215 u8 chip_sel = spi->chip_select;
216 u16 spidat1 = CS_DEFAULT;
217 bool gpio_chipsel = false;
268 218
269 davinci_spi = spi_master_get_devdata(spi->master); 219 dspi = spi_master_get_devdata(spi->master);
270 pdata = davinci_spi->pdata; 220 pdata = dspi->pdata;
221
222 if (pdata->chip_sel && chip_sel < pdata->num_chipselect &&
223 pdata->chip_sel[chip_sel] != SPI_INTERN_CS)
224 gpio_chipsel = true;
271 225
272 /* 226 /*
273 * Board specific chip select logic decides the polarity and cs 227 * Board specific chip select logic decides the polarity and cs
274 * line for the controller 228 * line for the controller
275 */ 229 */
276 if (value == BITBANG_CS_INACTIVE) { 230 if (gpio_chipsel) {
277 set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); 231 if (value == BITBANG_CS_ACTIVE)
278 232 gpio_set_value(pdata->chip_sel[chip_sel], 0);
279 data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; 233 else
280 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); 234 gpio_set_value(pdata->chip_sel[chip_sel], 1);
235 } else {
236 if (value == BITBANG_CS_ACTIVE) {
237 spidat1 |= SPIDAT1_CSHOLD_MASK;
238 spidat1 &= ~(0x1 << chip_sel);
239 }
281 240
282 while ((ioread32(davinci_spi->base + SPIBUF) 241 iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
283 & SPIBUF_RXEMPTY_MASK) == 0)
284 cpu_relax();
285 } 242 }
286} 243}
287 244
288/** 245/**
246 * davinci_spi_get_prescale - Calculates the correct prescale value
247 * @maxspeed_hz: the maximum rate the SPI clock can run at
248 *
249 * This function calculates the prescale value that generates a clock rate
250 * less than or equal to the specified maximum.
251 *
252 * Returns: calculated prescale - 1 for easy programming into SPI registers
253 * or negative error number if valid prescalar cannot be updated.
254 */
255static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
256 u32 max_speed_hz)
257{
258 int ret;
259
260 ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
261
262 if (ret < 3 || ret > 256)
263 return -EINVAL;
264
265 return ret - 1;
266}
267
268/**
289 * davinci_spi_setup_transfer - This functions will determine transfer method 269 * davinci_spi_setup_transfer - This functions will determine transfer method
290 * @spi: spi device on which data transfer to be done 270 * @spi: spi device on which data transfer to be done
291 * @t: spi transfer in which transfer info is filled 271 * @t: spi transfer in which transfer info is filled
@@ -298,13 +278,15 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
298 struct spi_transfer *t) 278 struct spi_transfer *t)
299{ 279{
300 280
301 struct davinci_spi *davinci_spi; 281 struct davinci_spi *dspi;
302 struct davinci_spi_platform_data *pdata; 282 struct davinci_spi_config *spicfg;
303 u8 bits_per_word = 0; 283 u8 bits_per_word = 0;
304 u32 hz = 0, prescale = 0, clkspeed; 284 u32 hz = 0, spifmt = 0, prescale = 0;
305 285
306 davinci_spi = spi_master_get_devdata(spi->master); 286 dspi = spi_master_get_devdata(spi->master);
307 pdata = davinci_spi->pdata; 287 spicfg = (struct davinci_spi_config *)spi->controller_data;
288 if (!spicfg)
289 spicfg = &davinci_spi_default_cfg;
308 290
309 if (t) { 291 if (t) {
310 bits_per_word = t->bits_per_word; 292 bits_per_word = t->bits_per_word;
@@ -320,111 +302,83 @@ static int davinci_spi_setup_transfer(struct spi_device *spi,
320 * 8bit, 16bit or 32bit transfer 302 * 8bit, 16bit or 32bit transfer
321 */ 303 */
322 if (bits_per_word <= 8 && bits_per_word >= 2) { 304 if (bits_per_word <= 8 && bits_per_word >= 2) {
323 davinci_spi->get_rx = davinci_spi_rx_buf_u8; 305 dspi->get_rx = davinci_spi_rx_buf_u8;
324 davinci_spi->get_tx = davinci_spi_tx_buf_u8; 306 dspi->get_tx = davinci_spi_tx_buf_u8;
325 davinci_spi->slave[spi->chip_select].bytes_per_word = 1; 307 dspi->bytes_per_word[spi->chip_select] = 1;
326 } else if (bits_per_word <= 16 && bits_per_word >= 2) { 308 } else if (bits_per_word <= 16 && bits_per_word >= 2) {
327 davinci_spi->get_rx = davinci_spi_rx_buf_u16; 309 dspi->get_rx = davinci_spi_rx_buf_u16;
328 davinci_spi->get_tx = davinci_spi_tx_buf_u16; 310 dspi->get_tx = davinci_spi_tx_buf_u16;
329 davinci_spi->slave[spi->chip_select].bytes_per_word = 2; 311 dspi->bytes_per_word[spi->chip_select] = 2;
330 } else 312 } else
331 return -EINVAL; 313 return -EINVAL;
332 314
333 if (!hz) 315 if (!hz)
334 hz = spi->max_speed_hz; 316 hz = spi->max_speed_hz;
335 317
336 clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, 318 /* Set up SPIFMTn register, unique to this chipselect. */
337 spi->chip_select);
338 set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f,
339 spi->chip_select);
340 319
341 clkspeed = clk_get_rate(davinci_spi->clk); 320 prescale = davinci_spi_get_prescale(dspi, hz);
342 if (hz > clkspeed / 2) 321 if (prescale < 0)
343 prescale = 1 << 8; 322 return prescale;
344 if (hz < clkspeed / 256)
345 prescale = 255 << 8;
346 if (!prescale)
347 prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00;
348 323
349 clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); 324 spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
350 set_fmt_bits(davinci_spi->base, prescale, spi->chip_select);
351 325
352 return 0; 326 if (spi->mode & SPI_LSB_FIRST)
353} 327 spifmt |= SPIFMT_SHIFTDIR_MASK;
354 328
355static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) 329 if (spi->mode & SPI_CPOL)
356{ 330 spifmt |= SPIFMT_POLARITY_MASK;
357 struct spi_device *spi = (struct spi_device *)data;
358 struct davinci_spi *davinci_spi;
359 struct davinci_spi_dma *davinci_spi_dma;
360 struct davinci_spi_platform_data *pdata;
361 331
362 davinci_spi = spi_master_get_devdata(spi->master); 332 if (!(spi->mode & SPI_CPHA))
363 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); 333 spifmt |= SPIFMT_PHASE_MASK;
364 pdata = davinci_spi->pdata;
365 334
366 if (ch_status == DMA_COMPLETE) 335 /*
367 edma_stop(davinci_spi_dma->dma_rx_channel); 336 * Version 1 hardware supports two basic SPI modes:
368 else 337 * - Standard SPI mode uses 4 pins, with chipselect
369 edma_clean_channel(davinci_spi_dma->dma_rx_channel); 338 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
339 * (distinct from SPI_3WIRE, with just one data wire;
340 * or similar variants without MOSI or without MISO)
341 *
342 * Version 2 hardware supports an optional handshaking signal,
343 * so it can support two more modes:
344 * - 5 pin SPI variant is standard SPI plus SPI_READY
345 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
346 */
370 347
371 complete(&davinci_spi_dma->dma_rx_completion); 348 if (dspi->version == SPI_VERSION_2) {
372 /* We must disable the DMA RX request */
373 davinci_spi_set_dma_req(spi, 0);
374}
375 349
376static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) 350 u32 delay = 0;
377{
378 struct spi_device *spi = (struct spi_device *)data;
379 struct davinci_spi *davinci_spi;
380 struct davinci_spi_dma *davinci_spi_dma;
381 struct davinci_spi_platform_data *pdata;
382 351
383 davinci_spi = spi_master_get_devdata(spi->master); 352 spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
384 davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); 353 & SPIFMT_WDELAY_MASK);
385 pdata = davinci_spi->pdata;
386 354
387 if (ch_status == DMA_COMPLETE) 355 if (spicfg->odd_parity)
388 edma_stop(davinci_spi_dma->dma_tx_channel); 356 spifmt |= SPIFMT_ODD_PARITY_MASK;
389 else
390 edma_clean_channel(davinci_spi_dma->dma_tx_channel);
391 357
392 complete(&davinci_spi_dma->dma_tx_completion); 358 if (spicfg->parity_enable)
393 /* We must disable the DMA TX request */ 359 spifmt |= SPIFMT_PARITYENA_MASK;
394 davinci_spi_set_dma_req(spi, 0);
395}
396 360
397static int davinci_spi_request_dma(struct spi_device *spi) 361 if (spicfg->timer_disable) {
398{ 362 spifmt |= SPIFMT_DISTIMER_MASK;
399 struct davinci_spi *davinci_spi; 363 } else {
400 struct davinci_spi_dma *davinci_spi_dma; 364 delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
401 struct davinci_spi_platform_data *pdata; 365 & SPIDELAY_C2TDELAY_MASK;
402 struct device *sdev; 366 delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
403 int r; 367 & SPIDELAY_T2CDELAY_MASK;
368 }
404 369
405 davinci_spi = spi_master_get_devdata(spi->master); 370 if (spi->mode & SPI_READY) {
406 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 371 spifmt |= SPIFMT_WAITENA_MASK;
407 pdata = davinci_spi->pdata; 372 delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
408 sdev = davinci_spi->bitbang.master->dev.parent; 373 & SPIDELAY_T2EDELAY_MASK;
374 delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
375 & SPIDELAY_C2EDELAY_MASK;
376 }
409 377
410 r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, 378 iowrite32(delay, dspi->base + SPIDELAY);
411 davinci_spi_dma_rx_callback, spi,
412 davinci_spi_dma->eventq);
413 if (r < 0) {
414 dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n");
415 return -EAGAIN;
416 } 379 }
417 davinci_spi_dma->dma_rx_channel = r; 380
418 r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, 381 iowrite32(spifmt, dspi->base + SPIFMT0);
419 davinci_spi_dma_tx_callback, spi,
420 davinci_spi_dma->eventq);
421 if (r < 0) {
422 edma_free_channel(davinci_spi_dma->dma_rx_channel);
423 davinci_spi_dma->dma_rx_channel = -1;
424 dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n");
425 return -EAGAIN;
426 }
427 davinci_spi_dma->dma_tx_channel = r;
428 382
429 return 0; 383 return 0;
430} 384}
@@ -435,190 +389,40 @@ static int davinci_spi_request_dma(struct spi_device *spi)
435 * 389 *
436 * This functions sets the default transfer method. 390 * This functions sets the default transfer method.
437 */ 391 */
438
439static int davinci_spi_setup(struct spi_device *spi) 392static int davinci_spi_setup(struct spi_device *spi)
440{ 393{
441 int retval; 394 int retval = 0;
442 struct davinci_spi *davinci_spi; 395 struct davinci_spi *dspi;
443 struct davinci_spi_dma *davinci_spi_dma; 396 struct davinci_spi_platform_data *pdata;
444 struct device *sdev;
445 397
446 davinci_spi = spi_master_get_devdata(spi->master); 398 dspi = spi_master_get_devdata(spi->master);
447 sdev = davinci_spi->bitbang.master->dev.parent; 399 pdata = dspi->pdata;
448 400
449 /* if bits per word length is zero then set it default 8 */ 401 /* if bits per word length is zero then set it default 8 */
450 if (!spi->bits_per_word) 402 if (!spi->bits_per_word)
451 spi->bits_per_word = 8; 403 spi->bits_per_word = 8;
452 404
453 davinci_spi->slave[spi->chip_select].cmd_to_write = 0; 405 if (!(spi->mode & SPI_NO_CS)) {
454 406 if ((pdata->chip_sel == NULL) ||
455 if (use_dma && davinci_spi->dma_channels) { 407 (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS))
456 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 408 set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
457
458 if ((davinci_spi_dma->dma_rx_channel == -1)
459 || (davinci_spi_dma->dma_tx_channel == -1)) {
460 retval = davinci_spi_request_dma(spi);
461 if (retval < 0)
462 return retval;
463 }
464 }
465
466 /*
467 * SPI in DaVinci and DA8xx operate between
468 * 600 KHz and 50 MHz
469 */
470 if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) {
471 dev_dbg(sdev, "Operating frequency is not in acceptable "
472 "range\n");
473 return -EINVAL;
474 }
475
476 /*
477 * Set up SPIFMTn register, unique to this chipselect.
478 *
479 * NOTE: we could do all of these with one write. Also, some
480 * of the "version 2" features are found in chips that don't
481 * support all of them...
482 */
483 if (spi->mode & SPI_LSB_FIRST)
484 set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
485 spi->chip_select);
486 else
487 clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK,
488 spi->chip_select);
489
490 if (spi->mode & SPI_CPOL)
491 set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
492 spi->chip_select);
493 else
494 clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK,
495 spi->chip_select);
496 409
497 if (!(spi->mode & SPI_CPHA))
498 set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
499 spi->chip_select);
500 else
501 clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK,
502 spi->chip_select);
503
504 /*
505 * Version 1 hardware supports two basic SPI modes:
506 * - Standard SPI mode uses 4 pins, with chipselect
507 * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
508 * (distinct from SPI_3WIRE, with just one data wire;
509 * or similar variants without MOSI or without MISO)
510 *
511 * Version 2 hardware supports an optional handshaking signal,
512 * so it can support two more modes:
513 * - 5 pin SPI variant is standard SPI plus SPI_READY
514 * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
515 */
516
517 if (davinci_spi->version == SPI_VERSION_2) {
518 clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK,
519 spi->chip_select);
520 set_fmt_bits(davinci_spi->base,
521 (davinci_spi->pdata->wdelay
522 << SPIFMT_WDELAY_SHIFT)
523 & SPIFMT_WDELAY_MASK,
524 spi->chip_select);
525
526 if (davinci_spi->pdata->odd_parity)
527 set_fmt_bits(davinci_spi->base,
528 SPIFMT_ODD_PARITY_MASK,
529 spi->chip_select);
530 else
531 clear_fmt_bits(davinci_spi->base,
532 SPIFMT_ODD_PARITY_MASK,
533 spi->chip_select);
534
535 if (davinci_spi->pdata->parity_enable)
536 set_fmt_bits(davinci_spi->base,
537 SPIFMT_PARITYENA_MASK,
538 spi->chip_select);
539 else
540 clear_fmt_bits(davinci_spi->base,
541 SPIFMT_PARITYENA_MASK,
542 spi->chip_select);
543
544 if (davinci_spi->pdata->wait_enable)
545 set_fmt_bits(davinci_spi->base,
546 SPIFMT_WAITENA_MASK,
547 spi->chip_select);
548 else
549 clear_fmt_bits(davinci_spi->base,
550 SPIFMT_WAITENA_MASK,
551 spi->chip_select);
552
553 if (davinci_spi->pdata->timer_disable)
554 set_fmt_bits(davinci_spi->base,
555 SPIFMT_DISTIMER_MASK,
556 spi->chip_select);
557 else
558 clear_fmt_bits(davinci_spi->base,
559 SPIFMT_DISTIMER_MASK,
560 spi->chip_select);
561 } 410 }
562 411
563 retval = davinci_spi_setup_transfer(spi, NULL);
564
565 return retval;
566}
567
568static void davinci_spi_cleanup(struct spi_device *spi)
569{
570 struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master);
571 struct davinci_spi_dma *davinci_spi_dma;
572
573 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
574
575 if (use_dma && davinci_spi->dma_channels) {
576 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select];
577
578 if ((davinci_spi_dma->dma_rx_channel != -1)
579 && (davinci_spi_dma->dma_tx_channel != -1)) {
580 edma_free_channel(davinci_spi_dma->dma_tx_channel);
581 edma_free_channel(davinci_spi_dma->dma_rx_channel);
582 }
583 }
584}
585
586static int davinci_spi_bufs_prep(struct spi_device *spi,
587 struct davinci_spi *davinci_spi)
588{
589 int op_mode = 0;
590
591 /*
592 * REVISIT unless devices disagree about SPI_LOOP or
593 * SPI_READY (SPI_NO_CS only allows one device!), this
594 * should not need to be done before each message...
595 * optimize for both flags staying cleared.
596 */
597
598 op_mode = SPIPC0_DIFUN_MASK
599 | SPIPC0_DOFUN_MASK
600 | SPIPC0_CLKFUN_MASK;
601 if (!(spi->mode & SPI_NO_CS))
602 op_mode |= 1 << spi->chip_select;
603 if (spi->mode & SPI_READY) 412 if (spi->mode & SPI_READY)
604 op_mode |= SPIPC0_SPIENA_MASK; 413 set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
605
606 iowrite32(op_mode, davinci_spi->base + SPIPC0);
607 414
608 if (spi->mode & SPI_LOOP) 415 if (spi->mode & SPI_LOOP)
609 set_io_bits(davinci_spi->base + SPIGCR1, 416 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
610 SPIGCR1_LOOPBACK_MASK);
611 else 417 else
612 clear_io_bits(davinci_spi->base + SPIGCR1, 418 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
613 SPIGCR1_LOOPBACK_MASK);
614 419
615 return 0; 420 return retval;
616} 421}
617 422
618static int davinci_spi_check_error(struct davinci_spi *davinci_spi, 423static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
619 int int_status)
620{ 424{
621 struct device *sdev = davinci_spi->bitbang.master->dev.parent; 425 struct device *sdev = dspi->bitbang.master->dev.parent;
622 426
623 if (int_status & SPIFLG_TIMEOUT_MASK) { 427 if (int_status & SPIFLG_TIMEOUT_MASK) {
624 dev_dbg(sdev, "SPI Time-out Error\n"); 428 dev_dbg(sdev, "SPI Time-out Error\n");
@@ -633,7 +437,7 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
633 return -EIO; 437 return -EIO;
634 } 438 }
635 439
636 if (davinci_spi->version == SPI_VERSION_2) { 440 if (dspi->version == SPI_VERSION_2) {
637 if (int_status & SPIFLG_DLEN_ERR_MASK) { 441 if (int_status & SPIFLG_DLEN_ERR_MASK) {
638 dev_dbg(sdev, "SPI Data Length Error\n"); 442 dev_dbg(sdev, "SPI Data Length Error\n");
639 return -EIO; 443 return -EIO;
@@ -646,10 +450,6 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
646 dev_dbg(sdev, "SPI Data Overrun error\n"); 450 dev_dbg(sdev, "SPI Data Overrun error\n");
647 return -EIO; 451 return -EIO;
648 } 452 }
649 if (int_status & SPIFLG_TX_INTR_MASK) {
650 dev_dbg(sdev, "SPI TX intr bit set\n");
651 return -EIO;
652 }
653 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { 453 if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
654 dev_dbg(sdev, "SPI Buffer Init Active\n"); 454 dev_dbg(sdev, "SPI Buffer Init Active\n");
655 return -EBUSY; 455 return -EBUSY;
@@ -660,366 +460,339 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi,
660} 460}
661 461
662/** 462/**
663 * davinci_spi_bufs - functions which will handle transfer data 463 * davinci_spi_process_events - check for and handle any SPI controller events
664 * @spi: spi device on which data transfer to be done 464 * @dspi: the controller data
665 * @t: spi transfer in which transfer info is filled
666 * 465 *
667 * This function will put data to be transferred into data register 466 * This function will check the SPIFLG register and handle any events that are
668 * of SPI controller and then wait until the completion will be marked 467 * detected there
669 * by the IRQ Handler.
670 */ 468 */
671static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) 469static int davinci_spi_process_events(struct davinci_spi *dspi)
672{ 470{
673 struct davinci_spi *davinci_spi; 471 u32 buf, status, errors = 0, spidat1;
674 int int_status, count, ret;
675 u8 conv, tmp;
676 u32 tx_data, data1_reg_val;
677 u32 buf_val, flg_val;
678 struct davinci_spi_platform_data *pdata;
679
680 davinci_spi = spi_master_get_devdata(spi->master);
681 pdata = davinci_spi->pdata;
682
683 davinci_spi->tx = t->tx_buf;
684 davinci_spi->rx = t->rx_buf;
685
686 /* convert len to words based on bits_per_word */
687 conv = davinci_spi->slave[spi->chip_select].bytes_per_word;
688 davinci_spi->count = t->len / conv;
689
690 INIT_COMPLETION(davinci_spi->done);
691
692 ret = davinci_spi_bufs_prep(spi, davinci_spi);
693 if (ret)
694 return ret;
695
696 /* Enable SPI */
697 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
698
699 iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) |
700 (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
701 davinci_spi->base + SPIDELAY);
702
703 count = davinci_spi->count;
704 data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
705 tmp = ~(0x1 << spi->chip_select);
706
707 clear_io_bits(davinci_spi->base + SPIDEF, ~tmp);
708
709 data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT;
710 472
711 while ((ioread32(davinci_spi->base + SPIBUF) 473 buf = ioread32(dspi->base + SPIBUF);
712 & SPIBUF_RXEMPTY_MASK) == 0)
713 cpu_relax();
714 474
715 /* Determine the command to execute READ or WRITE */ 475 if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
716 if (t->tx_buf) { 476 dspi->get_rx(buf & 0xFFFF, dspi);
717 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); 477 dspi->rcount--;
478 }
718 479
719 while (1) { 480 status = ioread32(dspi->base + SPIFLG);
720 tx_data = davinci_spi->get_tx(davinci_spi);
721 481
722 data1_reg_val &= ~(0xFFFF); 482 if (unlikely(status & SPIFLG_ERROR_MASK)) {
723 data1_reg_val |= (0xFFFF & tx_data); 483 errors = status & SPIFLG_ERROR_MASK;
484 goto out;
485 }
724 486
725 buf_val = ioread32(davinci_spi->base + SPIBUF); 487 if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
726 if ((buf_val & SPIBUF_TXFULL_MASK) == 0) { 488 spidat1 = ioread32(dspi->base + SPIDAT1);
727 iowrite32(data1_reg_val, 489 dspi->wcount--;
728 davinci_spi->base + SPIDAT1); 490 spidat1 &= ~0xFFFF;
491 spidat1 |= 0xFFFF & dspi->get_tx(dspi);
492 iowrite32(spidat1, dspi->base + SPIDAT1);
493 }
729 494
730 count--; 495out:
731 } 496 return errors;
732 while (ioread32(davinci_spi->base + SPIBUF) 497}
733 & SPIBUF_RXEMPTY_MASK)
734 cpu_relax();
735
736 /* getting the returned byte */
737 if (t->rx_buf) {
738 buf_val = ioread32(davinci_spi->base + SPIBUF);
739 davinci_spi->get_rx(buf_val, davinci_spi);
740 }
741 if (count <= 0)
742 break;
743 }
744 } else {
745 if (pdata->poll_mode) {
746 while (1) {
747 /* keeps the serial clock going */
748 if ((ioread32(davinci_spi->base + SPIBUF)
749 & SPIBUF_TXFULL_MASK) == 0)
750 iowrite32(data1_reg_val,
751 davinci_spi->base + SPIDAT1);
752
753 while (ioread32(davinci_spi->base + SPIBUF) &
754 SPIBUF_RXEMPTY_MASK)
755 cpu_relax();
756
757 flg_val = ioread32(davinci_spi->base + SPIFLG);
758 buf_val = ioread32(davinci_spi->base + SPIBUF);
759
760 davinci_spi->get_rx(buf_val, davinci_spi);
761
762 count--;
763 if (count <= 0)
764 break;
765 }
766 } else { /* Receive in Interrupt mode */
767 int i;
768 498
769 for (i = 0; i < davinci_spi->count; i++) { 499static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data)
770 set_io_bits(davinci_spi->base + SPIINT, 500{
771 SPIINT_BITERR_INTR 501 struct davinci_spi *dspi = data;
772 | SPIINT_OVRRUN_INTR 502 struct davinci_spi_dma *dma = &dspi->dma;
773 | SPIINT_RX_INTR);
774 503
775 iowrite32(data1_reg_val, 504 edma_stop(lch);
776 davinci_spi->base + SPIDAT1);
777 505
778 while (ioread32(davinci_spi->base + SPIINT) & 506 if (status == DMA_COMPLETE) {
779 SPIINT_RX_INTR) 507 if (lch == dma->rx_channel)
780 cpu_relax(); 508 dspi->rcount = 0;
781 } 509 if (lch == dma->tx_channel)
782 iowrite32((data1_reg_val & 0x0ffcffff), 510 dspi->wcount = 0;
783 davinci_spi->base + SPIDAT1);
784 }
785 } 511 }
786 512
787 /* 513 if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE))
788 * Check for bit error, desync error,parity error,timeout error and 514 complete(&dspi->done);
789 * receive overflow errors
790 */
791 int_status = ioread32(davinci_spi->base + SPIFLG);
792
793 ret = davinci_spi_check_error(davinci_spi, int_status);
794 if (ret != 0)
795 return ret;
796
797 /* SPI Framework maintains the count only in bytes so convert back */
798 davinci_spi->count *= conv;
799
800 return t->len;
801} 515}
802 516
803#define DAVINCI_DMA_DATA_TYPE_S8 0x01 517/**
804#define DAVINCI_DMA_DATA_TYPE_S16 0x02 518 * davinci_spi_bufs - functions which will handle transfer data
805#define DAVINCI_DMA_DATA_TYPE_S32 0x04 519 * @spi: spi device on which data transfer to be done
806 520 * @t: spi transfer in which transfer info is filled
807static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) 521 *
522 * This function will put data to be transferred into data register
523 * of SPI controller and then wait until the completion will be marked
524 * by the IRQ Handler.
525 */
526static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
808{ 527{
809 struct davinci_spi *davinci_spi; 528 struct davinci_spi *dspi;
810 int int_status = 0; 529 int data_type, ret;
811 int count, temp_count; 530 u32 tx_data, spidat1;
812 u8 conv = 1; 531 u32 errors = 0;
813 u8 tmp; 532 struct davinci_spi_config *spicfg;
814 u32 data1_reg_val;
815 struct davinci_spi_dma *davinci_spi_dma;
816 int word_len, data_type, ret;
817 unsigned long tx_reg, rx_reg;
818 struct davinci_spi_platform_data *pdata; 533 struct davinci_spi_platform_data *pdata;
534 unsigned uninitialized_var(rx_buf_count);
819 struct device *sdev; 535 struct device *sdev;
820 536
821 davinci_spi = spi_master_get_devdata(spi->master); 537 dspi = spi_master_get_devdata(spi->master);
822 pdata = davinci_spi->pdata; 538 pdata = dspi->pdata;
823 sdev = davinci_spi->bitbang.master->dev.parent; 539 spicfg = (struct davinci_spi_config *)spi->controller_data;
824 540 if (!spicfg)
825 davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; 541 spicfg = &davinci_spi_default_cfg;
826 542 sdev = dspi->bitbang.master->dev.parent;
827 tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1;
828 rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF;
829
830 davinci_spi->tx = t->tx_buf;
831 davinci_spi->rx = t->rx_buf;
832 543
833 /* convert len to words based on bits_per_word */ 544 /* convert len to words based on bits_per_word */
834 conv = davinci_spi->slave[spi->chip_select].bytes_per_word; 545 data_type = dspi->bytes_per_word[spi->chip_select];
835 davinci_spi->count = t->len / conv;
836
837 INIT_COMPLETION(davinci_spi->done);
838 546
839 init_completion(&davinci_spi_dma->dma_rx_completion); 547 dspi->tx = t->tx_buf;
840 init_completion(&davinci_spi_dma->dma_tx_completion); 548 dspi->rx = t->rx_buf;
549 dspi->wcount = t->len / data_type;
550 dspi->rcount = dspi->wcount;
841 551
842 word_len = conv * 8; 552 spidat1 = ioread32(dspi->base + SPIDAT1);
843
844 if (word_len <= 8)
845 data_type = DAVINCI_DMA_DATA_TYPE_S8;
846 else if (word_len <= 16)
847 data_type = DAVINCI_DMA_DATA_TYPE_S16;
848 else if (word_len <= 32)
849 data_type = DAVINCI_DMA_DATA_TYPE_S32;
850 else
851 return -EINVAL;
852
853 ret = davinci_spi_bufs_prep(spi, davinci_spi);
854 if (ret)
855 return ret;
856 553
857 /* Put delay val if required */ 554 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
858 iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | 555 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
859 (pdata->t2cdelay << SPI_T2CDELAY_SHIFT),
860 davinci_spi->base + SPIDELAY);
861 556
862 count = davinci_spi->count; /* the number of elements */ 557 INIT_COMPLETION(dspi->done);
863 data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT;
864 558
865 /* CS default = 0xFF */ 559 if (spicfg->io_type == SPI_IO_TYPE_INTR)
866 tmp = ~(0x1 << spi->chip_select); 560 set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
867 561
868 clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); 562 if (spicfg->io_type != SPI_IO_TYPE_DMA) {
869 563 /* start the transfer */
870 data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; 564 dspi->wcount--;
871 565 tx_data = dspi->get_tx(dspi);
872 /* disable all interrupts for dma transfers */ 566 spidat1 &= 0xFFFF0000;
873 clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); 567 spidat1 |= tx_data & 0xFFFF;
874 /* Disable SPI to write configuration bits in SPIDAT */ 568 iowrite32(spidat1, dspi->base + SPIDAT1);
875 clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
876 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1);
877 /* Enable SPI */
878 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
879
880 while ((ioread32(davinci_spi->base + SPIBUF)
881 & SPIBUF_RXEMPTY_MASK) == 0)
882 cpu_relax();
883
884
885 if (t->tx_buf) {
886 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count,
887 DMA_TO_DEVICE);
888 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
889 dev_dbg(sdev, "Unable to DMA map a %d bytes"
890 " TX buffer\n", count);
891 return -ENOMEM;
892 }
893 temp_count = count;
894 } else { 569 } else {
895 /* We need TX clocking for RX transaction */ 570 struct davinci_spi_dma *dma;
896 t->tx_dma = dma_map_single(&spi->dev, 571 unsigned long tx_reg, rx_reg;
897 (void *)davinci_spi->tmp_buf, count + 1, 572 struct edmacc_param param;
898 DMA_TO_DEVICE); 573 void *rx_buf;
899 if (dma_mapping_error(&spi->dev, t->tx_dma)) { 574
900 dev_dbg(sdev, "Unable to DMA map a %d bytes" 575 dma = &dspi->dma;
901 " TX tmp buffer\n", count); 576
902 return -ENOMEM; 577 tx_reg = (unsigned long)dspi->pbase + SPIDAT1;
578 rx_reg = (unsigned long)dspi->pbase + SPIBUF;
579
580 /*
581 * Transmit DMA setup
582 *
583 * If there is transmit data, map the transmit buffer, set it
584 * as the source of data and set the source B index to data
585 * size. If there is no transmit data, set the transmit register
586 * as the source of data, and set the source B index to zero.
587 *
588 * The destination is always the transmit register itself. And
589 * the destination never increments.
590 */
591
592 if (t->tx_buf) {
593 t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf,
594 dspi->wcount, DMA_TO_DEVICE);
595 if (dma_mapping_error(&spi->dev, t->tx_dma)) {
596 dev_dbg(sdev, "Unable to DMA map %d bytes"
597 "TX buffer\n", dspi->wcount);
598 return -ENOMEM;
599 }
903 } 600 }
904 temp_count = count + 1;
905 }
906 601
907 edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, 602 param.opt = TCINTEN | EDMA_TCC(dma->tx_channel);
908 data_type, temp_count, 1, 0, ASYNC); 603 param.src = t->tx_buf ? t->tx_dma : tx_reg;
909 edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); 604 param.a_b_cnt = dspi->wcount << 16 | data_type;
910 edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); 605 param.dst = tx_reg;
911 edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); 606 param.src_dst_bidx = t->tx_buf ? data_type : 0;
912 edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); 607 param.link_bcntrld = 0xffff;
913 608 param.src_dst_cidx = 0;
914 if (t->rx_buf) { 609 param.ccnt = 1;
915 /* initiate transaction */ 610 edma_write_slot(dma->tx_channel, &param);
916 iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); 611 edma_link(dma->tx_channel, dma->dummy_param_slot);
612
613 /*
614 * Receive DMA setup
615 *
616 * If there is receive buffer, use it to receive data. If there
617 * is none provided, use a temporary receive buffer. Set the
618 * destination B index to 0 so effectively only one byte is used
619 * in the temporary buffer (address does not increment).
620 *
621 * The source of receive data is the receive data register. The
622 * source address never increments.
623 */
624
625 if (t->rx_buf) {
626 rx_buf = t->rx_buf;
627 rx_buf_count = dspi->rcount;
628 } else {
629 rx_buf = dspi->rx_tmp_buf;
630 rx_buf_count = sizeof(dspi->rx_tmp_buf);
631 }
917 632
918 t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, 633 t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count,
919 DMA_FROM_DEVICE); 634 DMA_FROM_DEVICE);
920 if (dma_mapping_error(&spi->dev, t->rx_dma)) { 635 if (dma_mapping_error(&spi->dev, t->rx_dma)) {
921 dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", 636 dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
922 count); 637 rx_buf_count);
923 if (t->tx_buf != NULL) 638 if (t->tx_buf)
924 dma_unmap_single(NULL, t->tx_dma, 639 dma_unmap_single(NULL, t->tx_dma, dspi->wcount,
925 count, DMA_TO_DEVICE); 640 DMA_TO_DEVICE);
926 return -ENOMEM; 641 return -ENOMEM;
927 } 642 }
928 edma_set_transfer_params(davinci_spi_dma->dma_rx_channel,
929 data_type, count, 1, 0, ASYNC);
930 edma_set_src(davinci_spi_dma->dma_rx_channel,
931 rx_reg, INCR, W8BIT);
932 edma_set_dest(davinci_spi_dma->dma_rx_channel,
933 t->rx_dma, INCR, W8BIT);
934 edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0);
935 edma_set_dest_index(davinci_spi_dma->dma_rx_channel,
936 data_type, 0);
937 }
938 643
939 if ((t->tx_buf) || (t->rx_buf)) 644 param.opt = TCINTEN | EDMA_TCC(dma->rx_channel);
940 edma_start(davinci_spi_dma->dma_tx_channel); 645 param.src = rx_reg;
646 param.a_b_cnt = dspi->rcount << 16 | data_type;
647 param.dst = t->rx_dma;
648 param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16;
649 param.link_bcntrld = 0xffff;
650 param.src_dst_cidx = 0;
651 param.ccnt = 1;
652 edma_write_slot(dma->rx_channel, &param);
653
654 if (pdata->cshold_bug)
655 iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
656
657 edma_start(dma->rx_channel);
658 edma_start(dma->tx_channel);
659 set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
660 }
941 661
942 if (t->rx_buf) 662 /* Wait for the transfer to complete */
943 edma_start(davinci_spi_dma->dma_rx_channel); 663 if (spicfg->io_type != SPI_IO_TYPE_POLL) {
664 wait_for_completion_interruptible(&(dspi->done));
665 } else {
666 while (dspi->rcount > 0 || dspi->wcount > 0) {
667 errors = davinci_spi_process_events(dspi);
668 if (errors)
669 break;
670 cpu_relax();
671 }
672 }
944 673
945 if ((t->rx_buf) || (t->tx_buf)) 674 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
946 davinci_spi_set_dma_req(spi, 1); 675 if (spicfg->io_type == SPI_IO_TYPE_DMA) {
947 676
948 if (t->tx_buf) 677 if (t->tx_buf)
949 wait_for_completion_interruptible( 678 dma_unmap_single(NULL, t->tx_dma, dspi->wcount,
950 &davinci_spi_dma->dma_tx_completion); 679 DMA_TO_DEVICE);
951 680
952 if (t->rx_buf) 681 dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
953 wait_for_completion_interruptible( 682 DMA_FROM_DEVICE);
954 &davinci_spi_dma->dma_rx_completion);
955 683
956 dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); 684 clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
685 }
957 686
958 if (t->rx_buf) 687 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
959 dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); 688 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
960 689
961 /* 690 /*
962 * Check for bit error, desync error,parity error,timeout error and 691 * Check for bit error, desync error,parity error,timeout error and
963 * receive overflow errors 692 * receive overflow errors
964 */ 693 */
965 int_status = ioread32(davinci_spi->base + SPIFLG); 694 if (errors) {
966 695 ret = davinci_spi_check_error(dspi, errors);
967 ret = davinci_spi_check_error(davinci_spi, int_status); 696 WARN(!ret, "%s: error reported but no error found!\n",
968 if (ret != 0) 697 dev_name(&spi->dev));
969 return ret; 698 return ret;
699 }
970 700
971 /* SPI Framework maintains the count only in bytes so convert back */ 701 if (dspi->rcount != 0 || dspi->wcount != 0) {
972 davinci_spi->count *= conv; 702 dev_err(sdev, "SPI data transfer error\n");
703 return -EIO;
704 }
973 705
974 return t->len; 706 return t->len;
975} 707}
976 708
977/** 709/**
978 * davinci_spi_irq - IRQ handler for DaVinci SPI 710 * davinci_spi_irq - Interrupt handler for SPI Master Controller
979 * @irq: IRQ number for this SPI Master 711 * @irq: IRQ number for this SPI Master
980 * @context_data: structure for SPI Master controller davinci_spi 712 * @context_data: structure for SPI Master controller davinci_spi
713 *
714 * ISR will determine that interrupt arrives either for READ or WRITE command.
715 * According to command it will do the appropriate action. It will check
716 * transfer length and if it is not zero then dispatch transfer command again.
717 * If transfer length is zero then it will indicate the COMPLETION so that
718 * davinci_spi_bufs function can go ahead.
981 */ 719 */
982static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) 720static irqreturn_t davinci_spi_irq(s32 irq, void *data)
983{ 721{
984 struct davinci_spi *davinci_spi = context_data; 722 struct davinci_spi *dspi = data;
985 u32 int_status, rx_data = 0; 723 int status;
986 irqreturn_t ret = IRQ_NONE;
987 724
988 int_status = ioread32(davinci_spi->base + SPIFLG); 725 status = davinci_spi_process_events(dspi);
726 if (unlikely(status != 0))
727 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
989 728
990 while ((int_status & SPIFLG_RX_INTR_MASK)) { 729 if ((!dspi->rcount && !dspi->wcount) || status)
991 if (likely(int_status & SPIFLG_RX_INTR_MASK)) { 730 complete(&dspi->done);
992 ret = IRQ_HANDLED;
993 731
994 rx_data = ioread32(davinci_spi->base + SPIBUF); 732 return IRQ_HANDLED;
995 davinci_spi->get_rx(rx_data, davinci_spi); 733}
996 734
997 /* Disable Receive Interrupt */ 735static int davinci_spi_request_dma(struct davinci_spi *dspi)
998 iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), 736{
999 davinci_spi->base + SPIINT); 737 int r;
1000 } else 738 struct davinci_spi_dma *dma = &dspi->dma;
1001 (void)davinci_spi_check_error(davinci_spi, int_status);
1002 739
1003 int_status = ioread32(davinci_spi->base + SPIFLG); 740 r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi,
741 dma->eventq);
742 if (r < 0) {
743 pr_err("Unable to request DMA channel for SPI RX\n");
744 r = -EAGAIN;
745 goto rx_dma_failed;
1004 } 746 }
1005 747
1006 return ret; 748 r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi,
749 dma->eventq);
750 if (r < 0) {
751 pr_err("Unable to request DMA channel for SPI TX\n");
752 r = -EAGAIN;
753 goto tx_dma_failed;
754 }
755
756 r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY);
757 if (r < 0) {
758 pr_err("Unable to request SPI TX DMA param slot\n");
759 r = -EAGAIN;
760 goto param_failed;
761 }
762 dma->dummy_param_slot = r;
763 edma_link(dma->dummy_param_slot, dma->dummy_param_slot);
764
765 return 0;
766param_failed:
767 edma_free_channel(dma->tx_channel);
768tx_dma_failed:
769 edma_free_channel(dma->rx_channel);
770rx_dma_failed:
771 return r;
1007} 772}
1008 773
1009/** 774/**
1010 * davinci_spi_probe - probe function for SPI Master Controller 775 * davinci_spi_probe - probe function for SPI Master Controller
1011 * @pdev: platform_device structure which contains plateform specific data 776 * @pdev: platform_device structure which contains plateform specific data
777 *
778 * According to Linux Device Model this function will be invoked by Linux
779 * with platform_device struct which contains the device specific info.
780 * This function will map the SPI controller's memory, register IRQ,
781 * Reset SPI controller and setting its registers to default value.
782 * It will invoke spi_bitbang_start to create work queue so that client driver
783 * can register transfer method to work queue.
1012 */ 784 */
1013static int davinci_spi_probe(struct platform_device *pdev) 785static int davinci_spi_probe(struct platform_device *pdev)
1014{ 786{
1015 struct spi_master *master; 787 struct spi_master *master;
1016 struct davinci_spi *davinci_spi; 788 struct davinci_spi *dspi;
1017 struct davinci_spi_platform_data *pdata; 789 struct davinci_spi_platform_data *pdata;
1018 struct resource *r, *mem; 790 struct resource *r, *mem;
1019 resource_size_t dma_rx_chan = SPI_NO_RESOURCE; 791 resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
1020 resource_size_t dma_tx_chan = SPI_NO_RESOURCE; 792 resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
1021 resource_size_t dma_eventq = SPI_NO_RESOURCE; 793 resource_size_t dma_eventq = SPI_NO_RESOURCE;
1022 int i = 0, ret = 0; 794 int i = 0, ret = 0;
795 u32 spipc0;
1023 796
1024 pdata = pdev->dev.platform_data; 797 pdata = pdev->dev.platform_data;
1025 if (pdata == NULL) { 798 if (pdata == NULL) {
@@ -1035,8 +808,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
1035 808
1036 dev_set_drvdata(&pdev->dev, master); 809 dev_set_drvdata(&pdev->dev, master);
1037 810
1038 davinci_spi = spi_master_get_devdata(master); 811 dspi = spi_master_get_devdata(master);
1039 if (davinci_spi == NULL) { 812 if (dspi == NULL) {
1040 ret = -ENOENT; 813 ret = -ENOENT;
1041 goto free_master; 814 goto free_master;
1042 } 815 }
@@ -1047,164 +820,143 @@ static int davinci_spi_probe(struct platform_device *pdev)
1047 goto free_master; 820 goto free_master;
1048 } 821 }
1049 822
1050 davinci_spi->pbase = r->start; 823 dspi->pbase = r->start;
1051 davinci_spi->region_size = resource_size(r); 824 dspi->pdata = pdata;
1052 davinci_spi->pdata = pdata;
1053 825
1054 mem = request_mem_region(r->start, davinci_spi->region_size, 826 mem = request_mem_region(r->start, resource_size(r), pdev->name);
1055 pdev->name);
1056 if (mem == NULL) { 827 if (mem == NULL) {
1057 ret = -EBUSY; 828 ret = -EBUSY;
1058 goto free_master; 829 goto free_master;
1059 } 830 }
1060 831
1061 davinci_spi->base = (struct davinci_spi_reg __iomem *) 832 dspi->base = ioremap(r->start, resource_size(r));
1062 ioremap(r->start, davinci_spi->region_size); 833 if (dspi->base == NULL) {
1063 if (davinci_spi->base == NULL) {
1064 ret = -ENOMEM; 834 ret = -ENOMEM;
1065 goto release_region; 835 goto release_region;
1066 } 836 }
1067 837
1068 davinci_spi->irq = platform_get_irq(pdev, 0); 838 dspi->irq = platform_get_irq(pdev, 0);
1069 if (davinci_spi->irq <= 0) { 839 if (dspi->irq <= 0) {
1070 ret = -EINVAL; 840 ret = -EINVAL;
1071 goto unmap_io; 841 goto unmap_io;
1072 } 842 }
1073 843
1074 ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, 844 ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev),
1075 dev_name(&pdev->dev), davinci_spi); 845 dspi);
1076 if (ret) 846 if (ret)
1077 goto unmap_io; 847 goto unmap_io;
1078 848
1079 /* Allocate tmp_buf for tx_buf */ 849 dspi->bitbang.master = spi_master_get(master);
1080 davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); 850 if (dspi->bitbang.master == NULL) {
1081 if (davinci_spi->tmp_buf == NULL) {
1082 ret = -ENOMEM;
1083 goto irq_free;
1084 }
1085
1086 davinci_spi->bitbang.master = spi_master_get(master);
1087 if (davinci_spi->bitbang.master == NULL) {
1088 ret = -ENODEV; 851 ret = -ENODEV;
1089 goto free_tmp_buf; 852 goto irq_free;
1090 } 853 }
1091 854
1092 davinci_spi->clk = clk_get(&pdev->dev, NULL); 855 dspi->clk = clk_get(&pdev->dev, NULL);
1093 if (IS_ERR(davinci_spi->clk)) { 856 if (IS_ERR(dspi->clk)) {
1094 ret = -ENODEV; 857 ret = -ENODEV;
1095 goto put_master; 858 goto put_master;
1096 } 859 }
1097 clk_enable(davinci_spi->clk); 860 clk_enable(dspi->clk);
1098
1099 861
1100 master->bus_num = pdev->id; 862 master->bus_num = pdev->id;
1101 master->num_chipselect = pdata->num_chipselect; 863 master->num_chipselect = pdata->num_chipselect;
1102 master->setup = davinci_spi_setup; 864 master->setup = davinci_spi_setup;
1103 master->cleanup = davinci_spi_cleanup;
1104
1105 davinci_spi->bitbang.chipselect = davinci_spi_chipselect;
1106 davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1107
1108 davinci_spi->version = pdata->version;
1109 use_dma = pdata->use_dma;
1110
1111 davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1112 if (davinci_spi->version == SPI_VERSION_2)
1113 davinci_spi->bitbang.flags |= SPI_READY;
1114
1115 if (use_dma) {
1116 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1117 if (r)
1118 dma_rx_chan = r->start;
1119 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1120 if (r)
1121 dma_tx_chan = r->start;
1122 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
1123 if (r)
1124 dma_eventq = r->start;
1125 }
1126 865
1127 if (!use_dma || 866 dspi->bitbang.chipselect = davinci_spi_chipselect;
1128 dma_rx_chan == SPI_NO_RESOURCE || 867 dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
1129 dma_tx_chan == SPI_NO_RESOURCE || 868
1130 dma_eventq == SPI_NO_RESOURCE) { 869 dspi->version = pdata->version;
1131 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; 870
1132 use_dma = 0; 871 dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
1133 } else { 872 if (dspi->version == SPI_VERSION_2)
1134 davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; 873 dspi->bitbang.flags |= SPI_READY;
1135 davinci_spi->dma_channels = kzalloc(master->num_chipselect 874
1136 * sizeof(struct davinci_spi_dma), GFP_KERNEL); 875 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1137 if (davinci_spi->dma_channels == NULL) { 876 if (r)
1138 ret = -ENOMEM; 877 dma_rx_chan = r->start;
878 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
879 if (r)
880 dma_tx_chan = r->start;
881 r = platform_get_resource(pdev, IORESOURCE_DMA, 2);
882 if (r)
883 dma_eventq = r->start;
884
885 dspi->bitbang.txrx_bufs = davinci_spi_bufs;
886 if (dma_rx_chan != SPI_NO_RESOURCE &&
887 dma_tx_chan != SPI_NO_RESOURCE &&
888 dma_eventq != SPI_NO_RESOURCE) {
889 dspi->dma.rx_channel = dma_rx_chan;
890 dspi->dma.tx_channel = dma_tx_chan;
891 dspi->dma.eventq = dma_eventq;
892
893 ret = davinci_spi_request_dma(dspi);
894 if (ret)
1139 goto free_clk; 895 goto free_clk;
1140 }
1141 896
1142 for (i = 0; i < master->num_chipselect; i++) { 897 dev_info(&pdev->dev, "DMA: supported\n");
1143 davinci_spi->dma_channels[i].dma_rx_channel = -1; 898 dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, "
1144 davinci_spi->dma_channels[i].dma_rx_sync_dev = 899 "event queue: %d\n", dma_rx_chan, dma_tx_chan,
1145 dma_rx_chan;
1146 davinci_spi->dma_channels[i].dma_tx_channel = -1;
1147 davinci_spi->dma_channels[i].dma_tx_sync_dev =
1148 dma_tx_chan;
1149 davinci_spi->dma_channels[i].eventq = dma_eventq;
1150 }
1151 dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n"
1152 "Using RX channel = %d , TX channel = %d and "
1153 "event queue = %d", dma_rx_chan, dma_tx_chan,
1154 dma_eventq); 900 dma_eventq);
1155 } 901 }
1156 902
1157 davinci_spi->get_rx = davinci_spi_rx_buf_u8; 903 dspi->get_rx = davinci_spi_rx_buf_u8;
1158 davinci_spi->get_tx = davinci_spi_tx_buf_u8; 904 dspi->get_tx = davinci_spi_tx_buf_u8;
1159 905
1160 init_completion(&davinci_spi->done); 906 init_completion(&dspi->done);
1161 907
1162 /* Reset In/OUT SPI module */ 908 /* Reset In/OUT SPI module */
1163 iowrite32(0, davinci_spi->base + SPIGCR0); 909 iowrite32(0, dspi->base + SPIGCR0);
1164 udelay(100); 910 udelay(100);
1165 iowrite32(1, davinci_spi->base + SPIGCR0); 911 iowrite32(1, dspi->base + SPIGCR0);
1166 912
1167 /* Clock internal */ 913 /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */
1168 if (davinci_spi->pdata->clk_internal) 914 spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
1169 set_io_bits(davinci_spi->base + SPIGCR1, 915 iowrite32(spipc0, dspi->base + SPIPC0);
1170 SPIGCR1_CLKMOD_MASK);
1171 else
1172 clear_io_bits(davinci_spi->base + SPIGCR1,
1173 SPIGCR1_CLKMOD_MASK);
1174 916
1175 /* master mode default */ 917 /* initialize chip selects */
1176 set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); 918 if (pdata->chip_sel) {
919 for (i = 0; i < pdata->num_chipselect; i++) {
920 if (pdata->chip_sel[i] != SPI_INTERN_CS)
921 gpio_direction_output(pdata->chip_sel[i], 1);
922 }
923 }
1177 924
1178 if (davinci_spi->pdata->intr_level) 925 if (pdata->intr_line)
1179 iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); 926 iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
1180 else 927 else
1181 iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); 928 iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);
1182 929
1183 ret = spi_bitbang_start(&davinci_spi->bitbang); 930 iowrite32(CS_DEFAULT, dspi->base + SPIDEF);
1184 if (ret)
1185 goto free_clk;
1186 931
1187 dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base); 932 /* master mode default */
933 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
934 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
935 set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
936
937 ret = spi_bitbang_start(&dspi->bitbang);
938 if (ret)
939 goto free_dma;
1188 940
1189 if (!pdata->poll_mode) 941 dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);
1190 dev_info(&pdev->dev, "Operating in interrupt mode"
1191 " using IRQ %d\n", davinci_spi->irq);
1192 942
1193 return ret; 943 return ret;
1194 944
945free_dma:
946 edma_free_channel(dspi->dma.tx_channel);
947 edma_free_channel(dspi->dma.rx_channel);
948 edma_free_slot(dspi->dma.dummy_param_slot);
1195free_clk: 949free_clk:
1196 clk_disable(davinci_spi->clk); 950 clk_disable(dspi->clk);
1197 clk_put(davinci_spi->clk); 951 clk_put(dspi->clk);
1198put_master: 952put_master:
1199 spi_master_put(master); 953 spi_master_put(master);
1200free_tmp_buf:
1201 kfree(davinci_spi->tmp_buf);
1202irq_free: 954irq_free:
1203 free_irq(davinci_spi->irq, davinci_spi); 955 free_irq(dspi->irq, dspi);
1204unmap_io: 956unmap_io:
1205 iounmap(davinci_spi->base); 957 iounmap(dspi->base);
1206release_region: 958release_region:
1207 release_mem_region(davinci_spi->pbase, davinci_spi->region_size); 959 release_mem_region(dspi->pbase, resource_size(r));
1208free_master: 960free_master:
1209 kfree(master); 961 kfree(master);
1210err: 962err:
@@ -1222,27 +974,31 @@ err:
1222 */ 974 */
1223static int __exit davinci_spi_remove(struct platform_device *pdev) 975static int __exit davinci_spi_remove(struct platform_device *pdev)
1224{ 976{
1225 struct davinci_spi *davinci_spi; 977 struct davinci_spi *dspi;
1226 struct spi_master *master; 978 struct spi_master *master;
979 struct resource *r;
1227 980
1228 master = dev_get_drvdata(&pdev->dev); 981 master = dev_get_drvdata(&pdev->dev);
1229 davinci_spi = spi_master_get_devdata(master); 982 dspi = spi_master_get_devdata(master);
1230 983
1231 spi_bitbang_stop(&davinci_spi->bitbang); 984 spi_bitbang_stop(&dspi->bitbang);
1232 985
1233 clk_disable(davinci_spi->clk); 986 clk_disable(dspi->clk);
1234 clk_put(davinci_spi->clk); 987 clk_put(dspi->clk);
1235 spi_master_put(master); 988 spi_master_put(master);
1236 kfree(davinci_spi->tmp_buf); 989 free_irq(dspi->irq, dspi);
1237 free_irq(davinci_spi->irq, davinci_spi); 990 iounmap(dspi->base);
1238 iounmap(davinci_spi->base); 991 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1239 release_mem_region(davinci_spi->pbase, davinci_spi->region_size); 992 release_mem_region(dspi->pbase, resource_size(r));
1240 993
1241 return 0; 994 return 0;
1242} 995}
1243 996
1244static struct platform_driver davinci_spi_driver = { 997static struct platform_driver davinci_spi_driver = {
1245 .driver.name = "spi_davinci", 998 .driver = {
999 .name = "spi_davinci",
1000 .owner = THIS_MODULE,
1001 },
1246 .remove = __exit_p(davinci_spi_remove), 1002 .remove = __exit_p(davinci_spi_remove),
1247}; 1003};
1248 1004
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c
index 0838c79861e4..22af77f98816 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/dw_spi.c
@@ -164,20 +164,23 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
164 164
165static void wait_till_not_busy(struct dw_spi *dws) 165static void wait_till_not_busy(struct dw_spi *dws)
166{ 166{
167 unsigned long end = jiffies + 1 + usecs_to_jiffies(1000); 167 unsigned long end = jiffies + 1 + usecs_to_jiffies(5000);
168 168
169 while (time_before(jiffies, end)) { 169 while (time_before(jiffies, end)) {
170 if (!(dw_readw(dws, sr) & SR_BUSY)) 170 if (!(dw_readw(dws, sr) & SR_BUSY))
171 return; 171 return;
172 cpu_relax();
172 } 173 }
173 dev_err(&dws->master->dev, 174 dev_err(&dws->master->dev,
174 "DW SPI: Status keeps busy for 1000us after a read/write!\n"); 175 "DW SPI: Status keeps busy for 5000us after a read/write!\n");
175} 176}
176 177
177static void flush(struct dw_spi *dws) 178static void flush(struct dw_spi *dws)
178{ 179{
179 while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) 180 while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) {
180 dw_readw(dws, dr); 181 dw_readw(dws, dr);
182 cpu_relax();
183 }
181 184
182 wait_till_not_busy(dws); 185 wait_till_not_busy(dws);
183} 186}
@@ -285,8 +288,10 @@ static void *next_transfer(struct dw_spi *dws)
285 */ 288 */
286static int map_dma_buffers(struct dw_spi *dws) 289static int map_dma_buffers(struct dw_spi *dws)
287{ 290{
288 if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited 291 if (!dws->cur_msg->is_dma_mapped
289 || !dws->cur_chip->enable_dma) 292 || !dws->dma_inited
293 || !dws->cur_chip->enable_dma
294 || !dws->dma_ops)
290 return 0; 295 return 0;
291 296
292 if (dws->cur_transfer->tx_dma) 297 if (dws->cur_transfer->tx_dma)
@@ -338,7 +343,7 @@ static void int_error_stop(struct dw_spi *dws, const char *msg)
338 tasklet_schedule(&dws->pump_transfers); 343 tasklet_schedule(&dws->pump_transfers);
339} 344}
340 345
341static void transfer_complete(struct dw_spi *dws) 346void dw_spi_xfer_done(struct dw_spi *dws)
342{ 347{
343 /* Update total byte transfered return count actual bytes read */ 348 /* Update total byte transfered return count actual bytes read */
344 dws->cur_msg->actual_length += dws->len; 349 dws->cur_msg->actual_length += dws->len;
@@ -353,6 +358,7 @@ static void transfer_complete(struct dw_spi *dws)
353 } else 358 } else
354 tasklet_schedule(&dws->pump_transfers); 359 tasklet_schedule(&dws->pump_transfers);
355} 360}
361EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
356 362
357static irqreturn_t interrupt_transfer(struct dw_spi *dws) 363static irqreturn_t interrupt_transfer(struct dw_spi *dws)
358{ 364{
@@ -384,7 +390,7 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
384 if (dws->tx_end > dws->tx) 390 if (dws->tx_end > dws->tx)
385 spi_umask_intr(dws, SPI_INT_TXEI); 391 spi_umask_intr(dws, SPI_INT_TXEI);
386 else 392 else
387 transfer_complete(dws); 393 dw_spi_xfer_done(dws);
388 } 394 }
389 395
390 return IRQ_HANDLED; 396 return IRQ_HANDLED;
@@ -419,11 +425,7 @@ static void poll_transfer(struct dw_spi *dws)
419 */ 425 */
420 dws->read(dws); 426 dws->read(dws);
421 427
422 transfer_complete(dws); 428 dw_spi_xfer_done(dws);
423}
424
425static void dma_transfer(struct dw_spi *dws, int cs_change)
426{
427} 429}
428 430
429static void pump_transfers(unsigned long data) 431static void pump_transfers(unsigned long data)
@@ -592,7 +594,7 @@ static void pump_transfers(unsigned long data)
592 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); 594 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
593 spi_chip_sel(dws, spi->chip_select); 595 spi_chip_sel(dws, spi->chip_select);
594 596
595 /* Set the interrupt mask, for poll mode just diable all int */ 597 /* Set the interrupt mask, for poll mode just disable all int */
596 spi_mask_intr(dws, 0xff); 598 spi_mask_intr(dws, 0xff);
597 if (imask) 599 if (imask)
598 spi_umask_intr(dws, imask); 600 spi_umask_intr(dws, imask);
@@ -605,7 +607,7 @@ static void pump_transfers(unsigned long data)
605 } 607 }
606 608
607 if (dws->dma_mapped) 609 if (dws->dma_mapped)
608 dma_transfer(dws, cs_change); 610 dws->dma_ops->dma_transfer(dws, cs_change);
609 611
610 if (chip->poll_mode) 612 if (chip->poll_mode)
611 poll_transfer(dws); 613 poll_transfer(dws);
@@ -901,11 +903,17 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
901 master->setup = dw_spi_setup; 903 master->setup = dw_spi_setup;
902 master->transfer = dw_spi_transfer; 904 master->transfer = dw_spi_transfer;
903 905
904 dws->dma_inited = 0;
905
906 /* Basic HW init */ 906 /* Basic HW init */
907 spi_hw_init(dws); 907 spi_hw_init(dws);
908 908
909 if (dws->dma_ops && dws->dma_ops->dma_init) {
910 ret = dws->dma_ops->dma_init(dws);
911 if (ret) {
912 dev_warn(&master->dev, "DMA init failed\n");
913 dws->dma_inited = 0;
914 }
915 }
916
909 /* Initial and start queue */ 917 /* Initial and start queue */
910 ret = init_queue(dws); 918 ret = init_queue(dws);
911 if (ret) { 919 if (ret) {
@@ -930,6 +938,8 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
930 938
931err_queue_alloc: 939err_queue_alloc:
932 destroy_queue(dws); 940 destroy_queue(dws);
941 if (dws->dma_ops && dws->dma_ops->dma_exit)
942 dws->dma_ops->dma_exit(dws);
933err_diable_hw: 943err_diable_hw:
934 spi_enable_chip(dws, 0); 944 spi_enable_chip(dws, 0);
935 free_irq(dws->irq, dws); 945 free_irq(dws->irq, dws);
@@ -938,7 +948,7 @@ err_free_master:
938exit: 948exit:
939 return ret; 949 return ret;
940} 950}
941EXPORT_SYMBOL(dw_spi_add_host); 951EXPORT_SYMBOL_GPL(dw_spi_add_host);
942 952
943void __devexit dw_spi_remove_host(struct dw_spi *dws) 953void __devexit dw_spi_remove_host(struct dw_spi *dws)
944{ 954{
@@ -954,6 +964,8 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws)
954 dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not " 964 dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
955 "complete, message memory not freed\n"); 965 "complete, message memory not freed\n");
956 966
967 if (dws->dma_ops && dws->dma_ops->dma_exit)
968 dws->dma_ops->dma_exit(dws);
957 spi_enable_chip(dws, 0); 969 spi_enable_chip(dws, 0);
958 /* Disable clk */ 970 /* Disable clk */
959 spi_set_clk(dws, 0); 971 spi_set_clk(dws, 0);
@@ -962,7 +974,7 @@ void __devexit dw_spi_remove_host(struct dw_spi *dws)
962 /* Disconnect from the SPI framework */ 974 /* Disconnect from the SPI framework */
963 spi_unregister_master(dws->master); 975 spi_unregister_master(dws->master);
964} 976}
965EXPORT_SYMBOL(dw_spi_remove_host); 977EXPORT_SYMBOL_GPL(dw_spi_remove_host);
966 978
967int dw_spi_suspend_host(struct dw_spi *dws) 979int dw_spi_suspend_host(struct dw_spi *dws)
968{ 980{
@@ -975,7 +987,7 @@ int dw_spi_suspend_host(struct dw_spi *dws)
975 spi_set_clk(dws, 0); 987 spi_set_clk(dws, 0);
976 return ret; 988 return ret;
977} 989}
978EXPORT_SYMBOL(dw_spi_suspend_host); 990EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
979 991
980int dw_spi_resume_host(struct dw_spi *dws) 992int dw_spi_resume_host(struct dw_spi *dws)
981{ 993{
@@ -987,7 +999,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
987 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 999 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
988 return ret; 1000 return ret;
989} 1001}
990EXPORT_SYMBOL(dw_spi_resume_host); 1002EXPORT_SYMBOL_GPL(dw_spi_resume_host);
991 1003
992MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>"); 1004MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
993MODULE_DESCRIPTION("Driver for DesignWare SPI controller core"); 1005MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/dw_spi_mid.c
new file mode 100644
index 000000000000..c91c966e0717
--- /dev/null
+++ b/drivers/spi/dw_spi_mid.c
@@ -0,0 +1,223 @@
1/*
2 * dw_spi_mid.c - special handling for DW core on Intel MID platform
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 */
19
20#include <linux/dma-mapping.h>
21#include <linux/dmaengine.h>
22#include <linux/interrupt.h>
23#include <linux/slab.h>
24#include <linux/spi/spi.h>
25#include <linux/spi/dw_spi.h>
26
27#ifdef CONFIG_SPI_DW_MID_DMA
28#include <linux/intel_mid_dma.h>
29#include <linux/pci.h>
30
31struct mid_dma {
32 struct intel_mid_dma_slave dmas_tx;
33 struct intel_mid_dma_slave dmas_rx;
34};
35
36static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
37{
38 struct dw_spi *dws = param;
39
40 return dws->dmac && (&dws->dmac->dev == chan->device->dev);
41}
42
43static int mid_spi_dma_init(struct dw_spi *dws)
44{
45 struct mid_dma *dw_dma = dws->dma_priv;
46 struct intel_mid_dma_slave *rxs, *txs;
47 dma_cap_mask_t mask;
48
49 /*
50 * Get pci device for DMA controller, currently it could only
51 * be the DMA controller of either Moorestown or Medfield
52 */
53 dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0813, NULL);
54 if (!dws->dmac)
55 dws->dmac = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
56
57 dma_cap_zero(mask);
58 dma_cap_set(DMA_SLAVE, mask);
59
60 /* 1. Init rx channel */
61 dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
62 if (!dws->rxchan)
63 goto err_exit;
64 rxs = &dw_dma->dmas_rx;
65 rxs->hs_mode = LNW_DMA_HW_HS;
66 rxs->cfg_mode = LNW_DMA_PER_TO_MEM;
67 dws->rxchan->private = rxs;
68
69 /* 2. Init tx channel */
70 dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, dws);
71 if (!dws->txchan)
72 goto free_rxchan;
73 txs = &dw_dma->dmas_tx;
74 txs->hs_mode = LNW_DMA_HW_HS;
75 txs->cfg_mode = LNW_DMA_MEM_TO_PER;
76 dws->txchan->private = txs;
77
78 dws->dma_inited = 1;
79 return 0;
80
81free_rxchan:
82 dma_release_channel(dws->rxchan);
83err_exit:
84 return -1;
85
86}
87
88static void mid_spi_dma_exit(struct dw_spi *dws)
89{
90 dma_release_channel(dws->txchan);
91 dma_release_channel(dws->rxchan);
92}
93
94/*
95 * dws->dma_chan_done is cleared before the dma transfer starts,
96 * callback for rx/tx channel will each increment it by 1.
97 * Reaching 2 means the whole spi transaction is done.
98 */
99static void dw_spi_dma_done(void *arg)
100{
101 struct dw_spi *dws = arg;
102
103 if (++dws->dma_chan_done != 2)
104 return;
105 dw_spi_xfer_done(dws);
106}
107
108static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
109{
110 struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
111 struct dma_chan *txchan, *rxchan;
112 struct dma_slave_config txconf, rxconf;
113 u16 dma_ctrl = 0;
114
115 /* 1. setup DMA related registers */
116 if (cs_change) {
117 spi_enable_chip(dws, 0);
118 dw_writew(dws, dmardlr, 0xf);
119 dw_writew(dws, dmatdlr, 0x10);
120 if (dws->tx_dma)
121 dma_ctrl |= 0x2;
122 if (dws->rx_dma)
123 dma_ctrl |= 0x1;
124 dw_writew(dws, dmacr, dma_ctrl);
125 spi_enable_chip(dws, 1);
126 }
127
128 dws->dma_chan_done = 0;
129 txchan = dws->txchan;
130 rxchan = dws->rxchan;
131
132 /* 2. Prepare the TX dma transfer */
133 txconf.direction = DMA_TO_DEVICE;
134 txconf.dst_addr = dws->dma_addr;
135 txconf.dst_maxburst = LNW_DMA_MSIZE_16;
136 txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
137 txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
138
139 txchan->device->device_control(txchan, DMA_SLAVE_CONFIG,
140 (unsigned long) &txconf);
141
142 memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
143 dws->tx_sgl.dma_address = dws->tx_dma;
144 dws->tx_sgl.length = dws->len;
145
146 txdesc = txchan->device->device_prep_slave_sg(txchan,
147 &dws->tx_sgl,
148 1,
149 DMA_TO_DEVICE,
150 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
151 txdesc->callback = dw_spi_dma_done;
152 txdesc->callback_param = dws;
153
154 /* 3. Prepare the RX dma transfer */
155 rxconf.direction = DMA_FROM_DEVICE;
156 rxconf.src_addr = dws->dma_addr;
157 rxconf.src_maxburst = LNW_DMA_MSIZE_16;
158 rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
159 rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
160
161 rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG,
162 (unsigned long) &rxconf);
163
164 memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
165 dws->rx_sgl.dma_address = dws->rx_dma;
166 dws->rx_sgl.length = dws->len;
167
168 rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
169 &dws->rx_sgl,
170 1,
171 DMA_FROM_DEVICE,
172 DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
173 rxdesc->callback = dw_spi_dma_done;
174 rxdesc->callback_param = dws;
175
176 /* rx must be started before tx due to spi instinct */
177 rxdesc->tx_submit(rxdesc);
178 txdesc->tx_submit(txdesc);
179 return 0;
180}
181
182static struct dw_spi_dma_ops mid_dma_ops = {
183 .dma_init = mid_spi_dma_init,
184 .dma_exit = mid_spi_dma_exit,
185 .dma_transfer = mid_spi_dma_transfer,
186};
187#endif
188
189/* Some specific info for SPI0 controller on Moorestown */
190
191/* HW info for MRST CLk Control Unit, one 32b reg */
192#define MRST_SPI_CLK_BASE 100000000 /* 100m */
193#define MRST_CLK_SPI0_REG 0xff11d86c
194#define CLK_SPI_BDIV_OFFSET 0
195#define CLK_SPI_BDIV_MASK 0x00000007
196#define CLK_SPI_CDIV_OFFSET 9
197#define CLK_SPI_CDIV_MASK 0x00000e00
198#define CLK_SPI_DISABLE_OFFSET 8
199
200int dw_spi_mid_init(struct dw_spi *dws)
201{
202 u32 *clk_reg, clk_cdiv;
203
204 clk_reg = ioremap_nocache(MRST_CLK_SPI0_REG, 16);
205 if (!clk_reg)
206 return -ENOMEM;
207
208 /* get SPI controller operating freq info */
209 clk_cdiv = (readl(clk_reg) & CLK_SPI_CDIV_MASK) >> CLK_SPI_CDIV_OFFSET;
210 dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
211 iounmap(clk_reg);
212
213 dws->num_cs = 16;
214 dws->fifo_len = 40; /* FIFO has 40 words buffer */
215
216#ifdef CONFIG_SPI_DW_MID_DMA
217 dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
218 if (!dws->dma_priv)
219 return -ENOMEM;
220 dws->dma_ops = &mid_dma_ops;
221#endif
222 return 0;
223}
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/dw_spi_pci.c
index 1f52755dc878..49ec3aa1219f 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/dw_spi_pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * mrst_spi_pci.c - PCI interface driver for DW SPI Core 2 * dw_spi_pci.c - PCI interface driver for DW SPI Core
3 * 3 *
4 * Copyright (c) 2009, Intel Corporation. 4 * Copyright (c) 2009, Intel Corporation.
5 * 5 *
@@ -26,8 +26,8 @@
26#define DRIVER_NAME "dw_spi_pci" 26#define DRIVER_NAME "dw_spi_pci"
27 27
28struct dw_spi_pci { 28struct dw_spi_pci {
29 struct pci_dev *pdev; 29 struct pci_dev *pdev;
30 struct dw_spi dws; 30 struct dw_spi dws;
31}; 31};
32 32
33static int __devinit spi_pci_probe(struct pci_dev *pdev, 33static int __devinit spi_pci_probe(struct pci_dev *pdev,
@@ -72,9 +72,17 @@ static int __devinit spi_pci_probe(struct pci_dev *pdev,
72 dws->parent_dev = &pdev->dev; 72 dws->parent_dev = &pdev->dev;
73 dws->bus_num = 0; 73 dws->bus_num = 0;
74 dws->num_cs = 4; 74 dws->num_cs = 4;
75 dws->max_freq = 25000000; /* for Moorestwon */
76 dws->irq = pdev->irq; 75 dws->irq = pdev->irq;
77 dws->fifo_len = 40; /* FIFO has 40 words buffer */ 76
77 /*
78 * Specific handling for Intel MID paltforms, like dma setup,
79 * clock rate, FIFO depth.
80 */
81 if (pdev->device == 0x0800) {
82 ret = dw_spi_mid_init(dws);
83 if (ret)
84 goto err_unmap;
85 }
78 86
79 ret = dw_spi_add_host(dws); 87 ret = dw_spi_add_host(dws);
80 if (ret) 88 if (ret)
@@ -140,7 +148,7 @@ static int spi_resume(struct pci_dev *pdev)
140#endif 148#endif
141 149
142static const struct pci_device_id pci_ids[] __devinitdata = { 150static const struct pci_device_id pci_ids[] __devinitdata = {
143 /* Intel Moorestown platform SPI controller 0 */ 151 /* Intel MID platform SPI controller 0 */
144 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) }, 152 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
145 {}, 153 {},
146}; 154};
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c
index 983fbbfce76e..8a904c1c8485 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/mpc52xx_psc_spi.c
@@ -363,7 +363,7 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
363} 363}
364 364
365/* bus_num is used only for the case dev->platform_data == NULL */ 365/* bus_num is used only for the case dev->platform_data == NULL */
366static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, 366static int __devinit mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
367 u32 size, unsigned int irq, s16 bus_num) 367 u32 size, unsigned int irq, s16 bus_num)
368{ 368{
369 struct fsl_spi_platform_data *pdata = dev->platform_data; 369 struct fsl_spi_platform_data *pdata = dev->platform_data;
@@ -450,22 +450,7 @@ free_master:
450 return ret; 450 return ret;
451} 451}
452 452
453static int __exit mpc52xx_psc_spi_do_remove(struct device *dev) 453static int __devinit mpc52xx_psc_spi_of_probe(struct platform_device *op,
454{
455 struct spi_master *master = dev_get_drvdata(dev);
456 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
457
458 flush_workqueue(mps->workqueue);
459 destroy_workqueue(mps->workqueue);
460 spi_unregister_master(master);
461 free_irq(mps->irq, mps);
462 if (mps->psc)
463 iounmap(mps->psc);
464
465 return 0;
466}
467
468static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op,
469 const struct of_device_id *match) 454 const struct of_device_id *match)
470{ 455{
471 const u32 *regaddr_p; 456 const u32 *regaddr_p;
@@ -495,9 +480,19 @@ static int __init mpc52xx_psc_spi_of_probe(struct platform_device *op,
495 irq_of_parse_and_map(op->dev.of_node, 0), id); 480 irq_of_parse_and_map(op->dev.of_node, 0), id);
496} 481}
497 482
498static int __exit mpc52xx_psc_spi_of_remove(struct platform_device *op) 483static int __devexit mpc52xx_psc_spi_of_remove(struct platform_device *op)
499{ 484{
500 return mpc52xx_psc_spi_do_remove(&op->dev); 485 struct spi_master *master = dev_get_drvdata(&op->dev);
486 struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
487
488 flush_workqueue(mps->workqueue);
489 destroy_workqueue(mps->workqueue);
490 spi_unregister_master(master);
491 free_irq(mps->irq, mps);
492 if (mps->psc)
493 iounmap(mps->psc);
494
495 return 0;
501} 496}
502 497
503static const struct of_device_id mpc52xx_psc_spi_of_match[] = { 498static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
@@ -510,7 +505,7 @@ MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
510 505
511static struct of_platform_driver mpc52xx_psc_spi_of_driver = { 506static struct of_platform_driver mpc52xx_psc_spi_of_driver = {
512 .probe = mpc52xx_psc_spi_of_probe, 507 .probe = mpc52xx_psc_spi_of_probe,
513 .remove = __exit_p(mpc52xx_psc_spi_of_remove), 508 .remove = __devexit_p(mpc52xx_psc_spi_of_remove),
514 .driver = { 509 .driver = {
515 .name = "mpc52xx-psc-spi", 510 .name = "mpc52xx-psc-spi",
516 .owner = THIS_MODULE, 511 .owner = THIS_MODULE,
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 951a160fc27f..abb1ffbf3d20 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -397,7 +397,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
397 397
398 if (tx != NULL) { 398 if (tx != NULL) {
399 wait_for_completion(&mcspi_dma->dma_tx_completion); 399 wait_for_completion(&mcspi_dma->dma_tx_completion);
400 dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE); 400 dma_unmap_single(&spi->dev, xfer->tx_dma, count, DMA_TO_DEVICE);
401 401
402 /* for TX_ONLY mode, be sure all words have shifted out */ 402 /* for TX_ONLY mode, be sure all words have shifted out */
403 if (rx == NULL) { 403 if (rx == NULL) {
@@ -412,7 +412,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
412 412
413 if (rx != NULL) { 413 if (rx != NULL) {
414 wait_for_completion(&mcspi_dma->dma_rx_completion); 414 wait_for_completion(&mcspi_dma->dma_rx_completion);
415 dma_unmap_single(NULL, xfer->rx_dma, count, DMA_FROM_DEVICE); 415 dma_unmap_single(&spi->dev, xfer->rx_dma, count, DMA_FROM_DEVICE);
416 omap2_mcspi_set_enable(spi, 0); 416 omap2_mcspi_set_enable(spi, 0);
417 417
418 if (l & OMAP2_MCSPI_CHCONF_TURBO) { 418 if (l & OMAP2_MCSPI_CHCONF_TURBO) {
@@ -1025,11 +1025,6 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
1025 if (m->is_dma_mapped || len < DMA_MIN_BYTES) 1025 if (m->is_dma_mapped || len < DMA_MIN_BYTES)
1026 continue; 1026 continue;
1027 1027
1028 /* Do DMA mapping "early" for better error reporting and
1029 * dcache use. Note that if dma_unmap_single() ever starts
1030 * to do real work on ARM, we'd need to clean up mappings
1031 * for previous transfers on *ALL* exits of this loop...
1032 */
1033 if (tx_buf != NULL) { 1028 if (tx_buf != NULL) {
1034 t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf, 1029 t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
1035 len, DMA_TO_DEVICE); 1030 len, DMA_TO_DEVICE);
@@ -1046,7 +1041,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
1046 dev_dbg(&spi->dev, "dma %cX %d bytes error\n", 1041 dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
1047 'R', len); 1042 'R', len);
1048 if (tx_buf != NULL) 1043 if (tx_buf != NULL)
1049 dma_unmap_single(NULL, t->tx_dma, 1044 dma_unmap_single(&spi->dev, t->tx_dma,
1050 len, DMA_TO_DEVICE); 1045 len, DMA_TO_DEVICE);
1051 return -EINVAL; 1046 return -EINVAL;
1052 } 1047 }
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index e76b1afafe07..95928833855b 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -23,11 +23,11 @@
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/spi/pxa2xx_spi.h>
26#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
27#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
28#include <linux/workqueue.h> 29#include <linux/workqueue.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
30#include <linux/clk.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33 33
@@ -35,9 +35,6 @@
35#include <asm/irq.h> 35#include <asm/irq.h>
36#include <asm/delay.h> 36#include <asm/delay.h>
37 37
38#include <mach/dma.h>
39#include <plat/ssp.h>
40#include <mach/pxa2xx_spi.h>
41 38
42MODULE_AUTHOR("Stephen Street"); 39MODULE_AUTHOR("Stephen Street");
43MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); 40MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
@@ -46,8 +43,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
46 43
47#define MAX_BUSES 3 44#define MAX_BUSES 3
48 45
49#define RX_THRESH_DFLT 8
50#define TX_THRESH_DFLT 8
51#define TIMOUT_DFLT 1000 46#define TIMOUT_DFLT 1000
52 47
53#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) 48#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
@@ -168,7 +163,10 @@ struct chip_data {
168 u8 enable_dma; 163 u8 enable_dma;
169 u8 bits_per_word; 164 u8 bits_per_word;
170 u32 speed_hz; 165 u32 speed_hz;
171 int gpio_cs; 166 union {
167 int gpio_cs;
168 unsigned int frm;
169 };
172 int gpio_cs_inverted; 170 int gpio_cs_inverted;
173 int (*write)(struct driver_data *drv_data); 171 int (*write)(struct driver_data *drv_data);
174 int (*read)(struct driver_data *drv_data); 172 int (*read)(struct driver_data *drv_data);
@@ -181,6 +179,11 @@ static void cs_assert(struct driver_data *drv_data)
181{ 179{
182 struct chip_data *chip = drv_data->cur_chip; 180 struct chip_data *chip = drv_data->cur_chip;
183 181
182 if (drv_data->ssp_type == CE4100_SSP) {
183 write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
184 return;
185 }
186
184 if (chip->cs_control) { 187 if (chip->cs_control) {
185 chip->cs_control(PXA2XX_CS_ASSERT); 188 chip->cs_control(PXA2XX_CS_ASSERT);
186 return; 189 return;
@@ -194,6 +197,9 @@ static void cs_deassert(struct driver_data *drv_data)
194{ 197{
195 struct chip_data *chip = drv_data->cur_chip; 198 struct chip_data *chip = drv_data->cur_chip;
196 199
200 if (drv_data->ssp_type == CE4100_SSP)
201 return;
202
197 if (chip->cs_control) { 203 if (chip->cs_control) {
198 chip->cs_control(PXA2XX_CS_DEASSERT); 204 chip->cs_control(PXA2XX_CS_DEASSERT);
199 return; 205 return;
@@ -203,6 +209,25 @@ static void cs_deassert(struct driver_data *drv_data)
203 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); 209 gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
204} 210}
205 211
212static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
213{
214 void __iomem *reg = drv_data->ioaddr;
215
216 if (drv_data->ssp_type == CE4100_SSP)
217 val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;
218
219 write_SSSR(val, reg);
220}
221
222static int pxa25x_ssp_comp(struct driver_data *drv_data)
223{
224 if (drv_data->ssp_type == PXA25x_SSP)
225 return 1;
226 if (drv_data->ssp_type == CE4100_SSP)
227 return 1;
228 return 0;
229}
230
206static int flush(struct driver_data *drv_data) 231static int flush(struct driver_data *drv_data)
207{ 232{
208 unsigned long limit = loops_per_jiffy << 1; 233 unsigned long limit = loops_per_jiffy << 1;
@@ -214,7 +239,7 @@ static int flush(struct driver_data *drv_data)
214 read_SSDR(reg); 239 read_SSDR(reg);
215 } 240 }
216 } while ((read_SSSR(reg) & SSSR_BSY) && --limit); 241 } while ((read_SSSR(reg) & SSSR_BSY) && --limit);
217 write_SSSR(SSSR_ROR, reg); 242 write_SSSR_CS(drv_data, SSSR_ROR);
218 243
219 return limit; 244 return limit;
220} 245}
@@ -224,7 +249,7 @@ static int null_writer(struct driver_data *drv_data)
224 void __iomem *reg = drv_data->ioaddr; 249 void __iomem *reg = drv_data->ioaddr;
225 u8 n_bytes = drv_data->n_bytes; 250 u8 n_bytes = drv_data->n_bytes;
226 251
227 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 252 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
228 || (drv_data->tx == drv_data->tx_end)) 253 || (drv_data->tx == drv_data->tx_end))
229 return 0; 254 return 0;
230 255
@@ -252,7 +277,7 @@ static int u8_writer(struct driver_data *drv_data)
252{ 277{
253 void __iomem *reg = drv_data->ioaddr; 278 void __iomem *reg = drv_data->ioaddr;
254 279
255 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 280 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
256 || (drv_data->tx == drv_data->tx_end)) 281 || (drv_data->tx == drv_data->tx_end))
257 return 0; 282 return 0;
258 283
@@ -279,7 +304,7 @@ static int u16_writer(struct driver_data *drv_data)
279{ 304{
280 void __iomem *reg = drv_data->ioaddr; 305 void __iomem *reg = drv_data->ioaddr;
281 306
282 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 307 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
283 || (drv_data->tx == drv_data->tx_end)) 308 || (drv_data->tx == drv_data->tx_end))
284 return 0; 309 return 0;
285 310
@@ -306,7 +331,7 @@ static int u32_writer(struct driver_data *drv_data)
306{ 331{
307 void __iomem *reg = drv_data->ioaddr; 332 void __iomem *reg = drv_data->ioaddr;
308 333
309 if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00) 334 if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
310 || (drv_data->tx == drv_data->tx_end)) 335 || (drv_data->tx == drv_data->tx_end))
311 return 0; 336 return 0;
312 337
@@ -507,9 +532,9 @@ static void dma_error_stop(struct driver_data *drv_data, const char *msg)
507 /* Stop and reset */ 532 /* Stop and reset */
508 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 533 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
509 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 534 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
510 write_SSSR(drv_data->clear_sr, reg); 535 write_SSSR_CS(drv_data, drv_data->clear_sr);
511 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 536 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
512 if (drv_data->ssp_type != PXA25x_SSP) 537 if (!pxa25x_ssp_comp(drv_data))
513 write_SSTO(0, reg); 538 write_SSTO(0, reg);
514 flush(drv_data); 539 flush(drv_data);
515 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 540 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
@@ -529,7 +554,7 @@ static void dma_transfer_complete(struct driver_data *drv_data)
529 554
530 /* Clear and disable interrupts on SSP and DMA channels*/ 555 /* Clear and disable interrupts on SSP and DMA channels*/
531 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); 556 write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
532 write_SSSR(drv_data->clear_sr, reg); 557 write_SSSR_CS(drv_data, drv_data->clear_sr);
533 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; 558 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
534 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; 559 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
535 560
@@ -622,7 +647,7 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
622 647
623 /* Clear and disable timeout interrupt, do the rest in 648 /* Clear and disable timeout interrupt, do the rest in
624 * dma_transfer_complete */ 649 * dma_transfer_complete */
625 if (drv_data->ssp_type != PXA25x_SSP) 650 if (!pxa25x_ssp_comp(drv_data))
626 write_SSTO(0, reg); 651 write_SSTO(0, reg);
627 652
628 /* finish this transfer, start the next */ 653 /* finish this transfer, start the next */
@@ -635,14 +660,26 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
635 return IRQ_NONE; 660 return IRQ_NONE;
636} 661}
637 662
663static void reset_sccr1(struct driver_data *drv_data)
664{
665 void __iomem *reg = drv_data->ioaddr;
666 struct chip_data *chip = drv_data->cur_chip;
667 u32 sccr1_reg;
668
669 sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
670 sccr1_reg &= ~SSCR1_RFT;
671 sccr1_reg |= chip->threshold;
672 write_SSCR1(sccr1_reg, reg);
673}
674
638static void int_error_stop(struct driver_data *drv_data, const char* msg) 675static void int_error_stop(struct driver_data *drv_data, const char* msg)
639{ 676{
640 void __iomem *reg = drv_data->ioaddr; 677 void __iomem *reg = drv_data->ioaddr;
641 678
642 /* Stop and reset SSP */ 679 /* Stop and reset SSP */
643 write_SSSR(drv_data->clear_sr, reg); 680 write_SSSR_CS(drv_data, drv_data->clear_sr);
644 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 681 reset_sccr1(drv_data);
645 if (drv_data->ssp_type != PXA25x_SSP) 682 if (!pxa25x_ssp_comp(drv_data))
646 write_SSTO(0, reg); 683 write_SSTO(0, reg);
647 flush(drv_data); 684 flush(drv_data);
648 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 685 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
@@ -658,9 +695,9 @@ static void int_transfer_complete(struct driver_data *drv_data)
658 void __iomem *reg = drv_data->ioaddr; 695 void __iomem *reg = drv_data->ioaddr;
659 696
660 /* Stop SSP */ 697 /* Stop SSP */
661 write_SSSR(drv_data->clear_sr, reg); 698 write_SSSR_CS(drv_data, drv_data->clear_sr);
662 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 699 reset_sccr1(drv_data);
663 if (drv_data->ssp_type != PXA25x_SSP) 700 if (!pxa25x_ssp_comp(drv_data))
664 write_SSTO(0, reg); 701 write_SSTO(0, reg);
665 702
666 /* Update total byte transfered return count actual bytes read */ 703 /* Update total byte transfered return count actual bytes read */
@@ -714,24 +751,34 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
714 } 751 }
715 752
716 if (drv_data->tx == drv_data->tx_end) { 753 if (drv_data->tx == drv_data->tx_end) {
717 write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); 754 u32 bytes_left;
718 /* PXA25x_SSP has no timeout, read trailing bytes */ 755 u32 sccr1_reg;
719 if (drv_data->ssp_type == PXA25x_SSP) { 756
720 if (!wait_ssp_rx_stall(reg)) 757 sccr1_reg = read_SSCR1(reg);
721 { 758 sccr1_reg &= ~SSCR1_TIE;
722 int_error_stop(drv_data, "interrupt_transfer: " 759
723 "rx stall failed"); 760 /*
724 return IRQ_HANDLED; 761 * PXA25x_SSP has no timeout, set up rx threshold for the
725 } 762 * remaining RX bytes.
726 if (!drv_data->read(drv_data)) 763 */
727 { 764 if (pxa25x_ssp_comp(drv_data)) {
728 int_error_stop(drv_data, 765
729 "interrupt_transfer: " 766 sccr1_reg &= ~SSCR1_RFT;
730 "trailing byte read failed"); 767
731 return IRQ_HANDLED; 768 bytes_left = drv_data->rx_end - drv_data->rx;
769 switch (drv_data->n_bytes) {
770 case 4:
771 bytes_left >>= 1;
772 case 2:
773 bytes_left >>= 1;
732 } 774 }
733 int_transfer_complete(drv_data); 775
776 if (bytes_left > RX_THRESH_DFLT)
777 bytes_left = RX_THRESH_DFLT;
778
779 sccr1_reg |= SSCR1_RxTresh(bytes_left);
734 } 780 }
781 write_SSCR1(sccr1_reg, reg);
735 } 782 }
736 783
737 /* We did something */ 784 /* We did something */
@@ -742,14 +789,26 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
742{ 789{
743 struct driver_data *drv_data = dev_id; 790 struct driver_data *drv_data = dev_id;
744 void __iomem *reg = drv_data->ioaddr; 791 void __iomem *reg = drv_data->ioaddr;
792 u32 sccr1_reg = read_SSCR1(reg);
793 u32 mask = drv_data->mask_sr;
794 u32 status;
795
796 status = read_SSSR(reg);
797
798 /* Ignore possible writes if we don't need to write */
799 if (!(sccr1_reg & SSCR1_TIE))
800 mask &= ~SSSR_TFS;
801
802 if (!(status & mask))
803 return IRQ_NONE;
745 804
746 if (!drv_data->cur_msg) { 805 if (!drv_data->cur_msg) {
747 806
748 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); 807 write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
749 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); 808 write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
750 if (drv_data->ssp_type != PXA25x_SSP) 809 if (!pxa25x_ssp_comp(drv_data))
751 write_SSTO(0, reg); 810 write_SSTO(0, reg);
752 write_SSSR(drv_data->clear_sr, reg); 811 write_SSSR_CS(drv_data, drv_data->clear_sr);
753 812
754 dev_err(&drv_data->pdev->dev, "bad message state " 813 dev_err(&drv_data->pdev->dev, "bad message state "
755 "in interrupt handler\n"); 814 "in interrupt handler\n");
@@ -862,7 +921,7 @@ static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
862{ 921{
863 unsigned long ssp_clk = clk_get_rate(ssp->clk); 922 unsigned long ssp_clk = clk_get_rate(ssp->clk);
864 923
865 if (ssp->type == PXA25x_SSP) 924 if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
866 return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; 925 return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
867 else 926 else
868 return ((ssp_clk / rate - 1) & 0xfff) << 8; 927 return ((ssp_clk / rate - 1) & 0xfff) << 8;
@@ -1088,7 +1147,7 @@ static void pump_transfers(unsigned long data)
1088 1147
1089 /* Clear status */ 1148 /* Clear status */
1090 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; 1149 cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
1091 write_SSSR(drv_data->clear_sr, reg); 1150 write_SSSR_CS(drv_data, drv_data->clear_sr);
1092 } 1151 }
1093 1152
1094 /* see if we need to reload the config registers */ 1153 /* see if we need to reload the config registers */
@@ -1098,7 +1157,7 @@ static void pump_transfers(unsigned long data)
1098 1157
1099 /* stop the SSP, and update the other bits */ 1158 /* stop the SSP, and update the other bits */
1100 write_SSCR0(cr0 & ~SSCR0_SSE, reg); 1159 write_SSCR0(cr0 & ~SSCR0_SSE, reg);
1101 if (drv_data->ssp_type != PXA25x_SSP) 1160 if (!pxa25x_ssp_comp(drv_data))
1102 write_SSTO(chip->timeout, reg); 1161 write_SSTO(chip->timeout, reg);
1103 /* first set CR1 without interrupt and service enables */ 1162 /* first set CR1 without interrupt and service enables */
1104 write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); 1163 write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
@@ -1106,7 +1165,7 @@ static void pump_transfers(unsigned long data)
1106 write_SSCR0(cr0, reg); 1165 write_SSCR0(cr0, reg);
1107 1166
1108 } else { 1167 } else {
1109 if (drv_data->ssp_type != PXA25x_SSP) 1168 if (!pxa25x_ssp_comp(drv_data))
1110 write_SSTO(chip->timeout, reg); 1169 write_SSTO(chip->timeout, reg);
1111 } 1170 }
1112 1171
@@ -1233,14 +1292,13 @@ static int setup(struct spi_device *spi)
1233 uint tx_thres = TX_THRESH_DFLT; 1292 uint tx_thres = TX_THRESH_DFLT;
1234 uint rx_thres = RX_THRESH_DFLT; 1293 uint rx_thres = RX_THRESH_DFLT;
1235 1294
1236 if (drv_data->ssp_type != PXA25x_SSP 1295 if (!pxa25x_ssp_comp(drv_data)
1237 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { 1296 && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
1238 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " 1297 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
1239 "b/w not 4-32 for type non-PXA25x_SSP\n", 1298 "b/w not 4-32 for type non-PXA25x_SSP\n",
1240 drv_data->ssp_type, spi->bits_per_word); 1299 drv_data->ssp_type, spi->bits_per_word);
1241 return -EINVAL; 1300 return -EINVAL;
1242 } 1301 } else if (pxa25x_ssp_comp(drv_data)
1243 else if (drv_data->ssp_type == PXA25x_SSP
1244 && (spi->bits_per_word < 4 1302 && (spi->bits_per_word < 4
1245 || spi->bits_per_word > 16)) { 1303 || spi->bits_per_word > 16)) {
1246 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " 1304 dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
@@ -1259,7 +1317,17 @@ static int setup(struct spi_device *spi)
1259 return -ENOMEM; 1317 return -ENOMEM;
1260 } 1318 }
1261 1319
1262 chip->gpio_cs = -1; 1320 if (drv_data->ssp_type == CE4100_SSP) {
1321 if (spi->chip_select > 4) {
1322 dev_err(&spi->dev, "failed setup: "
1323 "cs number must not be > 4.\n");
1324 kfree(chip);
1325 return -EINVAL;
1326 }
1327
1328 chip->frm = spi->chip_select;
1329 } else
1330 chip->gpio_cs = -1;
1263 chip->enable_dma = 0; 1331 chip->enable_dma = 0;
1264 chip->timeout = TIMOUT_DFLT; 1332 chip->timeout = TIMOUT_DFLT;
1265 chip->dma_burst_size = drv_data->master_info->enable_dma ? 1333 chip->dma_burst_size = drv_data->master_info->enable_dma ?
@@ -1315,7 +1383,7 @@ static int setup(struct spi_device *spi)
1315 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); 1383 | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
1316 1384
1317 /* NOTE: PXA25x_SSP _could_ use external clocking ... */ 1385 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1318 if (drv_data->ssp_type != PXA25x_SSP) 1386 if (!pxa25x_ssp_comp(drv_data))
1319 dev_dbg(&spi->dev, "%ld Hz actual, %s\n", 1387 dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
1320 clk_get_rate(ssp->clk) 1388 clk_get_rate(ssp->clk)
1321 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), 1389 / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
@@ -1350,23 +1418,27 @@ static int setup(struct spi_device *spi)
1350 1418
1351 spi_set_ctldata(spi, chip); 1419 spi_set_ctldata(spi, chip);
1352 1420
1421 if (drv_data->ssp_type == CE4100_SSP)
1422 return 0;
1423
1353 return setup_cs(spi, chip, chip_info); 1424 return setup_cs(spi, chip, chip_info);
1354} 1425}
1355 1426
1356static void cleanup(struct spi_device *spi) 1427static void cleanup(struct spi_device *spi)
1357{ 1428{
1358 struct chip_data *chip = spi_get_ctldata(spi); 1429 struct chip_data *chip = spi_get_ctldata(spi);
1430 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1359 1431
1360 if (!chip) 1432 if (!chip)
1361 return; 1433 return;
1362 1434
1363 if (gpio_is_valid(chip->gpio_cs)) 1435 if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
1364 gpio_free(chip->gpio_cs); 1436 gpio_free(chip->gpio_cs);
1365 1437
1366 kfree(chip); 1438 kfree(chip);
1367} 1439}
1368 1440
1369static int __init init_queue(struct driver_data *drv_data) 1441static int __devinit init_queue(struct driver_data *drv_data)
1370{ 1442{
1371 INIT_LIST_HEAD(&drv_data->queue); 1443 INIT_LIST_HEAD(&drv_data->queue);
1372 spin_lock_init(&drv_data->lock); 1444 spin_lock_init(&drv_data->lock);
@@ -1454,7 +1526,7 @@ static int destroy_queue(struct driver_data *drv_data)
1454 return 0; 1526 return 0;
1455} 1527}
1456 1528
1457static int __init pxa2xx_spi_probe(struct platform_device *pdev) 1529static int __devinit pxa2xx_spi_probe(struct platform_device *pdev)
1458{ 1530{
1459 struct device *dev = &pdev->dev; 1531 struct device *dev = &pdev->dev;
1460 struct pxa2xx_spi_master *platform_info; 1532 struct pxa2xx_spi_master *platform_info;
@@ -1484,6 +1556,10 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1484 drv_data->pdev = pdev; 1556 drv_data->pdev = pdev;
1485 drv_data->ssp = ssp; 1557 drv_data->ssp = ssp;
1486 1558
1559 master->dev.parent = &pdev->dev;
1560#ifdef CONFIG_OF
1561 master->dev.of_node = pdev->dev.of_node;
1562#endif
1487 /* the spi->mode bits understood by this driver: */ 1563 /* the spi->mode bits understood by this driver: */
1488 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1564 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1489 1565
@@ -1500,7 +1576,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1500 1576
1501 drv_data->ioaddr = ssp->mmio_base; 1577 drv_data->ioaddr = ssp->mmio_base;
1502 drv_data->ssdr_physical = ssp->phys_base + SSDR; 1578 drv_data->ssdr_physical = ssp->phys_base + SSDR;
1503 if (ssp->type == PXA25x_SSP) { 1579 if (pxa25x_ssp_comp(drv_data)) {
1504 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; 1580 drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
1505 drv_data->dma_cr1 = 0; 1581 drv_data->dma_cr1 = 0;
1506 drv_data->clear_sr = SSSR_ROR; 1582 drv_data->clear_sr = SSSR_ROR;
@@ -1512,7 +1588,8 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1512 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; 1588 drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
1513 } 1589 }
1514 1590
1515 status = request_irq(ssp->irq, ssp_int, 0, dev_name(dev), drv_data); 1591 status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
1592 drv_data);
1516 if (status < 0) { 1593 if (status < 0) {
1517 dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); 1594 dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
1518 goto out_error_master_alloc; 1595 goto out_error_master_alloc;
@@ -1561,7 +1638,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
1561 | SSCR0_Motorola 1638 | SSCR0_Motorola
1562 | SSCR0_DataSize(8), 1639 | SSCR0_DataSize(8),
1563 drv_data->ioaddr); 1640 drv_data->ioaddr);
1564 if (drv_data->ssp_type != PXA25x_SSP) 1641 if (!pxa25x_ssp_comp(drv_data))
1565 write_SSTO(0, drv_data->ioaddr); 1642 write_SSTO(0, drv_data->ioaddr);
1566 write_SSPSP(0, drv_data->ioaddr); 1643 write_SSPSP(0, drv_data->ioaddr);
1567 1644
@@ -1723,13 +1800,14 @@ static struct platform_driver driver = {
1723 .pm = &pxa2xx_spi_pm_ops, 1800 .pm = &pxa2xx_spi_pm_ops,
1724#endif 1801#endif
1725 }, 1802 },
1803 .probe = pxa2xx_spi_probe,
1726 .remove = pxa2xx_spi_remove, 1804 .remove = pxa2xx_spi_remove,
1727 .shutdown = pxa2xx_spi_shutdown, 1805 .shutdown = pxa2xx_spi_shutdown,
1728}; 1806};
1729 1807
1730static int __init pxa2xx_spi_init(void) 1808static int __init pxa2xx_spi_init(void)
1731{ 1809{
1732 return platform_driver_probe(&driver, pxa2xx_spi_probe); 1810 return platform_driver_register(&driver);
1733} 1811}
1734subsys_initcall(pxa2xx_spi_init); 1812subsys_initcall(pxa2xx_spi_init);
1735 1813
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
new file mode 100644
index 000000000000..351d8a375b57
--- /dev/null
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -0,0 +1,201 @@
1/*
2 * CE4100's SPI device is more or less the same one as found on PXA
3 *
4 */
5#include <linux/pci.h>
6#include <linux/platform_device.h>
7#include <linux/of_device.h>
8#include <linux/spi/pxa2xx_spi.h>
9
10struct awesome_struct {
11 struct ssp_device ssp;
12 struct platform_device spi_pdev;
13 struct pxa2xx_spi_master spi_pdata;
14};
15
16static DEFINE_MUTEX(ssp_lock);
17static LIST_HEAD(ssp_list);
18
19struct ssp_device *pxa_ssp_request(int port, const char *label)
20{
21 struct ssp_device *ssp = NULL;
22
23 mutex_lock(&ssp_lock);
24
25 list_for_each_entry(ssp, &ssp_list, node) {
26 if (ssp->port_id == port && ssp->use_count == 0) {
27 ssp->use_count++;
28 ssp->label = label;
29 break;
30 }
31 }
32
33 mutex_unlock(&ssp_lock);
34
35 if (&ssp->node == &ssp_list)
36 return NULL;
37
38 return ssp;
39}
40EXPORT_SYMBOL_GPL(pxa_ssp_request);
41
42void pxa_ssp_free(struct ssp_device *ssp)
43{
44 mutex_lock(&ssp_lock);
45 if (ssp->use_count) {
46 ssp->use_count--;
47 ssp->label = NULL;
48 } else
49 dev_err(&ssp->pdev->dev, "device already free\n");
50 mutex_unlock(&ssp_lock);
51}
52EXPORT_SYMBOL_GPL(pxa_ssp_free);
53
54static void plat_dev_release(struct device *dev)
55{
56 struct awesome_struct *as = container_of(dev,
57 struct awesome_struct, spi_pdev.dev);
58
59 of_device_node_put(&as->spi_pdev.dev);
60}
61
62static int __devinit ce4100_spi_probe(struct pci_dev *dev,
63 const struct pci_device_id *ent)
64{
65 int ret;
66 resource_size_t phys_beg;
67 resource_size_t phys_len;
68 struct awesome_struct *spi_info;
69 struct platform_device *pdev;
70 struct pxa2xx_spi_master *spi_pdata;
71 struct ssp_device *ssp;
72
73 ret = pci_enable_device(dev);
74 if (ret)
75 return ret;
76
77 phys_beg = pci_resource_start(dev, 0);
78 phys_len = pci_resource_len(dev, 0);
79
80 if (!request_mem_region(phys_beg, phys_len,
81 "CE4100 SPI")) {
82 dev_err(&dev->dev, "Can't request register space.\n");
83 ret = -EBUSY;
84 return ret;
85 }
86
87 spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
88 if (!spi_info) {
89 ret = -ENOMEM;
90 goto err_kz;
91 }
92 ssp = &spi_info->ssp;
93 pdev = &spi_info->spi_pdev;
94 spi_pdata = &spi_info->spi_pdata;
95
96 pdev->name = "pxa2xx-spi";
97 pdev->id = dev->devfn;
98 pdev->dev.parent = &dev->dev;
99 pdev->dev.platform_data = &spi_info->spi_pdata;
100
101#ifdef CONFIG_OF
102 pdev->dev.of_node = dev->dev.of_node;
103#endif
104 pdev->dev.release = plat_dev_release;
105
106 spi_pdata->num_chipselect = dev->devfn;
107
108 ssp->phys_base = pci_resource_start(dev, 0);
109 ssp->mmio_base = ioremap(phys_beg, phys_len);
110 if (!ssp->mmio_base) {
111 dev_err(&pdev->dev, "failed to ioremap() registers\n");
112 ret = -EIO;
113 goto err_remap;
114 }
115 ssp->irq = dev->irq;
116 ssp->port_id = pdev->id;
117 ssp->type = PXA25x_SSP;
118
119 mutex_lock(&ssp_lock);
120 list_add(&ssp->node, &ssp_list);
121 mutex_unlock(&ssp_lock);
122
123 pci_set_drvdata(dev, spi_info);
124
125 ret = platform_device_register(pdev);
126 if (ret)
127 goto err_dev_add;
128
129 return ret;
130
131err_dev_add:
132 pci_set_drvdata(dev, NULL);
133 mutex_lock(&ssp_lock);
134 list_del(&ssp->node);
135 mutex_unlock(&ssp_lock);
136 iounmap(ssp->mmio_base);
137
138err_remap:
139 kfree(spi_info);
140
141err_kz:
142 release_mem_region(phys_beg, phys_len);
143
144 return ret;
145}
146
147static void __devexit ce4100_spi_remove(struct pci_dev *dev)
148{
149 struct awesome_struct *spi_info;
150 struct platform_device *pdev;
151 struct ssp_device *ssp;
152
153 spi_info = pci_get_drvdata(dev);
154
155 ssp = &spi_info->ssp;
156 pdev = &spi_info->spi_pdev;
157
158 platform_device_unregister(pdev);
159
160 iounmap(ssp->mmio_base);
161 release_mem_region(pci_resource_start(dev, 0),
162 pci_resource_len(dev, 0));
163
164 mutex_lock(&ssp_lock);
165 list_del(&ssp->node);
166 mutex_unlock(&ssp_lock);
167
168 pci_set_drvdata(dev, NULL);
169 pci_disable_device(dev);
170 kfree(spi_info);
171}
172
173static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
174
175 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
176 { },
177};
178MODULE_DEVICE_TABLE(pci, ce4100_spi_devices);
179
180static struct pci_driver ce4100_spi_driver = {
181 .name = "ce4100_spi",
182 .id_table = ce4100_spi_devices,
183 .probe = ce4100_spi_probe,
184 .remove = __devexit_p(ce4100_spi_remove),
185};
186
187static int __init ce4100_spi_init(void)
188{
189 return pci_register_driver(&ce4100_spi_driver);
190}
191module_init(ce4100_spi_init);
192
193static void __exit ce4100_spi_exit(void)
194{
195 pci_unregister_driver(&ce4100_spi_driver);
196}
197module_exit(ce4100_spi_exit);
198
199MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver");
200MODULE_LICENSE("GPL v2");
201MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
index 55a38e2c6c13..9469564e6888 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi_imx.c
@@ -66,7 +66,6 @@ enum spi_imx_devtype {
66 SPI_IMX_VER_0_5, 66 SPI_IMX_VER_0_5,
67 SPI_IMX_VER_0_7, 67 SPI_IMX_VER_0_7,
68 SPI_IMX_VER_2_3, 68 SPI_IMX_VER_2_3,
69 SPI_IMX_VER_AUTODETECT,
70}; 69};
71 70
72struct spi_imx_data; 71struct spi_imx_data;
@@ -720,9 +719,6 @@ static void spi_imx_cleanup(struct spi_device *spi)
720 719
721static struct platform_device_id spi_imx_devtype[] = { 720static struct platform_device_id spi_imx_devtype[] = {
722 { 721 {
723 .name = DRIVER_NAME,
724 .driver_data = SPI_IMX_VER_AUTODETECT,
725 }, {
726 .name = "imx1-cspi", 722 .name = "imx1-cspi",
727 .driver_data = SPI_IMX_VER_IMX1, 723 .driver_data = SPI_IMX_VER_IMX1,
728 }, { 724 }, {
@@ -802,30 +798,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
802 798
803 init_completion(&spi_imx->xfer_done); 799 init_completion(&spi_imx->xfer_done);
804 800
805 if (pdev->id_entry->driver_data == SPI_IMX_VER_AUTODETECT) { 801 spi_imx->devtype_data =
806 if (cpu_is_mx25() || cpu_is_mx35()) 802 spi_imx_devtype_data[pdev->id_entry->driver_data];
807 spi_imx->devtype_data =
808 spi_imx_devtype_data[SPI_IMX_VER_0_7];
809 else if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35())
810 spi_imx->devtype_data =
811 spi_imx_devtype_data[SPI_IMX_VER_0_4];
812 else if (cpu_is_mx27() || cpu_is_mx21())
813 spi_imx->devtype_data =
814 spi_imx_devtype_data[SPI_IMX_VER_0_0];
815 else if (cpu_is_mx1())
816 spi_imx->devtype_data =
817 spi_imx_devtype_data[SPI_IMX_VER_IMX1];
818 else
819 BUG();
820 } else
821 spi_imx->devtype_data =
822 spi_imx_devtype_data[pdev->id_entry->driver_data];
823
824 if (!spi_imx->devtype_data.intctrl) {
825 dev_err(&pdev->dev, "no support for this device compiled in\n");
826 ret = -ENODEV;
827 goto out_gpio_free;
828 }
829 803
830 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 804 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
831 if (!res) { 805 if (!res) {
@@ -847,7 +821,7 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
847 } 821 }
848 822
849 spi_imx->irq = platform_get_irq(pdev, 0); 823 spi_imx->irq = platform_get_irq(pdev, 0);
850 if (spi_imx->irq <= 0) { 824 if (spi_imx->irq < 0) {
851 ret = -EINVAL; 825 ret = -EINVAL;
852 goto out_iounmap; 826 goto out_iounmap;
853 } 827 }
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c
index dff63be0d0a8..d5be18b3078c 100644
--- a/drivers/spi/spi_nuc900.c
+++ b/drivers/spi/spi_nuc900.c
@@ -449,7 +449,7 @@ err_iomap:
449 release_mem_region(hw->res->start, resource_size(hw->res)); 449 release_mem_region(hw->res->start, resource_size(hw->res));
450 kfree(hw->ioarea); 450 kfree(hw->ioarea);
451err_pdata: 451err_pdata:
452 spi_master_put(hw->master);; 452 spi_master_put(hw->master);
453 453
454err_nomem: 454err_nomem:
455 return err; 455 return err;
diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c
index 58e187f45ec7..79e48d451137 100644
--- a/drivers/spi/spi_topcliff_pch.c
+++ b/drivers/spi/spi_topcliff_pch.c
@@ -267,7 +267,7 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
267 if (reg_spsr_val & SPSR_FI_BIT) { 267 if (reg_spsr_val & SPSR_FI_BIT) {
268 /* disable FI & RFI interrupts */ 268 /* disable FI & RFI interrupts */
269 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, 269 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
270 SPCR_FIE_BIT | SPCR_TFIE_BIT); 270 SPCR_FIE_BIT | SPCR_RFIE_BIT);
271 271
272 /* transfer is completed;inform pch_spi_process_messages */ 272 /* transfer is completed;inform pch_spi_process_messages */
273 data->transfer_complete = true; 273 data->transfer_complete = true;
@@ -677,15 +677,15 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
677{ 677{
678 /* enable interrupts */ 678 /* enable interrupts */
679 if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { 679 if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
680 /* set receive threhold to PCH_RX_THOLD */ 680 /* set receive threshold to PCH_RX_THOLD */
681 pch_spi_setclr_reg(data->master, PCH_SPCR, 681 pch_spi_setclr_reg(data->master, PCH_SPCR,
682 PCH_RX_THOLD << SPCR_TFIC_FIELD, 682 PCH_RX_THOLD << SPCR_RFIC_FIELD,
683 ~MASK_TFIC_SPCR_BITS); 683 ~MASK_RFIC_SPCR_BITS);
684 /* enable FI and RFI interrupts */ 684 /* enable FI and RFI interrupts */
685 pch_spi_setclr_reg(data->master, PCH_SPCR, 685 pch_spi_setclr_reg(data->master, PCH_SPCR,
686 SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0); 686 SPCR_RFIE_BIT | SPCR_FIE_BIT, 0);
687 } else { 687 } else {
688 /* set receive threhold to maximum */ 688 /* set receive threshold to maximum */
689 pch_spi_setclr_reg(data->master, PCH_SPCR, 689 pch_spi_setclr_reg(data->master, PCH_SPCR,
690 PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, 690 PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
691 ~MASK_TFIC_SPCR_BITS); 691 ~MASK_TFIC_SPCR_BITS);
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 80f2db5bcfd6..7adaef62a991 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -1,26 +1,27 @@
1/* 1/*
2 * xilinx_spi.c
3 *
4 * Xilinx SPI controller driver (master mode only) 2 * Xilinx SPI controller driver (master mode only)
5 * 3 *
6 * Author: MontaVista Software, Inc. 4 * Author: MontaVista Software, Inc.
7 * source@mvista.com 5 * source@mvista.com
8 * 6 *
9 * 2002-2007 (c) MontaVista Software, Inc. This file is licensed under the 7 * Copyright (c) 2010 Secret Lab Technologies, Ltd.
10 * terms of the GNU General Public License version 2. This program is licensed 8 * Copyright (c) 2009 Intel Corporation
11 * "as is" without any warranty of any kind, whether express or implied. 9 * 2002-2007 (c) MontaVista Software, Inc.
10
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
12 */ 14 */
13 15
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/init.h> 17#include <linux/init.h>
16#include <linux/interrupt.h> 18#include <linux/interrupt.h>
17 19#include <linux/of.h>
20#include <linux/platform_device.h>
18#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
19#include <linux/spi/spi_bitbang.h> 22#include <linux/spi/spi_bitbang.h>
20#include <linux/io.h>
21
22#include "xilinx_spi.h"
23#include <linux/spi/xilinx_spi.h> 23#include <linux/spi/xilinx_spi.h>
24#include <linux/io.h>
24 25
25#define XILINX_SPI_NAME "xilinx_spi" 26#define XILINX_SPI_NAME "xilinx_spi"
26 27
@@ -350,19 +351,22 @@ static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
350 return IRQ_HANDLED; 351 return IRQ_HANDLED;
351} 352}
352 353
354#ifdef CONFIG_OF
355static const struct of_device_id xilinx_spi_of_match[] = {
356 { .compatible = "xlnx,xps-spi-2.00.a", },
357 { .compatible = "xlnx,xps-spi-2.00.b", },
358 {}
359};
360MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
361#endif
362
353struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, 363struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
354 u32 irq, s16 bus_num) 364 u32 irq, s16 bus_num, int num_cs, int little_endian, int bits_per_word)
355{ 365{
356 struct spi_master *master; 366 struct spi_master *master;
357 struct xilinx_spi *xspi; 367 struct xilinx_spi *xspi;
358 struct xspi_platform_data *pdata = dev->platform_data;
359 int ret; 368 int ret;
360 369
361 if (!pdata) {
362 dev_err(dev, "No platform data attached\n");
363 return NULL;
364 }
365
366 master = spi_alloc_master(dev, sizeof(struct xilinx_spi)); 370 master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
367 if (!master) 371 if (!master)
368 return NULL; 372 return NULL;
@@ -389,21 +393,21 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
389 } 393 }
390 394
391 master->bus_num = bus_num; 395 master->bus_num = bus_num;
392 master->num_chipselect = pdata->num_chipselect; 396 master->num_chipselect = num_cs;
393#ifdef CONFIG_OF 397#ifdef CONFIG_OF
394 master->dev.of_node = dev->of_node; 398 master->dev.of_node = dev->of_node;
395#endif 399#endif
396 400
397 xspi->mem = *mem; 401 xspi->mem = *mem;
398 xspi->irq = irq; 402 xspi->irq = irq;
399 if (pdata->little_endian) { 403 if (little_endian) {
400 xspi->read_fn = xspi_read32; 404 xspi->read_fn = xspi_read32;
401 xspi->write_fn = xspi_write32; 405 xspi->write_fn = xspi_write32;
402 } else { 406 } else {
403 xspi->read_fn = xspi_read32_be; 407 xspi->read_fn = xspi_read32_be;
404 xspi->write_fn = xspi_write32_be; 408 xspi->write_fn = xspi_write32_be;
405 } 409 }
406 xspi->bits_per_word = pdata->bits_per_word; 410 xspi->bits_per_word = bits_per_word;
407 if (xspi->bits_per_word == 8) { 411 if (xspi->bits_per_word == 8) {
408 xspi->tx_fn = xspi_tx8; 412 xspi->tx_fn = xspi_tx8;
409 xspi->rx_fn = xspi_rx8; 413 xspi->rx_fn = xspi_rx8;
@@ -462,6 +466,97 @@ void xilinx_spi_deinit(struct spi_master *master)
462} 466}
463EXPORT_SYMBOL(xilinx_spi_deinit); 467EXPORT_SYMBOL(xilinx_spi_deinit);
464 468
469static int __devinit xilinx_spi_probe(struct platform_device *dev)
470{
471 struct xspi_platform_data *pdata;
472 struct resource *r;
473 int irq, num_cs = 0, little_endian = 0, bits_per_word = 8;
474 struct spi_master *master;
475 u8 i;
476
477 pdata = dev->dev.platform_data;
478 if (pdata) {
479 num_cs = pdata->num_chipselect;
480 little_endian = pdata->little_endian;
481 bits_per_word = pdata->bits_per_word;
482 }
483
484#ifdef CONFIG_OF
485 if (dev->dev.of_node) {
486 const __be32 *prop;
487 int len;
488
489 /* number of slave select bits is required */
490 prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits",
491 &len);
492 if (prop && len >= sizeof(*prop))
493 num_cs = __be32_to_cpup(prop);
494 }
495#endif
496
497 if (!num_cs) {
498 dev_err(&dev->dev, "Missing slave select configuration data\n");
499 return -EINVAL;
500 }
501
502
503 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
504 if (!r)
505 return -ENODEV;
506
507 irq = platform_get_irq(dev, 0);
508 if (irq < 0)
509 return -ENXIO;
510
511 master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs,
512 little_endian, bits_per_word);
513 if (!master)
514 return -ENODEV;
515
516 if (pdata) {
517 for (i = 0; i < pdata->num_devices; i++)
518 spi_new_device(master, pdata->devices + i);
519 }
520
521 platform_set_drvdata(dev, master);
522 return 0;
523}
524
525static int __devexit xilinx_spi_remove(struct platform_device *dev)
526{
527 xilinx_spi_deinit(platform_get_drvdata(dev));
528 platform_set_drvdata(dev, 0);
529
530 return 0;
531}
532
533/* work with hotplug and coldplug */
534MODULE_ALIAS("platform:" XILINX_SPI_NAME);
535
536static struct platform_driver xilinx_spi_driver = {
537 .probe = xilinx_spi_probe,
538 .remove = __devexit_p(xilinx_spi_remove),
539 .driver = {
540 .name = XILINX_SPI_NAME,
541 .owner = THIS_MODULE,
542#ifdef CONFIG_OF
543 .of_match_table = xilinx_spi_of_match,
544#endif
545 },
546};
547
548static int __init xilinx_spi_pltfm_init(void)
549{
550 return platform_driver_register(&xilinx_spi_driver);
551}
552module_init(xilinx_spi_pltfm_init);
553
554static void __exit xilinx_spi_pltfm_exit(void)
555{
556 platform_driver_unregister(&xilinx_spi_driver);
557}
558module_exit(xilinx_spi_pltfm_exit);
559
465MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>"); 560MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
466MODULE_DESCRIPTION("Xilinx SPI driver"); 561MODULE_DESCRIPTION("Xilinx SPI driver");
467MODULE_LICENSE("GPL"); 562MODULE_LICENSE("GPL");
diff --git a/drivers/spi/xilinx_spi.h b/drivers/spi/xilinx_spi.h
deleted file mode 100644
index d211accf68d2..000000000000
--- a/drivers/spi/xilinx_spi.h
+++ /dev/null
@@ -1,32 +0,0 @@
1/*
2 * Xilinx SPI device driver API and platform data header file
3 *
4 * Copyright (c) 2009 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#ifndef _XILINX_SPI_H_
21#define _XILINX_SPI_H_
22
23#include <linux/spi/spi.h>
24#include <linux/spi/spi_bitbang.h>
25
26#define XILINX_SPI_NAME "xilinx_spi"
27
28struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
29 u32 irq, s16 bus_num);
30
31void xilinx_spi_deinit(struct spi_master *master);
32#endif
diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c
deleted file mode 100644
index b66c2dbf20a5..000000000000
--- a/drivers/spi/xilinx_spi_of.c
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2 * Xilinx SPI OF device driver
3 *
4 * Copyright (c) 2009 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20/* Supports:
21 * Xilinx SPI devices as OF devices
22 *
23 * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/io.h>
30#include <linux/slab.h>
31
32#include <linux/of_address.h>
33#include <linux/of_platform.h>
34#include <linux/of_device.h>
35#include <linux/of_spi.h>
36
37#include <linux/spi/xilinx_spi.h>
38#include "xilinx_spi.h"
39
40
41static int __devinit xilinx_spi_of_probe(struct platform_device *ofdev,
42 const struct of_device_id *match)
43{
44 struct spi_master *master;
45 struct xspi_platform_data *pdata;
46 struct resource r_mem;
47 struct resource r_irq;
48 int rc = 0;
49 const u32 *prop;
50 int len;
51
52 rc = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem);
53 if (rc) {
54 dev_warn(&ofdev->dev, "invalid address\n");
55 return rc;
56 }
57
58 rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
59 if (rc == NO_IRQ) {
60 dev_warn(&ofdev->dev, "no IRQ found\n");
61 return -ENODEV;
62 }
63
64 ofdev->dev.platform_data =
65 kzalloc(sizeof(struct xspi_platform_data), GFP_KERNEL);
66 pdata = ofdev->dev.platform_data;
67 if (!pdata)
68 return -ENOMEM;
69
70 /* number of slave select bits is required */
71 prop = of_get_property(ofdev->dev.of_node, "xlnx,num-ss-bits", &len);
72 if (!prop || len < sizeof(*prop)) {
73 dev_warn(&ofdev->dev, "no 'xlnx,num-ss-bits' property\n");
74 return -EINVAL;
75 }
76 pdata->num_chipselect = *prop;
77 pdata->bits_per_word = 8;
78 master = xilinx_spi_init(&ofdev->dev, &r_mem, r_irq.start, -1);
79 if (!master)
80 return -ENODEV;
81
82 dev_set_drvdata(&ofdev->dev, master);
83
84 return 0;
85}
86
87static int __devexit xilinx_spi_remove(struct platform_device *ofdev)
88{
89 xilinx_spi_deinit(dev_get_drvdata(&ofdev->dev));
90 dev_set_drvdata(&ofdev->dev, 0);
91 kfree(ofdev->dev.platform_data);
92 ofdev->dev.platform_data = NULL;
93 return 0;
94}
95
96static int __exit xilinx_spi_of_remove(struct platform_device *op)
97{
98 return xilinx_spi_remove(op);
99}
100
101static const struct of_device_id xilinx_spi_of_match[] = {
102 { .compatible = "xlnx,xps-spi-2.00.a", },
103 { .compatible = "xlnx,xps-spi-2.00.b", },
104 {}
105};
106
107MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
108
109static struct of_platform_driver xilinx_spi_of_driver = {
110 .probe = xilinx_spi_of_probe,
111 .remove = __exit_p(xilinx_spi_of_remove),
112 .driver = {
113 .name = "xilinx-xps-spi",
114 .owner = THIS_MODULE,
115 .of_match_table = xilinx_spi_of_match,
116 },
117};
118
119static int __init xilinx_spi_of_init(void)
120{
121 return of_register_platform_driver(&xilinx_spi_of_driver);
122}
123module_init(xilinx_spi_of_init);
124
125static void __exit xilinx_spi_of_exit(void)
126{
127 of_unregister_platform_driver(&xilinx_spi_of_driver);
128}
129module_exit(xilinx_spi_of_exit);
130
131MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
132MODULE_DESCRIPTION("Xilinx SPI platform driver");
133MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/xilinx_spi_pltfm.c b/drivers/spi/xilinx_spi_pltfm.c
deleted file mode 100644
index 24debac646a9..000000000000
--- a/drivers/spi/xilinx_spi_pltfm.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Support for Xilinx SPI platform devices
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19/* Supports:
20 * Xilinx SPI devices as platform devices
21 *
22 * Inspired by xilinx_spi.c, 2002-2007 (c) MontaVista Software, Inc.
23 */
24
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/platform_device.h>
30
31#include <linux/spi/spi.h>
32#include <linux/spi/spi_bitbang.h>
33#include <linux/spi/xilinx_spi.h>
34
35#include "xilinx_spi.h"
36
37static int __devinit xilinx_spi_probe(struct platform_device *dev)
38{
39 struct xspi_platform_data *pdata;
40 struct resource *r;
41 int irq;
42 struct spi_master *master;
43 u8 i;
44
45 pdata = dev->dev.platform_data;
46 if (!pdata)
47 return -ENODEV;
48
49 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
50 if (!r)
51 return -ENODEV;
52
53 irq = platform_get_irq(dev, 0);
54 if (irq < 0)
55 return -ENXIO;
56
57 master = xilinx_spi_init(&dev->dev, r, irq, dev->id);
58 if (!master)
59 return -ENODEV;
60
61 for (i = 0; i < pdata->num_devices; i++)
62 spi_new_device(master, pdata->devices + i);
63
64 platform_set_drvdata(dev, master);
65 return 0;
66}
67
68static int __devexit xilinx_spi_remove(struct platform_device *dev)
69{
70 xilinx_spi_deinit(platform_get_drvdata(dev));
71 platform_set_drvdata(dev, 0);
72
73 return 0;
74}
75
76/* work with hotplug and coldplug */
77MODULE_ALIAS("platform:" XILINX_SPI_NAME);
78
79static struct platform_driver xilinx_spi_driver = {
80 .probe = xilinx_spi_probe,
81 .remove = __devexit_p(xilinx_spi_remove),
82 .driver = {
83 .name = XILINX_SPI_NAME,
84 .owner = THIS_MODULE,
85 },
86};
87
88static int __init xilinx_spi_pltfm_init(void)
89{
90 return platform_driver_register(&xilinx_spi_driver);
91}
92module_init(xilinx_spi_pltfm_init);
93
94static void __exit xilinx_spi_pltfm_exit(void)
95{
96 platform_driver_unregister(&xilinx_spi_driver);
97}
98module_exit(xilinx_spi_pltfm_exit);
99
100MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
101MODULE_DESCRIPTION("Xilinx SPI platform driver");
102MODULE_LICENSE("GPL v2");