-rw-r--r--   Documentation/devicetree/bindings/spi/omap-spi.txt     20
-rw-r--r--   Documentation/spi/spi-summary                          58
-rw-r--r--   drivers/spi/Kconfig                                    36
-rw-r--r--   drivers/spi/Makefile                                    4
-rw-r--r--   drivers/spi/spi-bcm63xx.c                             486
-rw-r--r--   drivers/spi/spi-dw-pci.c                                2
-rw-r--r--   drivers/spi/spi-fsl-espi.c                             14
-rw-r--r--   drivers/spi/spi-imx.c                                  11
-rw-r--r--   drivers/spi/spi-nuc900.c                                2
-rw-r--r--   drivers/spi/spi-omap2-mcspi.c                          56
-rw-r--r--   drivers/spi/spi-pl022.c                               286
-rw-r--r--   drivers/spi/spi-pxa2xx-pci.c                            2
-rw-r--r--   drivers/spi/spi-rspi.c                                521
-rw-r--r--   drivers/spi/spi-s3c64xx.c                             232
-rw-r--r--   drivers/spi/spi-sh-hspi.c                             331
-rw-r--r--   drivers/spi/spi-sh.c                                   25
-rw-r--r--   drivers/spi/spi-sirf.c                                687
-rw-r--r--   drivers/spi/spi-topcliff-pch.c                        113
-rw-r--r--   drivers/spi/spi.c                                     347
-rw-r--r--   include/linux/amba/pl022.h                              3
-rw-r--r--   include/linux/spi/sh_hspi.h                            23
-rw-r--r--   include/linux/spi/spi.h                                53
22 files changed, 2890 insertions, 422 deletions
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
new file mode 100644
index 000000000000..81df374adbb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/omap-spi.txt
@@ -0,0 +1,20 @@
+OMAP2+ McSPI device
+
+Required properties:
+- compatible :
+  - "ti,omap2-spi" for OMAP2 & OMAP3.
+  - "ti,omap4-spi" for OMAP4+.
+- ti,spi-num-cs : Number of chipselect supported by the instance.
+- ti,hwmods: Name of the hwmod associated to the McSPI
+
+
+Example:
+
+mcspi1: mcspi@1 {
+    #address-cells = <1>;
+    #size-cells = <0>;
+    compatible = "ti,omap4-mcspi";
+    ti,hwmods = "mcspi1";
+    ti,spi-num-cs = <4>;
+};
+
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index 4884cb33845d..7312ec14dd89 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -1,7 +1,7 @@
 Overview of Linux kernel SPI support
 ====================================
 
-21-May-2007
+02-Feb-2012
 
 What is SPI?
 ------------
@@ -483,9 +483,9 @@ also initialize its own internal state. (See below about bus numbering
 and those methods.)
 
 After you initialize the spi_master, then use spi_register_master() to
-publish it to the rest of the system. At that time, device nodes for
-the controller and any predeclared spi devices will be made available,
-and the driver model core will take care of binding them to drivers.
+publish it to the rest of the system. At that time, device nodes for the
+controller and any predeclared spi devices will be made available, and
+the driver model core will take care of binding them to drivers.
 
 If you need to remove your SPI controller driver, spi_unregister_master()
 will reverse the effect of spi_register_master().
@@ -521,21 +521,53 @@ SPI MASTER METHODS
 	** When you code setup(), ASSUME that the controller
 	** is actively processing transfers for another device.
 
-    master->transfer(struct spi_device *spi, struct spi_message *message)
-	This must not sleep. Its responsibility is arrange that the
-	transfer happens and its complete() callback is issued. The two
-	will normally happen later, after other transfers complete, and
-	if the controller is idle it will need to be kickstarted.
-
     master->cleanup(struct spi_device *spi)
 	Your controller driver may use spi_device.controller_state to hold
 	state it dynamically associates with that device. If you do that,
 	be sure to provide the cleanup() method to free that state.
 
+    master->prepare_transfer_hardware(struct spi_master *master)
+	This will be called by the queue mechanism to signal to the driver
+	that a message is coming in soon, so the subsystem requests the
+	driver to prepare the transfer hardware by issuing this call.
+	This may sleep.
+
+    master->unprepare_transfer_hardware(struct spi_master *master)
+	This will be called by the queue mechanism to signal to the driver
+	that there are no more messages pending in the queue and it may
+	relax the hardware (e.g. by power management calls). This may sleep.
+
+    master->transfer_one_message(struct spi_master *master,
+				 struct spi_message *mesg)
+	The subsystem calls the driver to transfer a single message while
+	queuing transfers that arrive in the meantime. When the driver is
+	finished with this message, it must call
+	spi_finalize_current_message() so the subsystem can issue the next
+	transfer. This may sleep.
+
+    DEPRECATED METHODS
+
+    master->transfer(struct spi_device *spi, struct spi_message *message)
+	This must not sleep. Its responsibility is arrange that the
+	transfer happens and its complete() callback is issued. The two
+	will normally happen later, after other transfers complete, and
+	if the controller is idle it will need to be kickstarted. This
+	method is not used on queued controllers and must be NULL if
+	transfer_one_message() and (un)prepare_transfer_hardware() are
+	implemented.
+
 
 SPI MESSAGE QUEUE
 
-The bulk of the driver will be managing the I/O queue fed by transfer().
+If you are happy with the standard queueing mechanism provided by the
+SPI subsystem, just implement the queued methods specified above. Using
+the message queue has the upside of centralizing a lot of code and
+providing pure process-context execution of methods. The message queue
+can also be elevated to realtime priority on high-priority SPI traffic.
+
+Unless the queueing mechanism in the SPI subsystem is selected, the bulk
+of the driver will be managing the I/O queue fed by the now deprecated
+function transfer().
 
 That queue could be purely conceptual. For example, a driver used only
 for low-frequency sensor access might be fine using synchronous PIO.
@@ -561,4 +593,6 @@ Stephen Street
 Mark Underwood
 Andrew Victor
 Vitaly Wool
-
+Grant Likely
+Mark Brown
+Linus Walleij
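
(Editor's illustration, not part of the patch set: a minimal sketch of how a
controller driver might wire up the queued methods documented above. The
foo_* names and the foo_priv structure are hypothetical; the converted
drivers in this series, such as spi-pl022.c further down, show real usage.)

	static int foo_prepare_transfer_hardware(struct spi_master *master)
	{
		struct foo_priv *priv = spi_master_get_devdata(master);

		/* A message is about to arrive: power/clock the block up. */
		clk_enable(priv->clk);
		return 0;
	}

	static int foo_transfer_one_message(struct spi_master *master,
					    struct spi_message *msg)
	{
		struct foo_priv *priv = spi_master_get_devdata(master);
		struct spi_transfer *t;
		int ret = 0;

		/* Runs in the subsystem's message pump; sleeping is allowed. */
		list_for_each_entry(t, &msg->transfers, transfer_list) {
			ret = foo_do_one_transfer(priv, msg->spi, t);
			if (ret)
				break;
			msg->actual_length += t->len;
		}

		msg->status = ret;
		/* Hand the message back so the queue can issue the next one. */
		spi_finalize_current_message(master);
		return ret;
	}

	static int foo_unprepare_transfer_hardware(struct spi_master *master)
	{
		struct foo_priv *priv = spi_master_get_devdata(master);

		/* Queue drained: it is safe to relax the hardware again. */
		clk_disable(priv->clk);
		return 0;
	}

	/* ... in probe(), alongside setup() and cleanup(): */
	master->prepare_transfer_hardware = foo_prepare_transfer_hardware;
	master->transfer_one_message = foo_transfer_one_message;
	master->unprepare_transfer_hardware = foo_unprepare_transfer_hardware;
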
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8293658e7cf9..0b06e360628a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -94,6 +94,12 @@ config SPI_AU1550
 	  If you say yes to this option, support will be included for the
 	  PSC SPI controller found on Au1550, Au1200 and Au1300 series.
 
+config SPI_BCM63XX
+	tristate "Broadcom BCM63xx SPI controller"
+	depends on BCM63XX
+	help
+	  Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+
 config SPI_BITBANG
 	tristate "Utilities for Bitbanging SPI masters"
 	help
@@ -126,7 +132,7 @@ config SPI_COLDFIRE_QSPI
 
 config SPI_DAVINCI
 	tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
-	depends on SPI_MASTER && ARCH_DAVINCI
+	depends on ARCH_DAVINCI
 	select SPI_BITBANG
 	help
 	  SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
@@ -188,7 +194,7 @@ config SPI_MPC52xx_PSC
 
 config SPI_MPC512x_PSC
 	tristate "Freescale MPC512x PSC SPI controller"
-	depends on SPI_MASTER && PPC_MPC512x
+	depends on PPC_MPC512x
 	help
 	  This enables using the Freescale MPC5121 Programmable Serial
 	  Controller in SPI master mode.
@@ -238,7 +244,7 @@ config SPI_OMAP24XX
 
 config SPI_OMAP_100K
 	tristate "OMAP SPI 100K"
-	depends on SPI_MASTER && (ARCH_OMAP850 || ARCH_OMAP730)
+	depends on ARCH_OMAP850 || ARCH_OMAP730
 	help
 	  OMAP SPI 100K master controller for omap7xx boards.
 
@@ -262,7 +268,7 @@ config SPI_PL022
 
 config SPI_PPC4xx
 	tristate "PPC4xx SPI Controller"
-	depends on PPC32 && 4xx && SPI_MASTER
+	depends on PPC32 && 4xx
 	select SPI_BITBANG
 	help
 	  This selects a driver for the PPC4xx SPI Controller.
@@ -279,6 +285,12 @@ config SPI_PXA2XX
 config SPI_PXA2XX_PCI
 	def_bool SPI_PXA2XX && X86_32 && PCI
 
+config SPI_RSPI
+	tristate "Renesas RSPI controller"
+	depends on SUPERH
+	help
+	  SPI driver for Renesas RSPI blocks.
+
 config SPI_S3C24XX
 	tristate "Samsung S3C24XX series SPI"
 	depends on ARCH_S3C2410 && EXPERIMENTAL
@@ -324,9 +336,22 @@ config SPI_SH_SCI
 	help
 	  SPI driver for SuperH SCI blocks.
 
+config SPI_SH_HSPI
+	tristate "SuperH HSPI controller"
+	depends on ARCH_SHMOBILE
+	help
+	  SPI driver for SuperH HSPI blocks.
+
+config SPI_SIRF
+	tristate "CSR SiRFprimaII SPI controller"
+	depends on ARCH_PRIMA2
+	select SPI_BITBANG
+	help
+	  SPI driver for CSR SiRFprimaII SoCs
+
 config SPI_STMP3XXX
 	tristate "Freescale STMP37xx/378x SPI/SSP controller"
-	depends on ARCH_STMP3XXX && SPI_MASTER
+	depends on ARCH_STMP3XXX
 	help
 	  SPI driver for Freescale STMP37xx/378x SoC SSP interface
 
@@ -384,7 +409,6 @@ config SPI_NUC900
 
 config SPI_DESIGNWARE
 	tristate "DesignWare SPI controller core support"
-	depends on SPI_MASTER
 	help
 	  general driver for SPI controller core from DesignWare
 
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 61c3261c388c..a1d48e0ba3dc 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
 obj-$(CONFIG_SPI_ATMEL)			+= spi-atmel.o
 obj-$(CONFIG_SPI_ATH79)			+= spi-ath79.o
 obj-$(CONFIG_SPI_AU1550)		+= spi-au1550.o
+obj-$(CONFIG_SPI_BCM63XX)		+= spi-bcm63xx.o
 obj-$(CONFIG_SPI_BFIN)			+= spi-bfin5xx.o
 obj-$(CONFIG_SPI_BFIN_SPORT)		+= spi-bfin-sport.o
 obj-$(CONFIG_SPI_BITBANG)		+= spi-bitbang.o
@@ -44,13 +45,16 @@ obj-$(CONFIG_SPI_PL022) += spi-pl022.o
 obj-$(CONFIG_SPI_PPC4xx)		+= spi-ppc4xx.o
 obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)		+= spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_RSPI)			+= spi-rspi.o
 obj-$(CONFIG_SPI_S3C24XX)		+= spi-s3c24xx-hw.o
 spi-s3c24xx-hw-y			:= spi-s3c24xx.o
 spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
 obj-$(CONFIG_SPI_S3C64XX)		+= spi-s3c64xx.o
 obj-$(CONFIG_SPI_SH)			+= spi-sh.o
+obj-$(CONFIG_SPI_SH_HSPI)		+= spi-sh-hspi.o
 obj-$(CONFIG_SPI_SH_MSIOF)		+= spi-sh-msiof.o
 obj-$(CONFIG_SPI_SH_SCI)		+= spi-sh-sci.o
+obj-$(CONFIG_SPI_SIRF)			+= spi-sirf.o
 obj-$(CONFIG_SPI_STMP3XXX)		+= spi-stmp.o
 obj-$(CONFIG_SPI_TEGRA)			+= spi-tegra.o
 obj-$(CONFIG_SPI_TI_SSP)		+= spi-ti-ssp.o
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
new file mode 100644
index 000000000000..f01b2648452e
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx.c
@@ -0,0 +1,486 @@
1/*
2 * Broadcom BCM63xx SPI controller support
3 *
4 * Copyright (C) 2009-2011 Florian Fainelli <florian@openwrt.org>
5 * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the
19 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
20 */
21
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/clk.h>
25#include <linux/io.h>
26#include <linux/module.h>
27#include <linux/platform_device.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/spi/spi.h>
31#include <linux/completion.h>
32#include <linux/err.h>
33
34#include <bcm63xx_dev_spi.h>
35
36#define PFX KBUILD_MODNAME
37#define DRV_VER "0.1.2"
38
39struct bcm63xx_spi {
40 spinlock_t lock;
41 int stopping;
42 struct completion done;
43
44 void __iomem *regs;
45 int irq;
46
47 /* Platform data */
48 u32 speed_hz;
49 unsigned fifo_size;
50
51 /* Data buffers */
52 const unsigned char *tx_ptr;
53 unsigned char *rx_ptr;
54
55 /* data iomem */
56 u8 __iomem *tx_io;
57 const u8 __iomem *rx_io;
58
59 int remaining_bytes;
60
61 struct clk *clk;
62 struct platform_device *pdev;
63};
64
65static inline u8 bcm_spi_readb(struct bcm63xx_spi *bs,
66 unsigned int offset)
67{
68 return bcm_readb(bs->regs + bcm63xx_spireg(offset));
69}
70
71static inline u16 bcm_spi_readw(struct bcm63xx_spi *bs,
72 unsigned int offset)
73{
74 return bcm_readw(bs->regs + bcm63xx_spireg(offset));
75}
76
77static inline void bcm_spi_writeb(struct bcm63xx_spi *bs,
78 u8 value, unsigned int offset)
79{
80 bcm_writeb(value, bs->regs + bcm63xx_spireg(offset));
81}
82
83static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
84 u16 value, unsigned int offset)
85{
86 bcm_writew(value, bs->regs + bcm63xx_spireg(offset));
87}
88
89static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
90 { 20000000, SPI_CLK_20MHZ },
91 { 12500000, SPI_CLK_12_50MHZ },
92 { 6250000, SPI_CLK_6_250MHZ },
93 { 3125000, SPI_CLK_3_125MHZ },
94 { 1563000, SPI_CLK_1_563MHZ },
95 { 781000, SPI_CLK_0_781MHZ },
96 { 391000, SPI_CLK_0_391MHZ }
97};
98
99static int bcm63xx_spi_setup_transfer(struct spi_device *spi,
100 struct spi_transfer *t)
101{
102 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
103 u8 bits_per_word;
104 u8 clk_cfg, reg;
105 u32 hz;
106 int i;
107
108 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
109 hz = (t) ? t->speed_hz : spi->max_speed_hz;
110 if (bits_per_word != 8) {
111 dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
112 __func__, bits_per_word);
113 return -EINVAL;
114 }
115
116 if (spi->chip_select > spi->master->num_chipselect) {
117 dev_err(&spi->dev, "%s, unsupported slave %d\n",
118 __func__, spi->chip_select);
119 return -EINVAL;
120 }
121
122 /* Find the closest clock configuration */
123 for (i = 0; i < SPI_CLK_MASK; i++) {
124 if (hz <= bcm63xx_spi_freq_table[i][0]) {
125 clk_cfg = bcm63xx_spi_freq_table[i][1];
126 break;
127 }
128 }
129
130 /* No matching configuration found, default to lowest */
131 if (i == SPI_CLK_MASK)
132 clk_cfg = SPI_CLK_0_391MHZ;
133
134 /* clear existing clock configuration bits of the register */
135 reg = bcm_spi_readb(bs, SPI_CLK_CFG);
136 reg &= ~SPI_CLK_MASK;
137 reg |= clk_cfg;
138
139 bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
140 dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
141 clk_cfg, hz);
142
143 return 0;
144}
145
146/* the spi->mode bits understood by this driver: */
147#define MODEBITS (SPI_CPOL | SPI_CPHA)
148
149static int bcm63xx_spi_setup(struct spi_device *spi)
150{
151 struct bcm63xx_spi *bs;
152 int ret;
153
154 bs = spi_master_get_devdata(spi->master);
155
156 if (bs->stopping)
157 return -ESHUTDOWN;
158
159 if (!spi->bits_per_word)
160 spi->bits_per_word = 8;
161
162 if (spi->mode & ~MODEBITS) {
163 dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
164 __func__, spi->mode & ~MODEBITS);
165 return -EINVAL;
166 }
167
168 ret = bcm63xx_spi_setup_transfer(spi, NULL);
169 if (ret < 0) {
170 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
171 spi->mode & ~MODEBITS);
172 return ret;
173 }
174
175 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
176 __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
177
178 return 0;
179}
180
181/* Fill the TX FIFO with as many bytes as possible */
182static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs)
183{
184 u8 size;
185
186 /* Fill the Tx FIFO with as many bytes as possible */
187 size = bs->remaining_bytes < bs->fifo_size ? bs->remaining_bytes :
188 bs->fifo_size;
189 memcpy_toio(bs->tx_io, bs->tx_ptr, size);
190 bs->remaining_bytes -= size;
191}
192
193static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
194{
195 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
196 u16 msg_ctl;
197 u16 cmd;
198
199 dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
200 t->tx_buf, t->rx_buf, t->len);
201
202 /* Transmitter is inhibited */
203 bs->tx_ptr = t->tx_buf;
204 bs->rx_ptr = t->rx_buf;
205 init_completion(&bs->done);
206
207 if (t->tx_buf) {
208 bs->remaining_bytes = t->len;
209 bcm63xx_spi_fill_tx_fifo(bs);
210 }
211
212 /* Enable the command done interrupt which
213 * we use to determine completion of a command */
214 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
215
216 /* Fill in the Message control register */
217 msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT);
218
219 if (t->rx_buf && t->tx_buf)
220 msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT);
221 else if (t->rx_buf)
222 msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT);
223 else if (t->tx_buf)
224 msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT);
225
226 bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
227
228 /* Issue the transfer */
229 cmd = SPI_CMD_START_IMMEDIATE;
230 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
231 cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
232 bcm_spi_writew(bs, cmd, SPI_CMD);
233 wait_for_completion(&bs->done);
234
235 /* Disable the CMD_DONE interrupt */
236 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
237
238 return t->len - bs->remaining_bytes;
239}
240
241static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m)
242{
243 struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
244 struct spi_transfer *t;
245 int ret = 0;
246
247 if (unlikely(list_empty(&m->transfers)))
248 return -EINVAL;
249
250 if (bs->stopping)
251 return -ESHUTDOWN;
252
253 list_for_each_entry(t, &m->transfers, transfer_list) {
254 ret += bcm63xx_txrx_bufs(spi, t);
255 }
256
257 m->complete(m->context);
258
259 return ret;
260}
261
262/* This driver supports single master mode only. Hence
263 * CMD_DONE is the only interrupt we care about
264 */
265static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
266{
267 struct spi_master *master = (struct spi_master *)dev_id;
268 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
269 u8 intr;
270 u16 cmd;
271
272 /* Read interrupts and clear them immediately */
273 intr = bcm_spi_readb(bs, SPI_INT_STATUS);
274 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
275 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
276
277 /* A transfer completed */
278 if (intr & SPI_INTR_CMD_DONE) {
279 u8 rx_tail;
280
281 rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL);
282
283 /* Read out all the data */
284 if (rx_tail)
285 memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail);
286
287 /* See if there is more data to send */
288 if (bs->remaining_bytes > 0) {
289 bcm63xx_spi_fill_tx_fifo(bs);
290
291 /* Start the transfer */
292 bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT,
293 SPI_MSG_CTL);
294 cmd = bcm_spi_readw(bs, SPI_CMD);
295 cmd |= SPI_CMD_START_IMMEDIATE;
296 cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
297 bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
298 bcm_spi_writew(bs, cmd, SPI_CMD);
299 } else {
300 complete(&bs->done);
301 }
302 }
303
304 return IRQ_HANDLED;
305}
306
307
308static int __devinit bcm63xx_spi_probe(struct platform_device *pdev)
309{
310 struct resource *r;
311 struct device *dev = &pdev->dev;
312 struct bcm63xx_spi_pdata *pdata = pdev->dev.platform_data;
313 int irq;
314 struct spi_master *master;
315 struct clk *clk;
316 struct bcm63xx_spi *bs;
317 int ret;
318
319 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
320 if (!r) {
321 dev_err(dev, "no iomem\n");
322 ret = -ENXIO;
323 goto out;
324 }
325
326 irq = platform_get_irq(pdev, 0);
327 if (irq < 0) {
328 dev_err(dev, "no irq\n");
329 ret = -ENXIO;
330 goto out;
331 }
332
333 clk = clk_get(dev, "spi");
334 if (IS_ERR(clk)) {
335 dev_err(dev, "no clock for device\n");
336 ret = PTR_ERR(clk);
337 goto out;
338 }
339
340 master = spi_alloc_master(dev, sizeof(*bs));
341 if (!master) {
342 dev_err(dev, "out of memory\n");
343 ret = -ENOMEM;
344 goto out_clk;
345 }
346
347 bs = spi_master_get_devdata(master);
348 init_completion(&bs->done);
349
350 platform_set_drvdata(pdev, master);
351 bs->pdev = pdev;
352
353 if (!devm_request_mem_region(&pdev->dev, r->start,
354 resource_size(r), PFX)) {
355 dev_err(dev, "iomem request failed\n");
356 ret = -ENXIO;
357 goto out_err;
358 }
359
360 bs->regs = devm_ioremap_nocache(&pdev->dev, r->start,
361 resource_size(r));
362 if (!bs->regs) {
363 dev_err(dev, "unable to ioremap regs\n");
364 ret = -ENOMEM;
365 goto out_err;
366 }
367
368 bs->irq = irq;
369 bs->clk = clk;
370 bs->fifo_size = pdata->fifo_size;
371
372 ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
373 pdev->name, master);
374 if (ret) {
375 dev_err(dev, "unable to request irq\n");
376 goto out_err;
377 }
378
379 master->bus_num = pdata->bus_num;
380 master->num_chipselect = pdata->num_chipselect;
381 master->setup = bcm63xx_spi_setup;
382 master->transfer = bcm63xx_transfer;
383 bs->speed_hz = pdata->speed_hz;
384 bs->stopping = 0;
385 bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
386 bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
387 spin_lock_init(&bs->lock);
388
389 /* Initialize hardware */
390 clk_enable(bs->clk);
391 bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
392
393 /* register and we are done */
394 ret = spi_register_master(master);
395 if (ret) {
396 dev_err(dev, "spi register failed\n");
397 goto out_clk_disable;
398 }
399
400 dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d) v%s\n",
401 r->start, irq, bs->fifo_size, DRV_VER);
402
403 return 0;
404
405out_clk_disable:
406 clk_disable(clk);
407out_err:
408 platform_set_drvdata(pdev, NULL);
409 spi_master_put(master);
410out_clk:
411 clk_put(clk);
412out:
413 return ret;
414}
415
416static int __devexit bcm63xx_spi_remove(struct platform_device *pdev)
417{
418 struct spi_master *master = platform_get_drvdata(pdev);
419 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
420
421 /* reset spi block */
422 bcm_spi_writeb(bs, 0, SPI_INT_MASK);
423 spin_lock(&bs->lock);
424 bs->stopping = 1;
425
426 /* HW shutdown */
427 clk_disable(bs->clk);
428 clk_put(bs->clk);
429
430 spin_unlock(&bs->lock);
431 platform_set_drvdata(pdev, 0);
432 spi_unregister_master(master);
433
434 return 0;
435}
436
437#ifdef CONFIG_PM
438static int bcm63xx_spi_suspend(struct device *dev)
439{
440 struct spi_master *master =
441 platform_get_drvdata(to_platform_device(dev));
442 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
443
444 clk_disable(bs->clk);
445
446 return 0;
447}
448
449static int bcm63xx_spi_resume(struct device *dev)
450{
451 struct spi_master *master =
452 platform_get_drvdata(to_platform_device(dev));
453 struct bcm63xx_spi *bs = spi_master_get_devdata(master);
454
455 clk_enable(bs->clk);
456
457 return 0;
458}
459
460static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
461 .suspend = bcm63xx_spi_suspend,
462 .resume = bcm63xx_spi_resume,
463};
464
465#define BCM63XX_SPI_PM_OPS (&bcm63xx_spi_pm_ops)
466#else
467#define BCM63XX_SPI_PM_OPS NULL
468#endif
469
470static struct platform_driver bcm63xx_spi_driver = {
471 .driver = {
472 .name = "bcm63xx-spi",
473 .owner = THIS_MODULE,
474 .pm = BCM63XX_SPI_PM_OPS,
475 },
476 .probe = bcm63xx_spi_probe,
477 .remove = __devexit_p(bcm63xx_spi_remove),
478};
479
480module_platform_driver(bcm63xx_spi_driver);
481
482MODULE_ALIAS("platform:bcm63xx_spi");
483MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
484MODULE_AUTHOR("Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>");
485MODULE_DESCRIPTION("Broadcom BCM63xx SPI Controller driver");
486MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
index f64250ea1611..14f7cc9523f0 100644
--- a/drivers/spi/spi-dw-pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -149,7 +149,7 @@ static int spi_resume(struct pci_dev *pdev)
 #define spi_resume	NULL
 #endif
 
-static const struct pci_device_id pci_ids[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pci_ids) = {
 	/* Intel MID platform SPI controller 0 */
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0800) },
 	{},
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index d770f03705c3..7523a2429d09 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -180,18 +180,20 @@ static int fsl_espi_setup_transfer(struct spi_device *spi,
 
 	if ((mpc8xxx_spi->spibrg / hz) > 64) {
 		cs->hw_mode |= CSMODE_DIV16;
-		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
+		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);
 
-		WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
+		WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. "
 			  "Will use %d Hz instead.\n", dev_name(&spi->dev),
-			  hz, mpc8xxx_spi->spibrg / 1024);
-		if (pm > 16)
-			pm = 16;
+			  hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
+		if (pm > 33)
+			pm = 33;
 	} else {
-		pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
+		pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
 	}
 	if (pm)
 		pm--;
+	if (pm < 2)
+		pm = 2;
 
 	cs->hw_mode |= CSMODE_PM(pm);
 
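
(Editor's illustration of the revised divider math, with made-up figures: if
spibrg = 133333333 Hz and the requested hz = 100000, then spibrg / hz > 64, so
the DIV16 path is taken and pm = DIV_ROUND_UP(133333333, 100000 * 16 * 4) = 21;
after the decrement the register field holds 20, giving an actual clock of
133333333 / (16 * 4 * 21), roughly 99.2 kHz. The slowest reachable rate is
spibrg / (4 * 16 * (32 + 1)), which is why pm is now capped at 33 before the
decrement and clamped to at least 2 afterwards.)
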
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c6e697f5e007..31054e3de4c1 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -793,13 +793,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
 
 		ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
 		if (ret) {
-			while (i > 0) {
-				i--;
-				if (spi_imx->chipselect[i] >= 0)
-					gpio_free(spi_imx->chipselect[i]);
-			}
 			dev_err(&pdev->dev, "can't get cs gpios\n");
-			goto out_master_put;
+			goto out_gpio_free;
 		}
 	}
 
@@ -881,10 +876,10 @@ out_iounmap:
 out_release_mem:
 	release_mem_region(res->start, resource_size(res));
 out_gpio_free:
-	for (i = 0; i < master->num_chipselect; i++)
+	while (--i >= 0) {
 		if (spi_imx->chipselect[i] >= 0)
 			gpio_free(spi_imx->chipselect[i]);
-out_master_put:
+	}
 	spi_master_put(master);
 	kfree(master);
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 182e9c873822..dae8be229c5d 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -360,8 +360,6 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
 	}
 
 	hw = spi_master_get_devdata(master);
-	memset(hw, 0, sizeof(struct nuc900_spi));
-
 	hw->master = spi_master_get(master);
 	hw->pdata  = pdev->dev.platform_data;
 	hw->dev = &pdev->dev;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 0b0dfb71c640..bb9274c2526d 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -34,6 +34,8 @@
34#include <linux/io.h> 34#include <linux/io.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/pm_runtime.h> 36#include <linux/pm_runtime.h>
37#include <linux/of.h>
38#include <linux/of_device.h>
37 39
38#include <linux/spi/spi.h> 40#include <linux/spi/spi.h>
39 41
@@ -1079,15 +1081,39 @@ static int omap_mcspi_runtime_resume(struct device *dev)
1079 return 0; 1081 return 0;
1080} 1082}
1081 1083
1084static struct omap2_mcspi_platform_config omap2_pdata = {
1085 .regs_offset = 0,
1086};
1087
1088static struct omap2_mcspi_platform_config omap4_pdata = {
1089 .regs_offset = OMAP4_MCSPI_REG_OFFSET,
1090};
1091
1092static const struct of_device_id omap_mcspi_of_match[] = {
1093 {
1094 .compatible = "ti,omap2-mcspi",
1095 .data = &omap2_pdata,
1096 },
1097 {
1098 .compatible = "ti,omap4-mcspi",
1099 .data = &omap4_pdata,
1100 },
1101 { },
1102};
1103MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
1082 1104
1083static int __init omap2_mcspi_probe(struct platform_device *pdev) 1105static int __init omap2_mcspi_probe(struct platform_device *pdev)
1084{ 1106{
1085 struct spi_master *master; 1107 struct spi_master *master;
1086 struct omap2_mcspi_platform_config *pdata = pdev->dev.platform_data; 1108 struct omap2_mcspi_platform_config *pdata;
1087 struct omap2_mcspi *mcspi; 1109 struct omap2_mcspi *mcspi;
1088 struct resource *r; 1110 struct resource *r;
1089 int status = 0, i; 1111 int status = 0, i;
1090 char wq_name[20]; 1112 char wq_name[20];
1113 u32 regs_offset = 0;
1114 static int bus_num = 1;
1115 struct device_node *node = pdev->dev.of_node;
1116 const struct of_device_id *match;
1091 1117
1092 master = spi_alloc_master(&pdev->dev, sizeof *mcspi); 1118 master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
1093 if (master == NULL) { 1119 if (master == NULL) {
@@ -1098,13 +1124,26 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
1098 /* the spi->mode bits understood by this driver: */ 1124 /* the spi->mode bits understood by this driver: */
1099 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; 1125 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1100 1126
1101 if (pdev->id != -1)
1102 master->bus_num = pdev->id;
1103
1104 master->setup = omap2_mcspi_setup; 1127 master->setup = omap2_mcspi_setup;
1105 master->transfer = omap2_mcspi_transfer; 1128 master->transfer = omap2_mcspi_transfer;
1106 master->cleanup = omap2_mcspi_cleanup; 1129 master->cleanup = omap2_mcspi_cleanup;
1107 master->num_chipselect = pdata->num_cs; 1130 master->dev.of_node = node;
1131
1132 match = of_match_device(omap_mcspi_of_match, &pdev->dev);
1133 if (match) {
1134 u32 num_cs = 1; /* default number of chipselect */
1135 pdata = match->data;
1136
1137 of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
1138 master->num_chipselect = num_cs;
1139 master->bus_num = bus_num++;
1140 } else {
1141 pdata = pdev->dev.platform_data;
1142 master->num_chipselect = pdata->num_cs;
1143 if (pdev->id != -1)
1144 master->bus_num = pdev->id;
1145 }
1146 regs_offset = pdata->regs_offset;
1108 1147
1109 dev_set_drvdata(&pdev->dev, master); 1148 dev_set_drvdata(&pdev->dev, master);
1110 1149
@@ -1124,8 +1163,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
1124 goto free_master; 1163 goto free_master;
1125 } 1164 }
1126 1165
1127 r->start += pdata->regs_offset; 1166 r->start += regs_offset;
1128 r->end += pdata->regs_offset; 1167 r->end += regs_offset;
1129 mcspi->phys = r->start; 1168 mcspi->phys = r->start;
1130 if (!request_mem_region(r->start, resource_size(r), 1169 if (!request_mem_region(r->start, resource_size(r),
1131 dev_name(&pdev->dev))) { 1170 dev_name(&pdev->dev))) {
@@ -1285,7 +1324,8 @@ static struct platform_driver omap2_mcspi_driver = {
1285 .driver = { 1324 .driver = {
1286 .name = "omap2_mcspi", 1325 .name = "omap2_mcspi",
1287 .owner = THIS_MODULE, 1326 .owner = THIS_MODULE,
1288 .pm = &omap2_mcspi_pm_ops 1327 .pm = &omap2_mcspi_pm_ops,
1328 .of_match_table = omap_mcspi_of_match,
1289 }, 1329 },
1290 .remove = __exit_p(omap2_mcspi_remove), 1330 .remove = __exit_p(omap2_mcspi_remove),
1291}; 1331};
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f37ad2271ad5..dc8485d1e883 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -29,7 +29,6 @@
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/spi/spi.h> 31#include <linux/spi/spi.h>
32#include <linux/workqueue.h>
33#include <linux/delay.h> 32#include <linux/delay.h>
34#include <linux/clk.h> 33#include <linux/clk.h>
35#include <linux/err.h> 34#include <linux/err.h>
@@ -330,12 +329,13 @@ struct vendor_data {
330 * @clk: outgoing clock "SPICLK" for the SPI bus 329 * @clk: outgoing clock "SPICLK" for the SPI bus
331 * @master: SPI framework hookup 330 * @master: SPI framework hookup
332 * @master_info: controller-specific data from machine setup 331 * @master_info: controller-specific data from machine setup
333 * @workqueue: a workqueue on which any spi_message request is queued 332 * @kworker: thread struct for message pump
334 * @pump_messages: work struct for scheduling work to the workqueue 333 * @kworker_task: pointer to task for message pump kworker thread
334 * @pump_messages: work struct for scheduling work to the message pump
335 * @queue_lock: spinlock to syncronise access to message queue 335 * @queue_lock: spinlock to syncronise access to message queue
336 * @queue: message queue 336 * @queue: message queue
337 * @busy: workqueue is busy 337 * @busy: message pump is busy
338 * @running: workqueue is running 338 * @running: message pump is running
339 * @pump_transfers: Tasklet used in Interrupt Transfer mode 339 * @pump_transfers: Tasklet used in Interrupt Transfer mode
340 * @cur_msg: Pointer to current spi_message being processed 340 * @cur_msg: Pointer to current spi_message being processed
341 * @cur_transfer: Pointer to current spi_transfer 341 * @cur_transfer: Pointer to current spi_transfer
@@ -365,14 +365,7 @@ struct pl022 {
365 struct clk *clk; 365 struct clk *clk;
366 struct spi_master *master; 366 struct spi_master *master;
367 struct pl022_ssp_controller *master_info; 367 struct pl022_ssp_controller *master_info;
368 /* Driver message queue */ 368 /* Message per-transfer pump */
369 struct workqueue_struct *workqueue;
370 struct work_struct pump_messages;
371 spinlock_t queue_lock;
372 struct list_head queue;
373 bool busy;
374 bool running;
375 /* Message transfer pump */
376 struct tasklet_struct pump_transfers; 369 struct tasklet_struct pump_transfers;
377 struct spi_message *cur_msg; 370 struct spi_message *cur_msg;
378 struct spi_transfer *cur_transfer; 371 struct spi_transfer *cur_transfer;
@@ -394,6 +387,7 @@ struct pl022 {
394 struct sg_table sgt_rx; 387 struct sg_table sgt_rx;
395 struct sg_table sgt_tx; 388 struct sg_table sgt_tx;
396 char *dummypage; 389 char *dummypage;
390 bool dma_running;
397#endif 391#endif
398}; 392};
399 393
@@ -448,8 +442,6 @@ static void null_cs_control(u32 command)
448static void giveback(struct pl022 *pl022) 442static void giveback(struct pl022 *pl022)
449{ 443{
450 struct spi_transfer *last_transfer; 444 struct spi_transfer *last_transfer;
451 unsigned long flags;
452 struct spi_message *msg;
453 pl022->next_msg_cs_active = false; 445 pl022->next_msg_cs_active = false;
454 446
455 last_transfer = list_entry(pl022->cur_msg->transfers.prev, 447 last_transfer = list_entry(pl022->cur_msg->transfers.prev,
@@ -477,15 +469,8 @@ static void giveback(struct pl022 *pl022)
477 * sent the current message could be unloaded, which 469 * sent the current message could be unloaded, which
478 * could invalidate the cs_control() callback... 470 * could invalidate the cs_control() callback...
479 */ 471 */
480
481 /* get a pointer to the next message, if any */ 472 /* get a pointer to the next message, if any */
482 spin_lock_irqsave(&pl022->queue_lock, flags); 473 next_msg = spi_get_next_queued_message(pl022->master);
483 if (list_empty(&pl022->queue))
484 next_msg = NULL;
485 else
486 next_msg = list_entry(pl022->queue.next,
487 struct spi_message, queue);
488 spin_unlock_irqrestore(&pl022->queue_lock, flags);
489 474
490 /* 475 /*
491 * see if the next and current messages point 476 * see if the next and current messages point
@@ -497,19 +482,13 @@ static void giveback(struct pl022 *pl022)
497 pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); 482 pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
498 else 483 else
499 pl022->next_msg_cs_active = true; 484 pl022->next_msg_cs_active = true;
485
500 } 486 }
501 487
502 spin_lock_irqsave(&pl022->queue_lock, flags);
503 msg = pl022->cur_msg;
504 pl022->cur_msg = NULL; 488 pl022->cur_msg = NULL;
505 pl022->cur_transfer = NULL; 489 pl022->cur_transfer = NULL;
506 pl022->cur_chip = NULL; 490 pl022->cur_chip = NULL;
507 queue_work(pl022->workqueue, &pl022->pump_messages); 491 spi_finalize_current_message(pl022->master);
508 spin_unlock_irqrestore(&pl022->queue_lock, flags);
509
510 msg->state = NULL;
511 if (msg->complete)
512 msg->complete(msg->context);
513} 492}
514 493
515/** 494/**
@@ -1063,6 +1042,7 @@ static int configure_dma(struct pl022 *pl022)
1063 dmaengine_submit(txdesc); 1042 dmaengine_submit(txdesc);
1064 dma_async_issue_pending(rxchan); 1043 dma_async_issue_pending(rxchan);
1065 dma_async_issue_pending(txchan); 1044 dma_async_issue_pending(txchan);
1045 pl022->dma_running = true;
1066 1046
1067 return 0; 1047 return 0;
1068 1048
@@ -1141,11 +1121,12 @@ static void terminate_dma(struct pl022 *pl022)
1141 dmaengine_terminate_all(rxchan); 1121 dmaengine_terminate_all(rxchan);
1142 dmaengine_terminate_all(txchan); 1122 dmaengine_terminate_all(txchan);
1143 unmap_free_dma_scatter(pl022); 1123 unmap_free_dma_scatter(pl022);
1124 pl022->dma_running = false;
1144} 1125}
1145 1126
1146static void pl022_dma_remove(struct pl022 *pl022) 1127static void pl022_dma_remove(struct pl022 *pl022)
1147{ 1128{
1148 if (pl022->busy) 1129 if (pl022->dma_running)
1149 terminate_dma(pl022); 1130 terminate_dma(pl022);
1150 if (pl022->dma_tx_channel) 1131 if (pl022->dma_tx_channel)
1151 dma_release_channel(pl022->dma_tx_channel); 1132 dma_release_channel(pl022->dma_tx_channel);
@@ -1493,73 +1474,20 @@ out:
1493 return; 1474 return;
1494} 1475}
1495 1476
1496/** 1477static int pl022_transfer_one_message(struct spi_master *master,
1497 * pump_messages - Workqueue function which processes spi message queue 1478 struct spi_message *msg)
1498 * @data: pointer to private data of SSP driver
1499 *
1500 * This function checks if there is any spi message in the queue that
1501 * needs processing and delegate control to appropriate function
1502 * do_polling_transfer()/do_interrupt_dma_transfer()
1503 * based on the kind of the transfer
1504 *
1505 */
1506static void pump_messages(struct work_struct *work)
1507{ 1479{
1508 struct pl022 *pl022 = 1480 struct pl022 *pl022 = spi_master_get_devdata(master);
1509 container_of(work, struct pl022, pump_messages);
1510 unsigned long flags;
1511 bool was_busy = false;
1512
1513 /* Lock queue and check for queue work */
1514 spin_lock_irqsave(&pl022->queue_lock, flags);
1515 if (list_empty(&pl022->queue) || !pl022->running) {
1516 if (pl022->busy) {
1517 /* nothing more to do - disable spi/ssp and power off */
1518 writew((readw(SSP_CR1(pl022->virtbase)) &
1519 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
1520
1521 if (pl022->master_info->autosuspend_delay > 0) {
1522 pm_runtime_mark_last_busy(&pl022->adev->dev);
1523 pm_runtime_put_autosuspend(&pl022->adev->dev);
1524 } else {
1525 pm_runtime_put(&pl022->adev->dev);
1526 }
1527 }
1528 pl022->busy = false;
1529 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1530 return;
1531 }
1532
1533 /* Make sure we are not already running a message */
1534 if (pl022->cur_msg) {
1535 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1536 return;
1537 }
1538 /* Extract head of queue */
1539 pl022->cur_msg =
1540 list_entry(pl022->queue.next, struct spi_message, queue);
1541
1542 list_del_init(&pl022->cur_msg->queue);
1543 if (pl022->busy)
1544 was_busy = true;
1545 else
1546 pl022->busy = true;
1547 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1548 1481
1549 /* Initial message state */ 1482 /* Initial message state */
1550 pl022->cur_msg->state = STATE_START; 1483 pl022->cur_msg = msg;
1551 pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, 1484 msg->state = STATE_START;
1552 struct spi_transfer, transfer_list); 1485
1486 pl022->cur_transfer = list_entry(msg->transfers.next,
1487 struct spi_transfer, transfer_list);
1553 1488
1554 /* Setup the SPI using the per chip configuration */ 1489 /* Setup the SPI using the per chip configuration */
1555 pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); 1490 pl022->cur_chip = spi_get_ctldata(msg->spi);
1556 if (!was_busy)
1557 /*
1558 * We enable the core voltage and clocks here, then the clocks
1559 * and core will be disabled when this workqueue is run again
1560 * and there is no more work to be done.
1561 */
1562 pm_runtime_get_sync(&pl022->adev->dev);
1563 1491
1564 restore_state(pl022); 1492 restore_state(pl022);
1565 flush(pl022); 1493 flush(pl022);
@@ -1568,95 +1496,37 @@ static void pump_messages(struct work_struct *work)
1568 do_polling_transfer(pl022); 1496 do_polling_transfer(pl022);
1569 else 1497 else
1570 do_interrupt_dma_transfer(pl022); 1498 do_interrupt_dma_transfer(pl022);
1571}
1572
1573static int __init init_queue(struct pl022 *pl022)
1574{
1575 INIT_LIST_HEAD(&pl022->queue);
1576 spin_lock_init(&pl022->queue_lock);
1577
1578 pl022->running = false;
1579 pl022->busy = false;
1580
1581 tasklet_init(&pl022->pump_transfers, pump_transfers,
1582 (unsigned long)pl022);
1583
1584 INIT_WORK(&pl022->pump_messages, pump_messages);
1585 pl022->workqueue = create_singlethread_workqueue(
1586 dev_name(pl022->master->dev.parent));
1587 if (pl022->workqueue == NULL)
1588 return -EBUSY;
1589 1499
1590 return 0; 1500 return 0;
1591} 1501}
1592 1502
1593static int start_queue(struct pl022 *pl022) 1503static int pl022_prepare_transfer_hardware(struct spi_master *master)
1594{ 1504{
1595 unsigned long flags; 1505 struct pl022 *pl022 = spi_master_get_devdata(master);
1596
1597 spin_lock_irqsave(&pl022->queue_lock, flags);
1598
1599 if (pl022->running || pl022->busy) {
1600 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1601 return -EBUSY;
1602 }
1603
1604 pl022->running = true;
1605 pl022->cur_msg = NULL;
1606 pl022->cur_transfer = NULL;
1607 pl022->cur_chip = NULL;
1608 pl022->next_msg_cs_active = false;
1609 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1610
1611 queue_work(pl022->workqueue, &pl022->pump_messages);
1612 1506
1507 /*
1508 * Just make sure we have all we need to run the transfer by syncing
1509 * with the runtime PM framework.
1510 */
1511 pm_runtime_get_sync(&pl022->adev->dev);
1613 return 0; 1512 return 0;
1614} 1513}
1615 1514
1616static int stop_queue(struct pl022 *pl022) 1515static int pl022_unprepare_transfer_hardware(struct spi_master *master)
1617{ 1516{
1618 unsigned long flags; 1517 struct pl022 *pl022 = spi_master_get_devdata(master);
1619 unsigned limit = 500;
1620 int status = 0;
1621 1518
1622 spin_lock_irqsave(&pl022->queue_lock, flags); 1519 /* nothing more to do - disable spi/ssp and power off */
1520 writew((readw(SSP_CR1(pl022->virtbase)) &
1521 (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
1623 1522
1624 /* This is a bit lame, but is optimized for the common execution path. 1523 if (pl022->master_info->autosuspend_delay > 0) {
1625 * A wait_queue on the pl022->busy could be used, but then the common 1524 pm_runtime_mark_last_busy(&pl022->adev->dev);
1626 * execution path (pump_messages) would be required to call wake_up or 1525 pm_runtime_put_autosuspend(&pl022->adev->dev);
1627 * friends on every SPI message. Do this instead */ 1526 } else {
1628 while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { 1527 pm_runtime_put(&pl022->adev->dev);
1629 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1630 msleep(10);
1631 spin_lock_irqsave(&pl022->queue_lock, flags);
1632 } 1528 }
1633 1529
1634 if (!list_empty(&pl022->queue) || pl022->busy)
1635 status = -EBUSY;
1636 else
1637 pl022->running = false;
1638
1639 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1640
1641 return status;
1642}
1643
1644static int destroy_queue(struct pl022 *pl022)
1645{
1646 int status;
1647
1648 status = stop_queue(pl022);
1649 /* we are unloading the module or failing to load (only two calls
1650 * to this routine), and neither call can handle a return value.
1651 * However, destroy_workqueue calls flush_workqueue, and that will
1652 * block until all work is done. If the reason that stop_queue
1653 * timed out is that the work will never finish, then it does no
1654 * good to call destroy_workqueue, so return anyway. */
1655 if (status != 0)
1656 return status;
1657
1658 destroy_workqueue(pl022->workqueue);
1659
1660 return 0; 1530 return 0;
1661} 1531}
1662 1532
@@ -1776,38 +1646,6 @@ static int verify_controller_parameters(struct pl022 *pl022,
1776 return 0; 1646 return 0;
1777} 1647}
1778 1648
1779/**
1780 * pl022_transfer - transfer function registered to SPI master framework
1781 * @spi: spi device which is requesting transfer
1782 * @msg: spi message which is to handled is queued to driver queue
1783 *
1784 * This function is registered to the SPI framework for this SPI master
1785 * controller. It will queue the spi_message in the queue of driver if
1786 * the queue is not stopped and return.
1787 */
1788static int pl022_transfer(struct spi_device *spi, struct spi_message *msg)
1789{
1790 struct pl022 *pl022 = spi_master_get_devdata(spi->master);
1791 unsigned long flags;
1792
1793 spin_lock_irqsave(&pl022->queue_lock, flags);
1794
1795 if (!pl022->running) {
1796 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1797 return -ESHUTDOWN;
1798 }
1799 msg->actual_length = 0;
1800 msg->status = -EINPROGRESS;
1801 msg->state = STATE_START;
1802
1803 list_add_tail(&msg->queue, &pl022->queue);
1804 if (pl022->running && !pl022->busy)
1805 queue_work(pl022->workqueue, &pl022->pump_messages);
1806
1807 spin_unlock_irqrestore(&pl022->queue_lock, flags);
1808 return 0;
1809}
1810
1811static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr) 1649static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
1812{ 1650{
1813 return rate / (cpsdvsr * (1 + scr)); 1651 return rate / (cpsdvsr * (1 + scr));
@@ -2170,7 +2008,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2170 master->num_chipselect = platform_info->num_chipselect; 2008 master->num_chipselect = platform_info->num_chipselect;
2171 master->cleanup = pl022_cleanup; 2009 master->cleanup = pl022_cleanup;
2172 master->setup = pl022_setup; 2010 master->setup = pl022_setup;
2173 master->transfer = pl022_transfer; 2011 master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
2012 master->transfer_one_message = pl022_transfer_one_message;
2013 master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
2014 master->rt = platform_info->rt;
2174 2015
2175 /* 2016 /*
2176 * Supports mode 0-3, loopback, and active low CS. Transfers are 2017 * Supports mode 0-3, loopback, and active low CS. Transfers are
@@ -2214,6 +2055,10 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2214 goto err_no_clk_en; 2055 goto err_no_clk_en;
2215 } 2056 }
2216 2057
2058 /* Initialize transfer pump */
2059 tasklet_init(&pl022->pump_transfers, pump_transfers,
2060 (unsigned long)pl022);
2061
2217 /* Disable SSP */ 2062 /* Disable SSP */
2218 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), 2063 writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
2219 SSP_CR1(pl022->virtbase)); 2064 SSP_CR1(pl022->virtbase));
@@ -2233,17 +2078,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2233 platform_info->enable_dma = 0; 2078 platform_info->enable_dma = 0;
2234 } 2079 }
2235 2080
2236 /* Initialize and start queue */
2237 status = init_queue(pl022);
2238 if (status != 0) {
2239 dev_err(&adev->dev, "probe - problem initializing queue\n");
2240 goto err_init_queue;
2241 }
2242 status = start_queue(pl022);
2243 if (status != 0) {
2244 dev_err(&adev->dev, "probe - problem starting queue\n");
2245 goto err_start_queue;
2246 }
2247 /* Register with the SPI framework */ 2081 /* Register with the SPI framework */
2248 amba_set_drvdata(adev, pl022); 2082 amba_set_drvdata(adev, pl022);
2249 status = spi_register_master(master); 2083 status = spi_register_master(master);
@@ -2269,9 +2103,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
2269 return 0; 2103 return 0;
2270 2104
2271 err_spi_register: 2105 err_spi_register:
2272 err_start_queue:
2273 err_init_queue:
2274 destroy_queue(pl022);
2275 if (platform_info->enable_dma) 2106 if (platform_info->enable_dma)
2276 pl022_dma_remove(pl022); 2107 pl022_dma_remove(pl022);
2277 2108
@@ -2307,9 +2138,6 @@ pl022_remove(struct amba_device *adev)
2307 */ 2138 */
2308 pm_runtime_get_noresume(&adev->dev); 2139 pm_runtime_get_noresume(&adev->dev);
2309 2140
2310 /* Remove the queue */
2311 if (destroy_queue(pl022) != 0)
2312 dev_err(&adev->dev, "queue remove failed\n");
2313 load_ssp_default_config(pl022); 2141 load_ssp_default_config(pl022);
2314 if (pl022->master_info->enable_dma) 2142 if (pl022->master_info->enable_dma)
2315 pl022_dma_remove(pl022); 2143 pl022_dma_remove(pl022);
@@ -2331,12 +2159,12 @@ pl022_remove(struct amba_device *adev)
2331static int pl022_suspend(struct device *dev) 2159static int pl022_suspend(struct device *dev)
2332{ 2160{
2333 struct pl022 *pl022 = dev_get_drvdata(dev); 2161 struct pl022 *pl022 = dev_get_drvdata(dev);
2334 int status = 0; 2162 int ret;
2335 2163
2336 status = stop_queue(pl022); 2164 ret = spi_master_suspend(pl022->master);
2337 if (status) { 2165 if (ret) {
2338 dev_warn(dev, "suspend cannot stop queue\n"); 2166 dev_warn(dev, "cannot suspend master\n");
2339 return status; 2167 return ret;
2340 } 2168 }
2341 2169
2342 dev_dbg(dev, "suspended\n"); 2170 dev_dbg(dev, "suspended\n");
@@ -2346,16 +2174,16 @@ static int pl022_suspend(struct device *dev)
2346static int pl022_resume(struct device *dev) 2174static int pl022_resume(struct device *dev)
2347{ 2175{
2348 struct pl022 *pl022 = dev_get_drvdata(dev); 2176 struct pl022 *pl022 = dev_get_drvdata(dev);
2349 int status = 0; 2177 int ret;
2350 2178
2351 /* Start the queue running */ 2179 /* Start the queue running */
2352 status = start_queue(pl022); 2180 ret = spi_master_resume(pl022->master);
2353 if (status) 2181 if (ret)
2354 dev_err(dev, "problem starting queue (%d)\n", status); 2182 dev_err(dev, "problem starting queue (%d)\n", ret);
2355 else 2183 else
2356 dev_dbg(dev, "resumed\n"); 2184 dev_dbg(dev, "resumed\n");
2357 2185
2358 return status; 2186 return ret;
2359} 2187}
2360#endif /* CONFIG_PM */ 2188#endif /* CONFIG_PM */
2361 2189
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 8caa07d58e69..3fb44afe27b4 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -151,7 +151,7 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
 	kfree(spi_info);
 }
 
-static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ce4100_spi_devices) = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
 	{ },
 };
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
new file mode 100644
index 000000000000..354f170eab95
--- /dev/null
+++ b/drivers/spi/spi-rspi.c
@@ -0,0 +1,521 @@
1/*
2 * SH RSPI driver
3 *
4 * Copyright (C) 2012 Renesas Solutions Corp.
5 *
6 * Based on spi-sh.c:
7 * Copyright (C) 2011 Renesas Solutions Corp.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <linux/kernel.h>
26#include <linux/sched.h>
27#include <linux/errno.h>
28#include <linux/list.h>
29#include <linux/workqueue.h>
30#include <linux/interrupt.h>
31#include <linux/platform_device.h>
32#include <linux/io.h>
33#include <linux/clk.h>
34#include <linux/spi/spi.h>
35
36#define RSPI_SPCR 0x00
37#define RSPI_SSLP 0x01
38#define RSPI_SPPCR 0x02
39#define RSPI_SPSR 0x03
40#define RSPI_SPDR 0x04
41#define RSPI_SPSCR 0x08
42#define RSPI_SPSSR 0x09
43#define RSPI_SPBR 0x0a
44#define RSPI_SPDCR 0x0b
45#define RSPI_SPCKD 0x0c
46#define RSPI_SSLND 0x0d
47#define RSPI_SPND 0x0e
48#define RSPI_SPCR2 0x0f
49#define RSPI_SPCMD0 0x10
50#define RSPI_SPCMD1 0x12
51#define RSPI_SPCMD2 0x14
52#define RSPI_SPCMD3 0x16
53#define RSPI_SPCMD4 0x18
54#define RSPI_SPCMD5 0x1a
55#define RSPI_SPCMD6 0x1c
56#define RSPI_SPCMD7 0x1e
57
58/* SPCR */
59#define SPCR_SPRIE 0x80
60#define SPCR_SPE 0x40
61#define SPCR_SPTIE 0x20
62#define SPCR_SPEIE 0x10
63#define SPCR_MSTR 0x08
64#define SPCR_MODFEN 0x04
65#define SPCR_TXMD 0x02
66#define SPCR_SPMS 0x01
67
68/* SSLP */
69#define SSLP_SSL1P 0x02
70#define SSLP_SSL0P 0x01
71
72/* SPPCR */
73#define SPPCR_MOIFE 0x20
74#define SPPCR_MOIFV 0x10
75#define SPPCR_SPOM 0x04
76#define SPPCR_SPLP2 0x02
77#define SPPCR_SPLP 0x01
78
79/* SPSR */
80#define SPSR_SPRF 0x80
81#define SPSR_SPTEF 0x20
82#define SPSR_PERF 0x08
83#define SPSR_MODF 0x04
84#define SPSR_IDLNF 0x02
85#define SPSR_OVRF 0x01
86
87/* SPSCR */
88#define SPSCR_SPSLN_MASK 0x07
89
90/* SPSSR */
91#define SPSSR_SPECM_MASK 0x70
92#define SPSSR_SPCP_MASK 0x07
93
94/* SPDCR */
95#define SPDCR_SPLW 0x20
96#define SPDCR_SPRDTD 0x10
97#define SPDCR_SLSEL1 0x08
98#define SPDCR_SLSEL0 0x04
99#define SPDCR_SLSEL_MASK 0x0c
100#define SPDCR_SPFC1 0x02
101#define SPDCR_SPFC0 0x01
102
103/* SPCKD */
104#define SPCKD_SCKDL_MASK 0x07
105
106/* SSLND */
107#define SSLND_SLNDL_MASK 0x07
108
109/* SPND */
110#define SPND_SPNDL_MASK 0x07
111
112/* SPCR2 */
113#define SPCR2_PTE 0x08
114#define SPCR2_SPIE 0x04
115#define SPCR2_SPOE 0x02
116#define SPCR2_SPPE 0x01
117
118/* SPCMDn */
119#define SPCMD_SCKDEN 0x8000
120#define SPCMD_SLNDEN 0x4000
121#define SPCMD_SPNDEN 0x2000
122#define SPCMD_LSBF 0x1000
123#define SPCMD_SPB_MASK 0x0f00
124#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
125#define SPCMD_SPB_20BIT 0x0000
126#define SPCMD_SPB_24BIT 0x0100
127#define SPCMD_SPB_32BIT 0x0200
128#define SPCMD_SSLKP 0x0080
129#define SPCMD_SSLA_MASK 0x0030
130#define SPCMD_BRDV_MASK 0x000c
131#define SPCMD_CPOL 0x0002
132#define SPCMD_CPHA 0x0001
133
134struct rspi_data {
135 void __iomem *addr;
136 u32 max_speed_hz;
137 struct spi_master *master;
138 struct list_head queue;
139 struct work_struct ws;
140 wait_queue_head_t wait;
141 spinlock_t lock;
142 struct clk *clk;
143 unsigned char spsr;
144};
145
146static void rspi_write8(struct rspi_data *rspi, u8 data, u16 offset)
147{
148 iowrite8(data, rspi->addr + offset);
149}
150
151static void rspi_write16(struct rspi_data *rspi, u16 data, u16 offset)
152{
153 iowrite16(data, rspi->addr + offset);
154}
155
156static u8 rspi_read8(struct rspi_data *rspi, u16 offset)
157{
158 return ioread8(rspi->addr + offset);
159}
160
161static u16 rspi_read16(struct rspi_data *rspi, u16 offset)
162{
163 return ioread16(rspi->addr + offset);
164}
165
166static unsigned char rspi_calc_spbr(struct rspi_data *rspi)
167{
168 int tmp;
169 unsigned char spbr;
170
171 tmp = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
172 spbr = clamp(tmp, 0, 255);
173
174 return spbr;
175}
176
177static void rspi_enable_irq(struct rspi_data *rspi, u8 enable)
178{
179 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
180}
181
182static void rspi_disable_irq(struct rspi_data *rspi, u8 disable)
183{
184 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
185}
186
187static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
188 u8 enable_bit)
189{
190 int ret;
191
192 rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
193 rspi_enable_irq(rspi, enable_bit);
194 ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
195 if (ret == 0 && !(rspi->spsr & wait_mask))
196 return -ETIMEDOUT;
197
198 return 0;
199}
200
201static void rspi_assert_ssl(struct rspi_data *rspi)
202{
203 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
204}
205
206static void rspi_negate_ssl(struct rspi_data *rspi)
207{
208 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
209}
210
211static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
212{
213	/* Sets output mode (CMOS) and MOSI signal (from previous transfer) */
214 rspi_write8(rspi, 0x00, RSPI_SPPCR);
215
216 /* Sets transfer bit rate */
217 rspi_write8(rspi, rspi_calc_spbr(rspi), RSPI_SPBR);
218
219 /* Sets number of frames to be used: 1 frame */
220 rspi_write8(rspi, 0x00, RSPI_SPDCR);
221
222 /* Sets RSPCK, SSL, next-access delay value */
223 rspi_write8(rspi, 0x00, RSPI_SPCKD);
224 rspi_write8(rspi, 0x00, RSPI_SSLND);
225 rspi_write8(rspi, 0x00, RSPI_SPND);
226
227 /* Sets parity, interrupt mask */
228 rspi_write8(rspi, 0x00, RSPI_SPCR2);
229
230 /* Sets SPCMD */
231 rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | SPCMD_SSLKP,
232 RSPI_SPCMD0);
233
234 /* Sets RSPI mode */
235 rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
236
237 return 0;
238}
239
240static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
241 struct spi_transfer *t)
242{
243 int remain = t->len;
244 u8 *data;
245
246 data = (u8 *)t->tx_buf;
247 while (remain > 0) {
248 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
249 RSPI_SPCR);
250
251 if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
252 dev_err(&rspi->master->dev,
253 "%s: tx empty timeout\n", __func__);
254 return -ETIMEDOUT;
255 }
256
257 rspi_write16(rspi, *data, RSPI_SPDR);
258 data++;
259 remain--;
260 }
261
262	/* Waiting for the last transmission */
263 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
264
265 return 0;
266}
267
268static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
269 struct spi_transfer *t)
270{
271 int remain = t->len;
272 u8 *data;
273 unsigned char spsr;
274
275 spsr = rspi_read8(rspi, RSPI_SPSR);
276 if (spsr & SPSR_SPRF)
277 rspi_read16(rspi, RSPI_SPDR); /* dummy read */
278 if (spsr & SPSR_OVRF)
279 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
280 RSPI_SPCR);
281
282 data = (u8 *)t->rx_buf;
283 while (remain > 0) {
284 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
285 RSPI_SPCR);
286
287 if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
288 dev_err(&rspi->master->dev,
289 "%s: tx empty timeout\n", __func__);
290 return -ETIMEDOUT;
291 }
292		/* dummy write to generate the clock */
293 rspi_write16(rspi, 0x00, RSPI_SPDR);
294
295 if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
296 dev_err(&rspi->master->dev,
297 "%s: receive timeout\n", __func__);
298 return -ETIMEDOUT;
299 }
300 /* SPDR allows 16 or 32-bit access only */
301 *data = (u8)rspi_read16(rspi, RSPI_SPDR);
302
303 data++;
304 remain--;
305 }
306
307 return 0;
308}
309
310static void rspi_work(struct work_struct *work)
311{
312 struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
313 struct spi_message *mesg;
314 struct spi_transfer *t;
315 unsigned long flags;
316 int ret;
317
318 spin_lock_irqsave(&rspi->lock, flags);
319 while (!list_empty(&rspi->queue)) {
320 mesg = list_entry(rspi->queue.next, struct spi_message, queue);
321 list_del_init(&mesg->queue);
322 spin_unlock_irqrestore(&rspi->lock, flags);
323
324 rspi_assert_ssl(rspi);
325
326 list_for_each_entry(t, &mesg->transfers, transfer_list) {
327 if (t->tx_buf) {
328 ret = rspi_send_pio(rspi, mesg, t);
329 if (ret < 0)
330 goto error;
331 }
332 if (t->rx_buf) {
333 ret = rspi_receive_pio(rspi, mesg, t);
334 if (ret < 0)
335 goto error;
336 }
337 mesg->actual_length += t->len;
338 }
339 rspi_negate_ssl(rspi);
340
341 mesg->status = 0;
342 mesg->complete(mesg->context);
343
344 spin_lock_irqsave(&rspi->lock, flags);
345 }
346
347 return;
348
349error:
350 mesg->status = ret;
351 mesg->complete(mesg->context);
352}
353
354static int rspi_setup(struct spi_device *spi)
355{
356 struct rspi_data *rspi = spi_master_get_devdata(spi->master);
357
358 if (!spi->bits_per_word)
359 spi->bits_per_word = 8;
360 rspi->max_speed_hz = spi->max_speed_hz;
361
362 rspi_set_config_register(rspi, 8);
363
364 return 0;
365}
366
367static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
368{
369 struct rspi_data *rspi = spi_master_get_devdata(spi->master);
370 unsigned long flags;
371
372 mesg->actual_length = 0;
373 mesg->status = -EINPROGRESS;
374
375 spin_lock_irqsave(&rspi->lock, flags);
376 list_add_tail(&mesg->queue, &rspi->queue);
377 schedule_work(&rspi->ws);
378 spin_unlock_irqrestore(&rspi->lock, flags);
379
380 return 0;
381}
382
383static void rspi_cleanup(struct spi_device *spi)
384{
385}
386
387static irqreturn_t rspi_irq(int irq, void *_sr)
388{
389 struct rspi_data *rspi = (struct rspi_data *)_sr;
390 unsigned long spsr;
391 irqreturn_t ret = IRQ_NONE;
392 unsigned char disable_irq = 0;
393
394 rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
395 if (spsr & SPSR_SPRF)
396 disable_irq |= SPCR_SPRIE;
397 if (spsr & SPSR_SPTEF)
398 disable_irq |= SPCR_SPTIE;
399
400 if (disable_irq) {
401 ret = IRQ_HANDLED;
402 rspi_disable_irq(rspi, disable_irq);
403 wake_up(&rspi->wait);
404 }
405
406 return ret;
407}
408
409static int __devexit rspi_remove(struct platform_device *pdev)
410{
411 struct rspi_data *rspi = dev_get_drvdata(&pdev->dev);
412
413 spi_unregister_master(rspi->master);
414 free_irq(platform_get_irq(pdev, 0), rspi);
415 clk_put(rspi->clk);
416 iounmap(rspi->addr);
417 spi_master_put(rspi->master);
418
419 return 0;
420}
421
422static int __devinit rspi_probe(struct platform_device *pdev)
423{
424 struct resource *res;
425 struct spi_master *master;
426 struct rspi_data *rspi;
427 int ret, irq;
428 char clk_name[16];
429
430 /* get base addr */
431 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
432 if (unlikely(res == NULL)) {
433 dev_err(&pdev->dev, "invalid resource\n");
434 return -EINVAL;
435 }
436
437 irq = platform_get_irq(pdev, 0);
438 if (irq < 0) {
439 dev_err(&pdev->dev, "platform_get_irq error\n");
440 return -ENODEV;
441 }
442
443 master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
444 if (master == NULL) {
445 dev_err(&pdev->dev, "spi_alloc_master error.\n");
446 return -ENOMEM;
447 }
448
449 rspi = spi_master_get_devdata(master);
450 dev_set_drvdata(&pdev->dev, rspi);
451
452 rspi->master = master;
453 rspi->addr = ioremap(res->start, resource_size(res));
454 if (rspi->addr == NULL) {
455 dev_err(&pdev->dev, "ioremap error.\n");
456 ret = -ENOMEM;
457 goto error1;
458 }
459
460 snprintf(clk_name, sizeof(clk_name), "rspi%d", pdev->id);
461 rspi->clk = clk_get(&pdev->dev, clk_name);
462 if (IS_ERR(rspi->clk)) {
463 dev_err(&pdev->dev, "cannot get clock\n");
464 ret = PTR_ERR(rspi->clk);
465 goto error2;
466 }
467 clk_enable(rspi->clk);
468
469 INIT_LIST_HEAD(&rspi->queue);
470 spin_lock_init(&rspi->lock);
471 INIT_WORK(&rspi->ws, rspi_work);
472 init_waitqueue_head(&rspi->wait);
473
474 master->num_chipselect = 2;
475 master->bus_num = pdev->id;
476 master->setup = rspi_setup;
477 master->transfer = rspi_transfer;
478 master->cleanup = rspi_cleanup;
479
480 ret = request_irq(irq, rspi_irq, 0, dev_name(&pdev->dev), rspi);
481 if (ret < 0) {
482 dev_err(&pdev->dev, "request_irq error\n");
483 goto error3;
484 }
485
486 ret = spi_register_master(master);
487 if (ret < 0) {
488 dev_err(&pdev->dev, "spi_register_master error.\n");
489 goto error4;
490 }
491
492 dev_info(&pdev->dev, "probed\n");
493
494 return 0;
495
496error4:
497 free_irq(irq, rspi);
498error3:
499 clk_put(rspi->clk);
500error2:
501 iounmap(rspi->addr);
502error1:
503 spi_master_put(master);
504
505 return ret;
506}
507
508static struct platform_driver rspi_driver = {
509 .probe = rspi_probe,
510 .remove = __devexit_p(rspi_remove),
511 .driver = {
512 .name = "rspi",
513 .owner = THIS_MODULE,
514 },
515};
516module_platform_driver(rspi_driver);
517
518MODULE_DESCRIPTION("Renesas RSPI bus driver");
519MODULE_LICENSE("GPL v2");
520MODULE_AUTHOR("Yoshihiro Shimoda");
521MODULE_ALIAS("platform:rspi");
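
rspi_calc_spbr() above derives the SPBR bit-rate divider as pclk / (2 * speed) - 1, clamped to the 8-bit register range, so the achieved rate is pclk / (2 * (SPBR + 1)). A standalone sketch of the same arithmetic; the 48 MHz peripheral clock is an assumed example value, not taken from the patch:

	#include <stdio.h>

	/* Mirrors the divider math in rspi_calc_spbr(): SPBR = pclk/(2*speed) - 1,
	 * clamped to 0..255; the resulting rate is pclk / (2 * (SPBR + 1)). */
	static unsigned int example_calc_spbr(unsigned long pclk, unsigned long speed)
	{
		long tmp = (long)(pclk / (2 * speed)) - 1;

		if (tmp < 0)
			tmp = 0;
		if (tmp > 255)
			tmp = 255;
		return (unsigned int)tmp;
	}

	int main(void)
	{
		unsigned long pclk = 48000000;	/* assumed peripheral clock */
		unsigned int spbr = example_calc_spbr(pclk, 1000000);

		/* With a 48 MHz clock and a 1 MHz request: SPBR = 23, rate = 1 MHz */
		printf("SPBR=%u -> %lu Hz\n", spbr, pclk / (2UL * (spbr + 1)));
		return 0;
	}
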
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index dcf7e1006426..972a94c58be3 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -20,10 +20,12 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23#include <linux/interrupt.h>
23#include <linux/delay.h> 24#include <linux/delay.h>
24#include <linux/clk.h> 25#include <linux/clk.h>
25#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/pm_runtime.h>
27#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
28 30
29#include <mach/dma.h> 31#include <mach/dma.h>
@@ -126,8 +128,6 @@
126 128
127#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) 129#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
128 130
129#define SUSPND (1<<0)
130#define SPIBUSY (1<<1)
131#define RXBUSY (1<<2) 131#define RXBUSY (1<<2)
132#define TXBUSY (1<<3) 132#define TXBUSY (1<<3)
133 133
@@ -142,10 +142,8 @@ struct s3c64xx_spi_dma_data {
142 * @clk: Pointer to the spi clock. 142 * @clk: Pointer to the spi clock.
143 * @src_clk: Pointer to the clock used to generate SPI signals. 143 * @src_clk: Pointer to the clock used to generate SPI signals.
144 * @master: Pointer to the SPI Protocol master. 144 * @master: Pointer to the SPI Protocol master.
145 * @workqueue: Work queue for the SPI xfer requests.
146 * @cntrlr_info: Platform specific data for the controller this driver manages. 145 * @cntrlr_info: Platform specific data for the controller this driver manages.
147 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint. 146 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
148 * @work: Work
149 * @queue: To log SPI xfer requests. 147 * @queue: To log SPI xfer requests.
150 * @lock: Controller specific lock. 148 * @lock: Controller specific lock.
151 * @state: Set of FLAGS to indicate status. 149 * @state: Set of FLAGS to indicate status.
@@ -153,6 +151,7 @@ struct s3c64xx_spi_dma_data {
153 * @tx_dmach: Controller's DMA channel for Tx. 151 * @tx_dmach: Controller's DMA channel for Tx.
154 * @sfr_start: BUS address of SPI controller regs. 152 * @sfr_start: BUS address of SPI controller regs.
155 * @regs: Pointer to ioremap'ed controller registers. 153 * @regs: Pointer to ioremap'ed controller registers.
154 * @irq: interrupt number used by the controller.
156 * @xfer_completion: To indicate completion of xfer task. 155 * @xfer_completion: To indicate completion of xfer task.
157 * @cur_mode: Stores the active configuration of the controller. 156 * @cur_mode: Stores the active configuration of the controller.
158 * @cur_bpw: Stores the active bits per word settings. 157 * @cur_bpw: Stores the active bits per word settings.
@@ -164,10 +163,8 @@ struct s3c64xx_spi_driver_data {
164 struct clk *src_clk; 163 struct clk *src_clk;
165 struct platform_device *pdev; 164 struct platform_device *pdev;
166 struct spi_master *master; 165 struct spi_master *master;
167 struct workqueue_struct *workqueue;
168 struct s3c64xx_spi_info *cntrlr_info; 166 struct s3c64xx_spi_info *cntrlr_info;
169 struct spi_device *tgl_spi; 167 struct spi_device *tgl_spi;
170 struct work_struct work;
171 struct list_head queue; 168 struct list_head queue;
172 spinlock_t lock; 169 spinlock_t lock;
173 unsigned long sfr_start; 170 unsigned long sfr_start;
@@ -239,7 +236,7 @@ static void s3c64xx_spi_dmacb(void *data)
239 struct s3c64xx_spi_dma_data *dma = data; 236 struct s3c64xx_spi_dma_data *dma = data;
240 unsigned long flags; 237 unsigned long flags;
241 238
242 if (dma->direction == DMA_FROM_DEVICE) 239 if (dma->direction == DMA_DEV_TO_MEM)
243 sdd = container_of(data, 240 sdd = container_of(data,
244 struct s3c64xx_spi_driver_data, rx_dma); 241 struct s3c64xx_spi_driver_data, rx_dma);
245 else 242 else
@@ -248,7 +245,7 @@ static void s3c64xx_spi_dmacb(void *data)
248 245
249 spin_lock_irqsave(&sdd->lock, flags); 246 spin_lock_irqsave(&sdd->lock, flags);
250 247
251 if (dma->direction == DMA_FROM_DEVICE) { 248 if (dma->direction == DMA_DEV_TO_MEM) {
252 sdd->state &= ~RXBUSY; 249 sdd->state &= ~RXBUSY;
253 if (!(sdd->state & TXBUSY)) 250 if (!(sdd->state & TXBUSY))
254 complete(&sdd->xfer_completion); 251 complete(&sdd->xfer_completion);
@@ -267,7 +264,7 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
267 struct s3c64xx_spi_driver_data *sdd; 264 struct s3c64xx_spi_driver_data *sdd;
268 struct samsung_dma_prep_info info; 265 struct samsung_dma_prep_info info;
269 266
270 if (dma->direction == DMA_FROM_DEVICE) 267 if (dma->direction == DMA_DEV_TO_MEM)
271 sdd = container_of((void *)dma, 268 sdd = container_of((void *)dma,
272 struct s3c64xx_spi_driver_data, rx_dma); 269 struct s3c64xx_spi_driver_data, rx_dma);
273 else 270 else
@@ -634,9 +631,10 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
634 } 631 }
635} 632}
636 633
637static void handle_msg(struct s3c64xx_spi_driver_data *sdd, 634static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
638 struct spi_message *msg) 635 struct spi_message *msg)
639{ 636{
637 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
640 struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 638 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
641 struct spi_device *spi = msg->spi; 639 struct spi_device *spi = msg->spi;
642 struct s3c64xx_spi_csinfo *cs = spi->controller_data; 640 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
@@ -766,73 +764,33 @@ out:
766 764
767 msg->status = status; 765 msg->status = status;
768 766
769 if (msg->complete) 767 spi_finalize_current_message(master);
770 msg->complete(msg->context); 768
769 return 0;
771} 770}
772 771
773static void s3c64xx_spi_work(struct work_struct *work) 772static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
774{ 773{
775 struct s3c64xx_spi_driver_data *sdd = container_of(work, 774 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
776 struct s3c64xx_spi_driver_data, work);
777 unsigned long flags;
778 775
779 /* Acquire DMA channels */ 776 /* Acquire DMA channels */
780 while (!acquire_dma(sdd)) 777 while (!acquire_dma(sdd))
781 msleep(10); 778 msleep(10);
782 779
783 spin_lock_irqsave(&sdd->lock, flags); 780 pm_runtime_get_sync(&sdd->pdev->dev);
784
785 while (!list_empty(&sdd->queue)
786 && !(sdd->state & SUSPND)) {
787
788 struct spi_message *msg;
789
790 msg = container_of(sdd->queue.next, struct spi_message, queue);
791
792 list_del_init(&msg->queue);
793
794 /* Set Xfer busy flag */
795 sdd->state |= SPIBUSY;
796
797 spin_unlock_irqrestore(&sdd->lock, flags);
798
799 handle_msg(sdd, msg);
800 781
801 spin_lock_irqsave(&sdd->lock, flags); 782 return 0;
802 783}
803 sdd->state &= ~SPIBUSY;
804 }
805 784
806 spin_unlock_irqrestore(&sdd->lock, flags); 785static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
786{
787 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
807 788
808 /* Free DMA channels */ 789 /* Free DMA channels */
809 sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client); 790 sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
810 sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client); 791 sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
811}
812 792
813static int s3c64xx_spi_transfer(struct spi_device *spi, 793 pm_runtime_put(&sdd->pdev->dev);
814 struct spi_message *msg)
815{
816 struct s3c64xx_spi_driver_data *sdd;
817 unsigned long flags;
818
819 sdd = spi_master_get_devdata(spi->master);
820
821 spin_lock_irqsave(&sdd->lock, flags);
822
823 if (sdd->state & SUSPND) {
824 spin_unlock_irqrestore(&sdd->lock, flags);
825 return -ESHUTDOWN;
826 }
827
828 msg->status = -EINPROGRESS;
829 msg->actual_length = 0;
830
831 list_add_tail(&msg->queue, &sdd->queue);
832
833 queue_work(sdd->workqueue, &sdd->work);
834
835 spin_unlock_irqrestore(&sdd->lock, flags);
836 794
837 return 0; 795 return 0;
838} 796}
@@ -872,13 +830,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
872 } 830 }
873 } 831 }
874 832
875 if (sdd->state & SUSPND) {
876 spin_unlock_irqrestore(&sdd->lock, flags);
877 dev_err(&spi->dev,
878 "setup: SPI-%d not active!\n", spi->master->bus_num);
879 return -ESHUTDOWN;
880 }
881
882 spin_unlock_irqrestore(&sdd->lock, flags); 833 spin_unlock_irqrestore(&sdd->lock, flags);
883 834
884 if (spi->bits_per_word != 8 835 if (spi->bits_per_word != 8
@@ -890,6 +841,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
890 goto setup_exit; 841 goto setup_exit;
891 } 842 }
892 843
844 pm_runtime_get_sync(&sdd->pdev->dev);
845
893 /* Check if we can provide the requested rate */ 846 /* Check if we can provide the requested rate */
894 if (!sci->clk_from_cmu) { 847 if (!sci->clk_from_cmu) {
895 u32 psr, speed; 848 u32 psr, speed;
@@ -922,6 +875,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
922 err = -EINVAL; 875 err = -EINVAL;
923 } 876 }
924 877
878 pm_runtime_put(&sdd->pdev->dev);
879
925setup_exit: 880setup_exit:
926 881
927 /* setup() returns with device de-selected */ 882 /* setup() returns with device de-selected */
@@ -930,6 +885,33 @@ setup_exit:
930 return err; 885 return err;
931} 886}
932 887
888static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
889{
890 struct s3c64xx_spi_driver_data *sdd = data;
891 struct spi_master *spi = sdd->master;
892 unsigned int val;
893
894 val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR);
895
896 val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR |
897 S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
898 S3C64XX_SPI_PND_TX_OVERRUN_CLR |
899 S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
900
901 writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
902
903 if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
904 dev_err(&spi->dev, "RX overrun\n");
905 if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR)
906 dev_err(&spi->dev, "RX underrun\n");
907 if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR)
908 dev_err(&spi->dev, "TX overrun\n");
909 if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR)
910 dev_err(&spi->dev, "TX underrun\n");
911
912 return IRQ_HANDLED;
913}
914
933static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) 915static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
934{ 916{
935 struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 917 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
@@ -970,7 +952,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
970 struct s3c64xx_spi_driver_data *sdd; 952 struct s3c64xx_spi_driver_data *sdd;
971 struct s3c64xx_spi_info *sci; 953 struct s3c64xx_spi_info *sci;
972 struct spi_master *master; 954 struct spi_master *master;
973 int ret; 955 int ret, irq;
974 char clk_name[16]; 956 char clk_name[16];
975 957
976 if (pdev->id < 0) { 958 if (pdev->id < 0) {
@@ -1006,6 +988,12 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1006 return -ENXIO; 988 return -ENXIO;
1007 } 989 }
1008 990
991 irq = platform_get_irq(pdev, 0);
992 if (irq < 0) {
993 dev_warn(&pdev->dev, "Failed to get IRQ: %d\n", irq);
994 return irq;
995 }
996
1009 master = spi_alloc_master(&pdev->dev, 997 master = spi_alloc_master(&pdev->dev,
1010 sizeof(struct s3c64xx_spi_driver_data)); 998 sizeof(struct s3c64xx_spi_driver_data));
1011 if (master == NULL) { 999 if (master == NULL) {
@@ -1021,15 +1009,17 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1021 sdd->pdev = pdev; 1009 sdd->pdev = pdev;
1022 sdd->sfr_start = mem_res->start; 1010 sdd->sfr_start = mem_res->start;
1023 sdd->tx_dma.dmach = dmatx_res->start; 1011 sdd->tx_dma.dmach = dmatx_res->start;
1024 sdd->tx_dma.direction = DMA_TO_DEVICE; 1012 sdd->tx_dma.direction = DMA_MEM_TO_DEV;
1025 sdd->rx_dma.dmach = dmarx_res->start; 1013 sdd->rx_dma.dmach = dmarx_res->start;
1026 sdd->rx_dma.direction = DMA_FROM_DEVICE; 1014 sdd->rx_dma.direction = DMA_DEV_TO_MEM;
1027 1015
1028 sdd->cur_bpw = 8; 1016 sdd->cur_bpw = 8;
1029 1017
1030 master->bus_num = pdev->id; 1018 master->bus_num = pdev->id;
1031 master->setup = s3c64xx_spi_setup; 1019 master->setup = s3c64xx_spi_setup;
1032 master->transfer = s3c64xx_spi_transfer; 1020 master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
1021 master->transfer_one_message = s3c64xx_spi_transfer_one_message;
1022 master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
1033 master->num_chipselect = sci->num_cs; 1023 master->num_chipselect = sci->num_cs;
1034 master->dma_alignment = 8; 1024 master->dma_alignment = 8;
1035 /* the spi->mode bits understood by this driver: */ 1025 /* the spi->mode bits understood by this driver: */
@@ -1084,22 +1074,24 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1084 goto err6; 1074 goto err6;
1085 } 1075 }
1086 1076
1087 sdd->workqueue = create_singlethread_workqueue(
1088 dev_name(master->dev.parent));
1089 if (sdd->workqueue == NULL) {
1090 dev_err(&pdev->dev, "Unable to create workqueue\n");
1091 ret = -ENOMEM;
1092 goto err7;
1093 }
1094
1095	/* Setup Default Mode */				1077	/* Setup Default Mode */
1096 s3c64xx_spi_hwinit(sdd, pdev->id); 1078 s3c64xx_spi_hwinit(sdd, pdev->id);
1097 1079
1098 spin_lock_init(&sdd->lock); 1080 spin_lock_init(&sdd->lock);
1099 init_completion(&sdd->xfer_completion); 1081 init_completion(&sdd->xfer_completion);
1100 INIT_WORK(&sdd->work, s3c64xx_spi_work);
1101 INIT_LIST_HEAD(&sdd->queue); 1082 INIT_LIST_HEAD(&sdd->queue);
1102 1083
1084 ret = request_irq(irq, s3c64xx_spi_irq, 0, "spi-s3c64xx", sdd);
1085 if (ret != 0) {
1086 dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
1087 irq, ret);
1088 goto err7;
1089 }
1090
1091 writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
1092 S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
1093 sdd->regs + S3C64XX_SPI_INT_EN);
1094
1103 if (spi_register_master(master)) { 1095 if (spi_register_master(master)) {
1104 dev_err(&pdev->dev, "cannot register SPI master\n"); 1096 dev_err(&pdev->dev, "cannot register SPI master\n");
1105 ret = -EBUSY; 1097 ret = -EBUSY;
@@ -1113,10 +1105,12 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
1113 mem_res->end, mem_res->start, 1105 mem_res->end, mem_res->start,
1114 sdd->rx_dma.dmach, sdd->tx_dma.dmach); 1106 sdd->rx_dma.dmach, sdd->tx_dma.dmach);
1115 1107
1108 pm_runtime_enable(&pdev->dev);
1109
1116 return 0; 1110 return 0;
1117 1111
1118err8: 1112err8:
1119 destroy_workqueue(sdd->workqueue); 1113 free_irq(irq, sdd);
1120err7: 1114err7:
1121 clk_disable(sdd->src_clk); 1115 clk_disable(sdd->src_clk);
1122err6: 1116err6:
@@ -1142,18 +1136,14 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
1142 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1136 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1143 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1137 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1144 struct resource *mem_res; 1138 struct resource *mem_res;
1145 unsigned long flags;
1146
1147 spin_lock_irqsave(&sdd->lock, flags);
1148 sdd->state |= SUSPND;
1149 spin_unlock_irqrestore(&sdd->lock, flags);
1150 1139
1151 while (sdd->state & SPIBUSY) 1140 pm_runtime_disable(&pdev->dev);
1152 msleep(10);
1153 1141
1154 spi_unregister_master(master); 1142 spi_unregister_master(master);
1155 1143
1156 destroy_workqueue(sdd->workqueue); 1144 writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
1145
1146 free_irq(platform_get_irq(pdev, 0), sdd);
1157 1147
1158 clk_disable(sdd->src_clk); 1148 clk_disable(sdd->src_clk);
1159 clk_put(sdd->src_clk); 1149 clk_put(sdd->src_clk);
@@ -1174,18 +1164,12 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
1174} 1164}
1175 1165
1176#ifdef CONFIG_PM 1166#ifdef CONFIG_PM
1177static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state) 1167static int s3c64xx_spi_suspend(struct device *dev)
1178{ 1168{
1179 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1169 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1180 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1170 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1181 unsigned long flags;
1182
1183 spin_lock_irqsave(&sdd->lock, flags);
1184 sdd->state |= SUSPND;
1185 spin_unlock_irqrestore(&sdd->lock, flags);
1186 1171
1187 while (sdd->state & SPIBUSY) 1172 spi_master_suspend(master);
1188 msleep(10);
1189 1173
1190 /* Disable the clock */ 1174 /* Disable the clock */
1191 clk_disable(sdd->src_clk); 1175 clk_disable(sdd->src_clk);
@@ -1196,12 +1180,12 @@ static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1196 return 0; 1180 return 0;
1197} 1181}
1198 1182
1199static int s3c64xx_spi_resume(struct platform_device *pdev) 1183static int s3c64xx_spi_resume(struct device *dev)
1200{ 1184{
1201 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); 1185 struct platform_device *pdev = to_platform_device(dev);
1186 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1202 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); 1187 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1203 struct s3c64xx_spi_info *sci = sdd->cntrlr_info; 1188 struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
1204 unsigned long flags;
1205 1189
1206 sci->cfg_gpio(pdev); 1190 sci->cfg_gpio(pdev);
1207 1191
@@ -1211,25 +1195,49 @@ static int s3c64xx_spi_resume(struct platform_device *pdev)
1211 1195
1212 s3c64xx_spi_hwinit(sdd, pdev->id); 1196 s3c64xx_spi_hwinit(sdd, pdev->id);
1213 1197
1214 spin_lock_irqsave(&sdd->lock, flags); 1198 spi_master_resume(master);
1215 sdd->state &= ~SUSPND;
1216 spin_unlock_irqrestore(&sdd->lock, flags);
1217 1199
1218 return 0; 1200 return 0;
1219} 1201}
1220#else
1221#define s3c64xx_spi_suspend NULL
1222#define s3c64xx_spi_resume NULL
1223#endif /* CONFIG_PM */ 1202#endif /* CONFIG_PM */
1224 1203
1204#ifdef CONFIG_PM_RUNTIME
1205static int s3c64xx_spi_runtime_suspend(struct device *dev)
1206{
1207 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1208 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1209
1210 clk_disable(sdd->clk);
1211 clk_disable(sdd->src_clk);
1212
1213 return 0;
1214}
1215
1216static int s3c64xx_spi_runtime_resume(struct device *dev)
1217{
1218 struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
1219 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1220
1221 clk_enable(sdd->src_clk);
1222 clk_enable(sdd->clk);
1223
1224 return 0;
1225}
1226#endif /* CONFIG_PM_RUNTIME */
1227
1228static const struct dev_pm_ops s3c64xx_spi_pm = {
1229 SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
1230 SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
1231 s3c64xx_spi_runtime_resume, NULL)
1232};
1233
1225static struct platform_driver s3c64xx_spi_driver = { 1234static struct platform_driver s3c64xx_spi_driver = {
1226 .driver = { 1235 .driver = {
1227 .name = "s3c64xx-spi", 1236 .name = "s3c64xx-spi",
1228 .owner = THIS_MODULE, 1237 .owner = THIS_MODULE,
1238 .pm = &s3c64xx_spi_pm,
1229 }, 1239 },
1230 .remove = s3c64xx_spi_remove, 1240 .remove = s3c64xx_spi_remove,
1231 .suspend = s3c64xx_spi_suspend,
1232 .resume = s3c64xx_spi_resume,
1233}; 1241};
1234MODULE_ALIAS("platform:s3c64xx-spi"); 1242MODULE_ALIAS("platform:s3c64xx-spi");
1235 1243
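
The s3c64xx changes above drop the driver's private workqueue and move to the SPI core's queued-transfer interface: the core pumps queued messages and calls prepare_transfer_hardware() once before a run, transfer_one_message() per message, and unprepare_transfer_hardware() when the queue drains. A minimal skeleton of that contract follows; the names and the nearly empty bodies are illustrative, not the s3c64xx code:

	#include <linux/spi/spi.h>
	#include <linux/list.h>

	/* Called once before the core starts pushing queued messages. */
	static int example_prepare_transfer(struct spi_master *master)
	{
		/* a real driver grabs clocks/DMA here, e.g. via pm_runtime_get_sync() */
		return 0;
	}

	/* Called once per queued spi_message. */
	static int example_transfer_one_message(struct spi_master *master,
						struct spi_message *msg)
	{
		struct spi_transfer *t;

		list_for_each_entry(t, &msg->transfers, transfer_list)
			msg->actual_length += t->len;	/* a real driver does the I/O here */

		msg->status = 0;
		spi_finalize_current_message(master);	/* hand the message back to the core */
		return 0;
	}

	/* Called when the queue runs empty. */
	static int example_unprepare_transfer(struct spi_master *master)
	{
		/* release whatever prepare acquired, e.g. pm_runtime_put() */
		return 0;
	}

These three hooks are simply assigned to master->prepare_transfer_hardware, master->transfer_one_message and master->unprepare_transfer_hardware before spi_register_master(), exactly as the probe hunk above does for the s3c64xx callbacks.
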
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
new file mode 100644
index 000000000000..934138c7b3d3
--- /dev/null
+++ b/drivers/spi/spi-sh-hspi.c
@@ -0,0 +1,331 @@
1/*
2 * SuperH HSPI bus driver
3 *
4 * Copyright (C) 2011 Kuninori Morimoto
5 *
6 * Based on spi-sh.c:
7 * Based on pxa2xx_spi.c:
8 * Copyright (C) 2011 Renesas Solutions Corp.
9 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2 of the License.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 *
24 */
25
26#include <linux/clk.h>
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/timer.h>
30#include <linux/delay.h>
31#include <linux/list.h>
32#include <linux/interrupt.h>
33#include <linux/platform_device.h>
34#include <linux/pm_runtime.h>
35#include <linux/io.h>
36#include <linux/spi/spi.h>
37#include <linux/spi/sh_hspi.h>
38
39#define SPCR 0x00
40#define SPSR 0x04
41#define SPSCR 0x08
42#define SPTBR 0x0C
43#define SPRBR 0x10
44#define SPCR2 0x14
45
46/* SPSR */
47#define RXFL (1 << 2)
48
49#define hspi2info(h) (h->dev->platform_data)
50
51struct hspi_priv {
52 void __iomem *addr;
53 struct spi_master *master;
54 struct device *dev;
55 struct clk *clk;
56};
57
58/*
59 * basic function
60 */
61static void hspi_write(struct hspi_priv *hspi, int reg, u32 val)
62{
63 iowrite32(val, hspi->addr + reg);
64}
65
66static u32 hspi_read(struct hspi_priv *hspi, int reg)
67{
68 return ioread32(hspi->addr + reg);
69}
70
71/*
72 * transfer function
73 */
74static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
75{
76 int t = 256;
77
78 while (t--) {
79 if ((mask & hspi_read(hspi, SPSR)) == val)
80 return 0;
81
82 msleep(20);
83 }
84
85 dev_err(hspi->dev, "timeout\n");
86 return -ETIMEDOUT;
87}
88
89/*
90 * spi master function
91 */
92static int hspi_prepare_transfer(struct spi_master *master)
93{
94 struct hspi_priv *hspi = spi_master_get_devdata(master);
95
96 pm_runtime_get_sync(hspi->dev);
97 return 0;
98}
99
100static int hspi_unprepare_transfer(struct spi_master *master)
101{
102 struct hspi_priv *hspi = spi_master_get_devdata(master);
103
104 pm_runtime_put_sync(hspi->dev);
105 return 0;
106}
107
108static void hspi_hw_setup(struct hspi_priv *hspi,
109 struct spi_message *msg,
110 struct spi_transfer *t)
111{
112 struct spi_device *spi = msg->spi;
113 struct device *dev = hspi->dev;
114 u32 target_rate;
115 u32 spcr, idiv_clk;
116 u32 rate, best_rate, min, tmp;
117
118 target_rate = t ? t->speed_hz : 0;
119 if (!target_rate)
120 target_rate = spi->max_speed_hz;
121
122 /*
123 * find best IDIV/CLKCx settings
124 */
125 min = ~0;
126 best_rate = 0;
127 spcr = 0;
128 for (idiv_clk = 0x00; idiv_clk <= 0x3F; idiv_clk++) {
129 rate = clk_get_rate(hspi->clk);
130
131 /* IDIV calculation */
132 if (idiv_clk & (1 << 5))
133 rate /= 128;
134 else
135 rate /= 16;
136
137 /* CLKCx calculation */
138		rate /= (((idiv_clk & 0x1F) + 1) * 2);
139
140 /* save best settings */
141 tmp = abs(target_rate - rate);
142 if (tmp < min) {
143 min = tmp;
144 spcr = idiv_clk;
145 best_rate = rate;
146 }
147 }
148
149 if (spi->mode & SPI_CPHA)
150 spcr |= 1 << 7;
151 if (spi->mode & SPI_CPOL)
152 spcr |= 1 << 6;
153
154 dev_dbg(dev, "speed %d/%d\n", target_rate, best_rate);
155
156 hspi_write(hspi, SPCR, spcr);
157 hspi_write(hspi, SPSR, 0x0);
158 hspi_write(hspi, SPSCR, 0x1); /* master mode */
159}
160
161static int hspi_transfer_one_message(struct spi_master *master,
162 struct spi_message *msg)
163{
164 struct hspi_priv *hspi = spi_master_get_devdata(master);
165 struct spi_transfer *t;
166 u32 tx;
167 u32 rx;
168 int ret, i;
169
170 dev_dbg(hspi->dev, "%s\n", __func__);
171
172 ret = 0;
173 list_for_each_entry(t, &msg->transfers, transfer_list) {
174 hspi_hw_setup(hspi, msg, t);
175
176 for (i = 0; i < t->len; i++) {
177
178			/* wait while data remains */
179 ret = hspi_status_check_timeout(hspi, 0x1, 0);
180 if (ret < 0)
181 break;
182
183 tx = 0;
184 if (t->tx_buf)
185 tx = (u32)((u8 *)t->tx_buf)[i];
186
187 hspi_write(hspi, SPTBR, tx);
188
189			/* wait for receive */
190 ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
191 if (ret < 0)
192 break;
193
194 rx = hspi_read(hspi, SPRBR);
195 if (t->rx_buf)
196 ((u8 *)t->rx_buf)[i] = (u8)rx;
197
198 }
199
200 msg->actual_length += t->len;
201 }
202
203 msg->status = ret;
204 spi_finalize_current_message(master);
205
206 return ret;
207}
208
209static int hspi_setup(struct spi_device *spi)
210{
211 struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
212 struct device *dev = hspi->dev;
213
214 if (8 != spi->bits_per_word) {
215 dev_err(dev, "bits_per_word should be 8\n");
216 return -EIO;
217 }
218
219 dev_dbg(dev, "%s setup\n", spi->modalias);
220
221 return 0;
222}
223
224static void hspi_cleanup(struct spi_device *spi)
225{
226 struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
227 struct device *dev = hspi->dev;
228
229 dev_dbg(dev, "%s cleanup\n", spi->modalias);
230}
231
232static int __devinit hspi_probe(struct platform_device *pdev)
233{
234 struct resource *res;
235 struct spi_master *master;
236 struct hspi_priv *hspi;
237 struct clk *clk;
238 int ret;
239
240 /* get base addr */
241 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
242 if (!res) {
243 dev_err(&pdev->dev, "invalid resource\n");
244 return -EINVAL;
245 }
246
247 master = spi_alloc_master(&pdev->dev, sizeof(*hspi));
248 if (!master) {
249 dev_err(&pdev->dev, "spi_alloc_master error.\n");
250 return -ENOMEM;
251 }
252
253 clk = clk_get(NULL, "shyway_clk");
254 if (!clk) {
255 dev_err(&pdev->dev, "shyway_clk is required\n");
256 ret = -EINVAL;
257 goto error0;
258 }
259
260 hspi = spi_master_get_devdata(master);
261 dev_set_drvdata(&pdev->dev, hspi);
262
263 /* init hspi */
264 hspi->master = master;
265 hspi->dev = &pdev->dev;
266 hspi->clk = clk;
267 hspi->addr = devm_ioremap(hspi->dev,
268 res->start, resource_size(res));
269 if (!hspi->addr) {
270 dev_err(&pdev->dev, "ioremap error.\n");
271 ret = -ENOMEM;
272 goto error1;
273 }
274
275 master->num_chipselect = 1;
276 master->bus_num = pdev->id;
277 master->setup = hspi_setup;
278 master->cleanup = hspi_cleanup;
279 master->mode_bits = SPI_CPOL | SPI_CPHA;
280 master->prepare_transfer_hardware = hspi_prepare_transfer;
281 master->transfer_one_message = hspi_transfer_one_message;
282 master->unprepare_transfer_hardware = hspi_unprepare_transfer;
283 ret = spi_register_master(master);
284 if (ret < 0) {
285 dev_err(&pdev->dev, "spi_register_master error.\n");
286 goto error2;
287 }
288
289 pm_runtime_enable(&pdev->dev);
290
291 dev_info(&pdev->dev, "probed\n");
292
293 return 0;
294
295 error2:
296 devm_iounmap(hspi->dev, hspi->addr);
297 error1:
298 clk_put(clk);
299 error0:
300 spi_master_put(master);
301
302 return ret;
303}
304
305static int __devexit hspi_remove(struct platform_device *pdev)
306{
307 struct hspi_priv *hspi = dev_get_drvdata(&pdev->dev);
308
309 pm_runtime_disable(&pdev->dev);
310
311 clk_put(hspi->clk);
312 spi_unregister_master(hspi->master);
313 devm_iounmap(hspi->dev, hspi->addr);
314
315 return 0;
316}
317
318static struct platform_driver hspi_driver = {
319 .probe = hspi_probe,
320 .remove = __devexit_p(hspi_remove),
321 .driver = {
322 .name = "sh-hspi",
323 .owner = THIS_MODULE,
324 },
325};
326module_platform_driver(hspi_driver);
327
328MODULE_DESCRIPTION("SuperH HSPI bus driver");
329MODULE_LICENSE("GPL");
330MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
331MODULE_ALIAS("platform:sh_spi");
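
hspi_hw_setup() above picks the closest achievable bit rate by scanning every IDIV/CLKCx combination: the parent clock is divided by 128 or 16 (IDIV) and then by 2 * (CLKC + 1). A standalone sketch of that search, assuming the parent clock rate is already known:

	/* Returns the 6-bit IDIV/CLKCx field whose resulting rate is closest to
	 * "target"; mirrors the loop in hspi_hw_setup(). */
	static unsigned int example_best_idiv_clk(unsigned long parent, unsigned long target)
	{
		unsigned long best_err = ~0UL;
		unsigned int idiv_clk, best = 0;

		for (idiv_clk = 0; idiv_clk <= 0x3F; idiv_clk++) {
			unsigned long rate = parent;
			unsigned long err;

			rate /= (idiv_clk & (1 << 5)) ? 128 : 16;	/* IDIV divider */
			rate /= ((idiv_clk & 0x1F) + 1) * 2;		/* CLKCx divider */

			err = (rate > target) ? rate - target : target - rate;
			if (err < best_err) {
				best_err = err;
				best = idiv_clk;
			}
		}

		return best;
	}
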
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
index 70c8af9f7ccc..79442c31bcd9 100644
--- a/drivers/spi/spi-sh.c
+++ b/drivers/spi/spi-sh.c
@@ -92,17 +92,26 @@ struct spi_sh_data {
92 unsigned long cr1; 92 unsigned long cr1;
93 wait_queue_head_t wait; 93 wait_queue_head_t wait;
94 spinlock_t lock; 94 spinlock_t lock;
95 int width;
95}; 96};
96 97
97static void spi_sh_write(struct spi_sh_data *ss, unsigned long data, 98static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
98 unsigned long offset) 99 unsigned long offset)
99{ 100{
100 writel(data, ss->addr + offset); 101 if (ss->width == 8)
102 iowrite8(data, ss->addr + (offset >> 2));
103 else if (ss->width == 32)
104 iowrite32(data, ss->addr + offset);
101} 105}
102 106
103static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset) 107static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
104{ 108{
105 return readl(ss->addr + offset); 109 if (ss->width == 8)
110 return ioread8(ss->addr + (offset >> 2));
111 else if (ss->width == 32)
112 return ioread32(ss->addr + offset);
113 else
114 return 0;
106} 115}
107 116
108static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val, 117static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
@@ -464,6 +473,18 @@ static int __devinit spi_sh_probe(struct platform_device *pdev)
464 ss = spi_master_get_devdata(master); 473 ss = spi_master_get_devdata(master);
465 dev_set_drvdata(&pdev->dev, ss); 474 dev_set_drvdata(&pdev->dev, ss);
466 475
476 switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
477 case IORESOURCE_MEM_8BIT:
478 ss->width = 8;
479 break;
480 case IORESOURCE_MEM_32BIT:
481 ss->width = 32;
482 break;
483 default:
484		dev_err(&pdev->dev, "Unsupported register width\n");
485 ret = -ENODEV;
486 goto error1;
487 }
467 ss->irq = irq; 488 ss->irq = irq;
468 ss->master = master; 489 ss->master = master;
469 ss->addr = ioremap(res->start, resource_size(res)); 490 ss->addr = ioremap(res->start, resource_size(res));
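
The spi-sh.c hunk above selects 8-bit or 32-bit register accessors from the memory resource's type flags. A sketch of how a board file might advertise an 8-bit register window for this driver; the base address and IRQ number are made-up placeholders, and the device name is assumed to match the spi-sh platform driver:

	#include <linux/kernel.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static struct resource example_spi_sh_resources[] = {
		{
			.start	= 0xfe680000,		/* hypothetical register base */
			.end	= 0xfe680000 + 0x20 - 1,
			.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
		},
		{
			.start	= 33,			/* hypothetical IRQ */
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device example_spi_sh_device = {
		.name		= "sh_spi",		/* assumed platform driver name */
		.id		= 0,
		.resource	= example_spi_sh_resources,
		.num_resources	= ARRAY_SIZE(example_spi_sh_resources),
	};
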
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
new file mode 100644
index 000000000000..52fe495bb32a
--- /dev/null
+++ b/drivers/spi/spi-sirf.c
@@ -0,0 +1,687 @@
1/*
2 * SPI bus driver for CSR SiRFprimaII
3 *
4 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
5 *
6 * Licensed under GPLv2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/clk.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/bitops.h>
17#include <linux/err.h>
18#include <linux/platform_device.h>
19#include <linux/of_gpio.h>
20#include <linux/spi/spi.h>
21#include <linux/spi/spi_bitbang.h>
22#include <linux/pinctrl/pinmux.h>
23
24#define DRIVER_NAME "sirfsoc_spi"
25
26#define SIRFSOC_SPI_CTRL 0x0000
27#define SIRFSOC_SPI_CMD 0x0004
28#define SIRFSOC_SPI_TX_RX_EN 0x0008
29#define SIRFSOC_SPI_INT_EN 0x000C
30#define SIRFSOC_SPI_INT_STATUS 0x0010
31#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
32#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
33#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
34#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
35#define SIRFSOC_SPI_TXFIFO_OP 0x0110
36#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
37#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
38#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
39#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
40#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
41#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
42#define SIRFSOC_SPI_RXFIFO_OP 0x0130
43#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
44#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
45#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
46
47/* SPI CTRL register defines */
48#define SIRFSOC_SPI_SLV_MODE BIT(16)
49#define SIRFSOC_SPI_CMD_MODE BIT(17)
50#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
51#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
52#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
53#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
54#define SIRFSOC_SPI_TRAN_MSB BIT(22)
55#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
56#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
57#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
58#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
59#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
60#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
61#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
62#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
63#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
64#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
65
66/* Interrupt Enable */
67#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
68#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
69#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
70#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
71#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
72#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
73#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
74#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
75#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
76#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
77#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
78
79#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
80
81/* Interrupt status */
82#define SIRFSOC_SPI_RX_DONE BIT(0)
83#define SIRFSOC_SPI_TX_DONE BIT(1)
84#define SIRFSOC_SPI_RX_OFLOW BIT(2)
85#define SIRFSOC_SPI_TX_UFLOW BIT(3)
86#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
87#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
88#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
89#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
90#define SIRFSOC_SPI_FRM_END BIT(10)
91
92/* TX RX enable */
93#define SIRFSOC_SPI_RX_EN BIT(0)
94#define SIRFSOC_SPI_TX_EN BIT(1)
95#define SIRFSOC_SPI_CMD_TX_EN BIT(2)
96
97#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
98#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)
99
100/* FIFO OPs */
101#define SIRFSOC_SPI_FIFO_RESET BIT(0)
102#define SIRFSOC_SPI_FIFO_START BIT(1)
103
104/* FIFO CTRL */
105#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
106#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
107#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
108
109/* FIFO Status */
110#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF
111#define SIRFSOC_SPI_FIFO_FULL BIT(8)
112#define SIRFSOC_SPI_FIFO_EMPTY BIT(9)
113
114/* 256 bytes rx/tx FIFO */
115#define SIRFSOC_SPI_FIFO_SIZE 256
116#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024)
117
118#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F)
119#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10)
120#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
121#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
122
123struct sirfsoc_spi {
124 struct spi_bitbang bitbang;
125 struct completion done;
126
127 void __iomem *base;
128 u32 ctrl_freq; /* SPI controller clock speed */
129 struct clk *clk;
130 struct pinmux *pmx;
131
132 /* rx & tx bufs from the spi_transfer */
133 const void *tx;
134 void *rx;
135
136 /* place received word into rx buffer */
137 void (*rx_word) (struct sirfsoc_spi *);
138 /* get word from tx buffer for sending */
139 void (*tx_word) (struct sirfsoc_spi *);
140
141	/* number of words left to be transmitted/received */
142 unsigned int left_tx_cnt;
143 unsigned int left_rx_cnt;
144
145 /* tasklet to push tx msg into FIFO */
146 struct tasklet_struct tasklet_tx;
147
148 int chipselect[0];
149};
150
151static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
152{
153 u32 data;
154 u8 *rx = sspi->rx;
155
156 data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
157
158 if (rx) {
159 *rx++ = (u8) data;
160 sspi->rx = rx;
161 }
162
163 sspi->left_rx_cnt--;
164}
165
166static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
167{
168 u32 data = 0;
169 const u8 *tx = sspi->tx;
170
171 if (tx) {
172 data = *tx++;
173 sspi->tx = tx;
174 }
175
176 writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
177 sspi->left_tx_cnt--;
178}
179
180static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
181{
182 u32 data;
183 u16 *rx = sspi->rx;
184
185 data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
186
187 if (rx) {
188 *rx++ = (u16) data;
189 sspi->rx = rx;
190 }
191
192 sspi->left_rx_cnt--;
193}
194
195static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
196{
197 u32 data = 0;
198 const u16 *tx = sspi->tx;
199
200 if (tx) {
201 data = *tx++;
202 sspi->tx = tx;
203 }
204
205 writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
206 sspi->left_tx_cnt--;
207}
208
209static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
210{
211 u32 data;
212 u32 *rx = sspi->rx;
213
214 data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
215
216 if (rx) {
217 *rx++ = (u32) data;
218 sspi->rx = rx;
219 }
220
221 sspi->left_rx_cnt--;
222
223}
224
225static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
226{
227 u32 data = 0;
228 const u32 *tx = sspi->tx;
229
230 if (tx) {
231 data = *tx++;
232 sspi->tx = tx;
233 }
234
235 writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
236 sspi->left_tx_cnt--;
237}
238
239static void spi_sirfsoc_tasklet_tx(unsigned long arg)
240{
241 struct sirfsoc_spi *sspi = (struct sirfsoc_spi *)arg;
242
243 /* Fill Tx FIFO while there are left words to be transmitted */
244 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) &
245 SIRFSOC_SPI_FIFO_FULL)) &&
246 sspi->left_tx_cnt)
247 sspi->tx_word(sspi);
248}
249
250static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
251{
252 struct sirfsoc_spi *sspi = dev_id;
253 u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
254
255 writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
256
257 /* Error Conditions */
258 if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
259 spi_stat & SIRFSOC_SPI_TX_UFLOW) {
260 complete(&sspi->done);
261 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
262 }
263
264 if (spi_stat & SIRFSOC_SPI_FRM_END) {
265 while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
266 & SIRFSOC_SPI_FIFO_EMPTY)) &&
267 sspi->left_rx_cnt)
268 sspi->rx_word(sspi);
269
270 /* Received all words */
271 if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) {
272 complete(&sspi->done);
273 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
274 }
275 }
276
277 if (spi_stat & SIRFSOC_SPI_RXFIFO_THD_REACH ||
278 spi_stat & SIRFSOC_SPI_TXFIFO_THD_REACH ||
279 spi_stat & SIRFSOC_SPI_RX_FIFO_FULL ||
280 spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
281 tasklet_schedule(&sspi->tasklet_tx);
282
283 return IRQ_HANDLED;
284}
285
286static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
287{
288 struct sirfsoc_spi *sspi;
289 int timeout = t->len * 10;
290 sspi = spi_master_get_devdata(spi->master);
291
292 sspi->tx = t->tx_buf;
293 sspi->rx = t->rx_buf;
294 sspi->left_tx_cnt = sspi->left_rx_cnt = t->len;
295 INIT_COMPLETION(sspi->done);
296
297 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
298
299 if (t->len == 1) {
300 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
301 SIRFSOC_SPI_ENA_AUTO_CLR,
302 sspi->base + SIRFSOC_SPI_CTRL);
303 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
304 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
305 } else if ((t->len > 1) && (t->len < SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
306 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
307 SIRFSOC_SPI_MUL_DAT_MODE |
308 SIRFSOC_SPI_ENA_AUTO_CLR,
309 sspi->base + SIRFSOC_SPI_CTRL);
310 writel(t->len - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
311 writel(t->len - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
312 } else {
313 writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
314 sspi->base + SIRFSOC_SPI_CTRL);
315 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
316 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
317 }
318
319 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
320 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
321 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
322 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
323
324 /* Send the first word to trigger the whole tx/rx process */
325 sspi->tx_word(sspi);
326
327 writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
328 SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
329 SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
330 SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
331 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
332
333 if (wait_for_completion_timeout(&sspi->done, timeout) == 0)
334 dev_err(&spi->dev, "transfer timeout\n");
335
336 /* TX, RX FIFO stop */
337 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
338 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
339 writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
340 writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
341
342 return t->len - sspi->left_rx_cnt;
343}
344
345static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
346{
347 struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
348
349 if (sspi->chipselect[spi->chip_select] == 0) {
350 u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
351 regval |= SIRFSOC_SPI_CS_IO_OUT;
352 switch (value) {
353 case BITBANG_CS_ACTIVE:
354 if (spi->mode & SPI_CS_HIGH)
355 regval |= SIRFSOC_SPI_CS_IO_OUT;
356 else
357 regval &= ~SIRFSOC_SPI_CS_IO_OUT;
358 break;
359 case BITBANG_CS_INACTIVE:
360 if (spi->mode & SPI_CS_HIGH)
361 regval &= ~SIRFSOC_SPI_CS_IO_OUT;
362 else
363 regval |= SIRFSOC_SPI_CS_IO_OUT;
364 break;
365 }
366 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
367 } else {
368 int gpio = sspi->chipselect[spi->chip_select];
369 gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
370 }
371}
372
373static int
374spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
375{
376 struct sirfsoc_spi *sspi;
377 u8 bits_per_word = 0;
378 int hz = 0;
379 u32 regval;
380 u32 txfifo_ctrl, rxfifo_ctrl;
381 u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
382
383 sspi = spi_master_get_devdata(spi->master);
384
385 bits_per_word = t && t->bits_per_word ? t->bits_per_word :
386 spi->bits_per_word;
387 hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
388
389 /* Enable IO mode for RX, TX */
390 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
391 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
392 regval = (sspi->ctrl_freq / (2 * hz)) - 1;
393
394 if (regval > 0xFFFF || regval < 0) {
395 dev_err(&spi->dev, "Speed %d not supported\n", hz);
396 return -EINVAL;
397 }
398
399 switch (bits_per_word) {
400 case 8:
401 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
402 sspi->rx_word = spi_sirfsoc_rx_word_u8;
403 sspi->tx_word = spi_sirfsoc_tx_word_u8;
404 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
405 SIRFSOC_SPI_FIFO_WIDTH_BYTE;
406 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
407 SIRFSOC_SPI_FIFO_WIDTH_BYTE;
408 break;
409 case 12:
410 case 16:
411 regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
412 SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
413 sspi->rx_word = spi_sirfsoc_rx_word_u16;
414 sspi->tx_word = spi_sirfsoc_tx_word_u16;
415 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
416 SIRFSOC_SPI_FIFO_WIDTH_WORD;
417 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
418 SIRFSOC_SPI_FIFO_WIDTH_WORD;
419 break;
420 case 32:
421 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
422 sspi->rx_word = spi_sirfsoc_rx_word_u32;
423 sspi->tx_word = spi_sirfsoc_tx_word_u32;
424 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
425 SIRFSOC_SPI_FIFO_WIDTH_DWORD;
426 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
427 SIRFSOC_SPI_FIFO_WIDTH_DWORD;
428 break;
429 default:
430 dev_err(&spi->dev, "Bits per word %d not supported\n",
431 bits_per_word);
432 return -EINVAL;
433 }
434
435 if (!(spi->mode & SPI_CS_HIGH))
436 regval |= SIRFSOC_SPI_CS_IDLE_STAT;
437 if (!(spi->mode & SPI_LSB_FIRST))
438 regval |= SIRFSOC_SPI_TRAN_MSB;
439 if (spi->mode & SPI_CPOL)
440 regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
441
442 /*
443 * Data should be driven at least 1/2 cycle before the fetch edge to make
444 * sure that data gets stable at the fetch edge.
445 */
446 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
447 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
448 regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
449 else
450 regval |= SIRFSOC_SPI_DRV_POS_EDGE;
451
452 writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
453 SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
454 SIRFSOC_SPI_FIFO_HC(2),
455 sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
456 writel(SIRFSOC_SPI_FIFO_SC(2) |
457 SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
458 SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
459 sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
460 writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
461 writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
462
463 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
464 return 0;
465}
466
467static int spi_sirfsoc_setup(struct spi_device *spi)
468{
469 struct sirfsoc_spi *sspi;
470
471 if (!spi->max_speed_hz)
472 return -EINVAL;
473
474 sspi = spi_master_get_devdata(spi->master);
475
476 if (!spi->bits_per_word)
477 spi->bits_per_word = 8;
478
479 return spi_sirfsoc_setup_transfer(spi, NULL);
480}
481
482static int __devinit spi_sirfsoc_probe(struct platform_device *pdev)
483{
484 struct sirfsoc_spi *sspi;
485 struct spi_master *master;
486 struct resource *mem_res;
487 int num_cs, cs_gpio, irq;
488 int i;
489 int ret;
490
491 ret = of_property_read_u32(pdev->dev.of_node,
492 "sirf,spi-num-chipselects", &num_cs);
493 if (ret < 0) {
494 dev_err(&pdev->dev, "Unable to get chip select number\n");
495 goto err_cs;
496 }
497
498 master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
499 if (!master) {
500 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
501 return -ENOMEM;
502 }
503 platform_set_drvdata(pdev, master);
504 sspi = spi_master_get_devdata(master);
505
506 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
507 if (!mem_res) {
508 dev_err(&pdev->dev, "Unable to get IO resource\n");
509 ret = -ENODEV;
510 goto free_master;
511 }
512 master->num_chipselect = num_cs;
513
514 for (i = 0; i < master->num_chipselect; i++) {
515 cs_gpio = of_get_named_gpio(pdev->dev.of_node, "cs-gpios", i);
516 if (cs_gpio < 0) {
517 dev_err(&pdev->dev, "can't get cs gpio from DT\n");
518 ret = -ENODEV;
519 goto free_master;
520 }
521
522 sspi->chipselect[i] = cs_gpio;
523 if (cs_gpio == 0)
524 continue; /* use cs from spi controller */
525
526 ret = gpio_request(cs_gpio, DRIVER_NAME);
527 if (ret) {
528 while (i > 0) {
529 i--;
530 if (sspi->chipselect[i] > 0)
531 gpio_free(sspi->chipselect[i]);
532 }
533 dev_err(&pdev->dev, "fail to request cs gpios\n");
534 goto free_master;
535 }
536 }
537
538 sspi->base = devm_request_and_ioremap(&pdev->dev, mem_res);
539 if (!sspi->base) {
540 dev_err(&pdev->dev, "IO remap failed!\n");
541 ret = -ENOMEM;
542 goto free_master;
543 }
544
545 irq = platform_get_irq(pdev, 0);
546 if (irq < 0) {
547 ret = -ENXIO;
548 goto free_master;
549 }
550 ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
551 DRIVER_NAME, sspi);
552 if (ret)
553 goto free_master;
554
555 sspi->bitbang.master = spi_master_get(master);
556 sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
557 sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
558 sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
559 sspi->bitbang.master->setup = spi_sirfsoc_setup;
560 master->bus_num = pdev->id;
561 sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
562
563 sspi->pmx = pinmux_get(&pdev->dev, NULL);
564	ret = IS_ERR(sspi->pmx) ? PTR_ERR(sspi->pmx) : 0;
565	if (ret)
566		goto free_master;
567
568 pinmux_enable(sspi->pmx);
569
570 sspi->clk = clk_get(&pdev->dev, NULL);
571 if (IS_ERR(sspi->clk)) {
572 ret = -EINVAL;
573 goto free_pmx;
574 }
575 clk_enable(sspi->clk);
576 sspi->ctrl_freq = clk_get_rate(sspi->clk);
577
578 init_completion(&sspi->done);
579
580 tasklet_init(&sspi->tasklet_tx, spi_sirfsoc_tasklet_tx,
581 (unsigned long)sspi);
582
583 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
584 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
585 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
586 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
587	/* We are not using a dummy delay between command and data */
588 writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
589
590 ret = spi_bitbang_start(&sspi->bitbang);
591 if (ret)
592 goto free_clk;
593
594	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);
595
596 return 0;
597
598free_clk:
599 clk_disable(sspi->clk);
600 clk_put(sspi->clk);
601free_pmx:
602 pinmux_disable(sspi->pmx);
603 pinmux_put(sspi->pmx);
604free_master:
605 spi_master_put(master);
606err_cs:
607 return ret;
608}
609
610static int __devexit spi_sirfsoc_remove(struct platform_device *pdev)
611{
612 struct spi_master *master;
613 struct sirfsoc_spi *sspi;
614 int i;
615
616 master = platform_get_drvdata(pdev);
617 sspi = spi_master_get_devdata(master);
618
619 spi_bitbang_stop(&sspi->bitbang);
620 for (i = 0; i < master->num_chipselect; i++) {
621 if (sspi->chipselect[i] > 0)
622 gpio_free(sspi->chipselect[i]);
623 }
624 clk_disable(sspi->clk);
625 clk_put(sspi->clk);
626 pinmux_disable(sspi->pmx);
627 pinmux_put(sspi->pmx);
628 spi_master_put(master);
629 return 0;
630}
631
632#ifdef CONFIG_PM
633static int spi_sirfsoc_suspend(struct device *dev)
634{
635 struct platform_device *pdev = to_platform_device(dev);
636 struct spi_master *master = platform_get_drvdata(pdev);
637 struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
638
639 clk_disable(sspi->clk);
640 return 0;
641}
642
643static int spi_sirfsoc_resume(struct device *dev)
644{
645 struct platform_device *pdev = to_platform_device(dev);
646 struct spi_master *master = platform_get_drvdata(pdev);
647 struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
648
649 clk_enable(sspi->clk);
650 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
651 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
652 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
653 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
654
655 return 0;
656}
657
658static const struct dev_pm_ops spi_sirfsoc_pm_ops = {
659 .suspend = spi_sirfsoc_suspend,
660 .resume = spi_sirfsoc_resume,
661};
662#endif
663
664static const struct of_device_id spi_sirfsoc_of_match[] = {
665 { .compatible = "sirf,prima2-spi", },
666 {}
667};
668MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
669
670static struct platform_driver spi_sirfsoc_driver = {
671 .driver = {
672 .name = DRIVER_NAME,
673 .owner = THIS_MODULE,
674#ifdef CONFIG_PM
675 .pm = &spi_sirfsoc_pm_ops,
676#endif
677 .of_match_table = spi_sirfsoc_of_match,
678 },
679 .probe = spi_sirfsoc_probe,
680 .remove = __devexit_p(spi_sirfsoc_remove),
681};
682module_platform_driver(spi_sirfsoc_driver);
683
684MODULE_DESCRIPTION("SiRF SoC SPI master driver");
685MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, "
686 "Barry Song <Baohua.Song@csr.com>");
687MODULE_LICENSE("GPL v2");
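
For reference, spi_sirfsoc_setup() above rejects devices that leave max_speed_hz
at zero and defaults bits_per_word to 8 before reusing the transfer-setup path.
Below is a minimal sketch of a hypothetical client driver exercising that path
through the generic spi_setup() call; the "example-dev" name and the 1 MHz rate
are illustrative only and are not part of this series.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static int __devinit example_probe(struct spi_device *spi)
{
	/*
	 * bits_per_word is deliberately left at 0 so the controller's
	 * setup() falls back to 8 bits; max_speed_hz must be non-zero
	 * or setup() returns -EINVAL on this controller.
	 */
	spi->mode = SPI_MODE_0;
	spi->max_speed_hz = 1000000;

	return spi_setup(spi);
}

static struct spi_driver example_driver = {
	.driver = {
		.name	= "example-dev",
		.owner	= THIS_MODULE,
	},
	.probe	= example_probe,
};

static int __init example_init(void)
{
	return spi_register_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	spi_unregister_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");
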
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 10182eb50068..5c6fa5ed3366 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -196,6 +196,7 @@ struct pch_spi_data {
196 struct pch_spi_dma_ctrl dma; 196 struct pch_spi_dma_ctrl dma;
197 int use_dma; 197 int use_dma;
198 u8 irq_reg_sts; 198 u8 irq_reg_sts;
199 int save_total_len;
199}; 200};
200 201
201/** 202/**
@@ -216,7 +217,7 @@ struct pch_pd_dev_save {
216 struct pch_spi_board_data *board_dat; 217 struct pch_spi_board_data *board_dat;
217}; 218};
218 219
219static struct pci_device_id pch_spi_pcidev_id[] = { 220static DEFINE_PCI_DEVICE_TABLE(pch_spi_pcidev_id) = {
220 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, }, 221 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
221 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, }, 222 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
222 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, }, 223 { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
@@ -318,22 +319,23 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
318 data->tx_index = tx_index; 319 data->tx_index = tx_index;
319 data->rx_index = rx_index; 320 data->rx_index = rx_index;
320 321
321 } 322 /* if transfer complete interrupt */
322 323 if (reg_spsr_val & SPSR_FI_BIT) {
323 /* if transfer complete interrupt */ 324 if ((tx_index == bpw_len) && (rx_index == tx_index)) {
324 if (reg_spsr_val & SPSR_FI_BIT) { 325 /* disable interrupts */
325 if ((tx_index == bpw_len) && (rx_index == tx_index)) { 326 pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
326 /* disable interrupts */ 327 PCH_ALL);
327 pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); 328
328 329 /* transfer is completed;
329 /* transfer is completed; 330 inform pch_spi_process_messages */
330 inform pch_spi_process_messages */ 331 data->transfer_complete = true;
331 data->transfer_complete = true; 332 data->transfer_active = false;
332 data->transfer_active = false; 333 wake_up(&data->wait);
333 wake_up(&data->wait); 334 } else {
334 } else { 335 dev_err(&data->master->dev,
335 dev_err(&data->master->dev, 336 "%s : Transfer is not completed",
336 "%s : Transfer is not completed", __func__); 337 __func__);
338 }
337 } 339 }
338 } 340 }
339} 341}
@@ -822,11 +824,13 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
822 rx_dma_buf = data->dma.rx_buf_virt; 824 rx_dma_buf = data->dma.rx_buf_virt;
823 for (j = 0; j < data->bpw_len; j++) 825 for (j = 0; j < data->bpw_len; j++)
824 *rx_buf++ = *rx_dma_buf++ & 0xFF; 826 *rx_buf++ = *rx_dma_buf++ & 0xFF;
827 data->cur_trans->rx_buf = rx_buf;
825 } else { 828 } else {
826 rx_sbuf = data->cur_trans->rx_buf; 829 rx_sbuf = data->cur_trans->rx_buf;
827 rx_dma_sbuf = data->dma.rx_buf_virt; 830 rx_dma_sbuf = data->dma.rx_buf_virt;
828 for (j = 0; j < data->bpw_len; j++) 831 for (j = 0; j < data->bpw_len; j++)
829 *rx_sbuf++ = *rx_dma_sbuf++; 832 *rx_sbuf++ = *rx_dma_sbuf++;
833 data->cur_trans->rx_buf = rx_sbuf;
830 } 834 }
831} 835}
832 836
@@ -852,6 +856,9 @@ static int pch_spi_start_transfer(struct pch_spi_data *data)
852 rtn = wait_event_interruptible_timeout(data->wait, 856 rtn = wait_event_interruptible_timeout(data->wait,
853 data->transfer_complete, 857 data->transfer_complete,
854 msecs_to_jiffies(2 * HZ)); 858 msecs_to_jiffies(2 * HZ));
859 if (!rtn)
860 dev_err(&data->master->dev,
861 "%s wait-event timeout\n", __func__);
855 862
856 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, 863 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
857 DMA_FROM_DEVICE); 864 DMA_FROM_DEVICE);
@@ -923,7 +930,8 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
923 dma_cap_set(DMA_SLAVE, mask); 930 dma_cap_set(DMA_SLAVE, mask);
924 931
925 /* Get DMA's dev information */ 932 /* Get DMA's dev information */
926 dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0)); 933 dma_dev = pci_get_bus_and_slot(data->board_dat->pdev->bus->number,
934 PCI_DEVFN(12, 0));
927 935
928 /* Set Tx DMA */ 936 /* Set Tx DMA */
929 param = &dma->param_tx; 937 param = &dma->param_tx;
@@ -987,6 +995,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
987 int i; 995 int i;
988 int size; 996 int size;
989 int rem; 997 int rem;
998 int head;
990 unsigned long flags; 999 unsigned long flags;
991 struct pch_spi_dma_ctrl *dma; 1000 struct pch_spi_dma_ctrl *dma;
992 1001
@@ -1015,6 +1024,11 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1015 } 1024 }
1016 data->bpw_len = data->cur_trans->len / (*bpw / 8); 1025 data->bpw_len = data->cur_trans->len / (*bpw / 8);
1017 1026
1027 if (data->bpw_len > PCH_BUF_SIZE) {
1028 data->bpw_len = PCH_BUF_SIZE;
1029 data->cur_trans->len -= PCH_BUF_SIZE;
1030 }
1031
1018 /* copy Tx Data */ 1032 /* copy Tx Data */
1019 if (data->cur_trans->tx_buf != NULL) { 1033 if (data->cur_trans->tx_buf != NULL) {
1020 if (*bpw == 8) { 1034 if (*bpw == 8) {
@@ -1029,10 +1043,17 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1029 *tx_dma_sbuf++ = *tx_sbuf++; 1043 *tx_dma_sbuf++ = *tx_sbuf++;
1030 } 1044 }
1031 } 1045 }
1046
1047	/* Calculate Rx parameters for the DMA transfer */
1032 if (data->bpw_len > PCH_DMA_TRANS_SIZE) { 1048 if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
1033 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1; 1049 if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
1050 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
1051 rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
1052 } else {
1053 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
1054 rem = PCH_DMA_TRANS_SIZE;
1055 }
1034 size = PCH_DMA_TRANS_SIZE; 1056 size = PCH_DMA_TRANS_SIZE;
1035 rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
1036 } else { 1057 } else {
1037 num = 1; 1058 num = 1;
1038 size = data->bpw_len; 1059 size = data->bpw_len;
@@ -1092,15 +1113,23 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1092 dma->nent = num; 1113 dma->nent = num;
1093 dma->desc_rx = desc_rx; 1114 dma->desc_rx = desc_rx;
1094 1115
1095	/* TX */ 1116	/* Calculate Tx parameters for the DMA transfer */
1096 if (data->bpw_len > PCH_DMA_TRANS_SIZE) { 1117 if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
1097 num = data->bpw_len / PCH_DMA_TRANS_SIZE; 1118 head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
1119 if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
1120 num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
1121 rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
1122 } else {
1123 num = data->bpw_len / PCH_DMA_TRANS_SIZE;
1124 rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
1125 PCH_DMA_TRANS_SIZE - head;
1126 }
1098 size = PCH_DMA_TRANS_SIZE; 1127 size = PCH_DMA_TRANS_SIZE;
1099 rem = 16;
1100 } else { 1128 } else {
1101 num = 1; 1129 num = 1;
1102 size = data->bpw_len; 1130 size = data->bpw_len;
1103 rem = data->bpw_len; 1131 rem = data->bpw_len;
1132 head = 0;
1104 } 1133 }
1105 1134
1106 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); 1135 dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
@@ -1110,11 +1139,17 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
1110 for (i = 0; i < num; i++, sg++) { 1139 for (i = 0; i < num; i++, sg++) {
1111 if (i == 0) { 1140 if (i == 0) {
1112 sg->offset = 0; 1141 sg->offset = 0;
1142 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
1143 sg->offset);
1144 sg_dma_len(sg) = size + head;
1145 } else if (i == (num - 1)) {
1146 sg->offset = head + size * i;
1147 sg->offset = sg->offset * (*bpw / 8);
1113 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem, 1148 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
1114 sg->offset); 1149 sg->offset);
1115 sg_dma_len(sg) = rem; 1150 sg_dma_len(sg) = rem;
1116 } else { 1151 } else {
1117 sg->offset = rem + size * (i - 1); 1152 sg->offset = head + size * i;
1118 sg->offset = sg->offset * (*bpw / 8); 1153 sg->offset = sg->offset * (*bpw / 8);
1119 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size, 1154 sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
1120 sg->offset); 1155 sg->offset);
@@ -1202,6 +1237,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1202 data->current_msg->spi->bits_per_word); 1237 data->current_msg->spi->bits_per_word);
1203 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); 1238 pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
1204 do { 1239 do {
1240 int cnt;
1205 /* If we are already processing a message get the next 1241 /* If we are already processing a message get the next
1206 transfer structure from the message otherwise retrieve 1242 transfer structure from the message otherwise retrieve
1207 the 1st transfer request from the message. */ 1243 the 1st transfer request from the message. */
@@ -1221,11 +1257,28 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1221 } 1257 }
1222 spin_unlock(&data->lock); 1258 spin_unlock(&data->lock);
1223 1259
1260 if (!data->cur_trans->len)
1261 goto out;
1262 cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
1263 data->save_total_len = data->cur_trans->len;
1224 if (data->use_dma) { 1264 if (data->use_dma) {
1225 pch_spi_handle_dma(data, &bpw); 1265 int i;
1226 if (!pch_spi_start_transfer(data)) 1266 char *save_rx_buf = data->cur_trans->rx_buf;
1227 goto out; 1267 for (i = 0; i < cnt; i ++) {
1228 pch_spi_copy_rx_data_for_dma(data, bpw); 1268 pch_spi_handle_dma(data, &bpw);
1269 if (!pch_spi_start_transfer(data)) {
1270 data->transfer_complete = true;
1271 data->current_msg->status = -EIO;
1272 data->current_msg->complete
1273 (data->current_msg->context);
1274 data->bcurrent_msg_processing = false;
1275 data->current_msg = NULL;
1276 data->cur_trans = NULL;
1277 goto out;
1278 }
1279 pch_spi_copy_rx_data_for_dma(data, bpw);
1280 }
1281 data->cur_trans->rx_buf = save_rx_buf;
1229 } else { 1282 } else {
1230 pch_spi_set_tx(data, &bpw); 1283 pch_spi_set_tx(data, &bpw);
1231 pch_spi_set_ir(data); 1284 pch_spi_set_ir(data);
@@ -1236,6 +1289,7 @@ static void pch_spi_process_messages(struct work_struct *pwork)
1236 data->pkt_tx_buff = NULL; 1289 data->pkt_tx_buff = NULL;
1237 } 1290 }
1238 /* increment message count */ 1291 /* increment message count */
1292 data->cur_trans->len = data->save_total_len;
1239 data->current_msg->actual_length += data->cur_trans->len; 1293 data->current_msg->actual_length += data->cur_trans->len;
1240 1294
1241 dev_dbg(&data->master->dev, 1295 dev_dbg(&data->master->dev,
@@ -1388,6 +1442,7 @@ static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
1388 master->num_chipselect = PCH_MAX_CS; 1442 master->num_chipselect = PCH_MAX_CS;
1389 master->setup = pch_spi_setup; 1443 master->setup = pch_spi_setup;
1390 master->transfer = pch_spi_transfer; 1444 master->transfer = pch_spi_transfer;
1445 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
1391 1446
1392 data->board_dat = board_dat; 1447 data->board_dat = board_dat;
1393 data->plat_dev = plat_dev; 1448 data->plat_dev = plat_dev;
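
The chunking added above splits a long transfer into multiple passes (cnt),
caps each pass at PCH_BUF_SIZE, and then carves each pass into DMA scatterlist
entries so that the trailing entry is never zero-length when the length divides
evenly. The standalone sketch below reproduces the shape of that arithmetic for
inspection only; BUF_SIZE and TRANS_SIZE are placeholders, not the driver's
real PCH_BUF_SIZE and PCH_DMA_TRANS_SIZE values.

#include <stdio.h>

/* Placeholder sizes; the real constants live in spi-topcliff-pch.c. */
#define BUF_SIZE	4096
#define TRANS_SIZE	512

int main(void)
{
	int bpw_len = 9216;	/* words in the current transfer (example) */
	int cnt, num, rem;

	/* number of BUF_SIZE-sized passes over the whole transfer */
	cnt = (bpw_len - 1) / BUF_SIZE + 1;

	/* each pass handles at most one BUF_SIZE chunk */
	if (bpw_len > BUF_SIZE)
		bpw_len = BUF_SIZE;

	/*
	 * Split the chunk into scatterlist entries; when it divides
	 * evenly, fold one full-sized piece into "rem" so the final
	 * entry is never zero-length.
	 */
	if (bpw_len % TRANS_SIZE) {
		num = bpw_len / TRANS_SIZE + 1;
		rem = bpw_len % TRANS_SIZE;
	} else {
		num = bpw_len / TRANS_SIZE;
		rem = TRANS_SIZE;
	}

	printf("passes=%d sg_entries=%d last_entry=%d words\n",
	       cnt, num, rem);
	return 0;
}
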
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b2ccdea30cb9..3d8f662e4fe9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -30,6 +30,9 @@
30#include <linux/of_spi.h> 30#include <linux/of_spi.h>
31#include <linux/pm_runtime.h> 31#include <linux/pm_runtime.h>
32#include <linux/export.h> 32#include <linux/export.h>
33#include <linux/sched.h>
34#include <linux/delay.h>
35#include <linux/kthread.h>
33 36
34static void spidev_release(struct device *dev) 37static void spidev_release(struct device *dev)
35{ 38{
@@ -481,7 +484,7 @@ static void spi_match_master_to_boardinfo(struct spi_master *master,
481 * The board info passed can safely be __initdata ... but be careful of 484 * The board info passed can safely be __initdata ... but be careful of
482 * any embedded pointers (platform_data, etc), they're copied as-is. 485 * any embedded pointers (platform_data, etc), they're copied as-is.
483 */ 486 */
484int __init 487int __devinit
485spi_register_board_info(struct spi_board_info const *info, unsigned n) 488spi_register_board_info(struct spi_board_info const *info, unsigned n)
486{ 489{
487 struct boardinfo *bi; 490 struct boardinfo *bi;
@@ -507,6 +510,294 @@ spi_register_board_info(struct spi_board_info const *info, unsigned n)
507 510
508/*-------------------------------------------------------------------------*/ 511/*-------------------------------------------------------------------------*/
509 512
513/**
514 * spi_pump_messages - kthread work function which processes spi message queue
515 * @work: pointer to kthread work struct contained in the master struct
516 *
517 * This function checks if there is any spi message in the queue that
518 * needs processing and if so call out to the driver to initialize hardware
519 * and transfer each message.
520 *
521 */
522static void spi_pump_messages(struct kthread_work *work)
523{
524 struct spi_master *master =
525 container_of(work, struct spi_master, pump_messages);
526 unsigned long flags;
527 bool was_busy = false;
528 int ret;
529
530 /* Lock queue and check for queue work */
531 spin_lock_irqsave(&master->queue_lock, flags);
532 if (list_empty(&master->queue) || !master->running) {
533 if (master->busy) {
534 ret = master->unprepare_transfer_hardware(master);
535 if (ret) {
536 spin_unlock_irqrestore(&master->queue_lock, flags);
537 dev_err(&master->dev,
538 "failed to unprepare transfer hardware\n");
539 return;
540 }
541 }
542 master->busy = false;
543 spin_unlock_irqrestore(&master->queue_lock, flags);
544 return;
545 }
546
547 /* Make sure we are not already running a message */
548 if (master->cur_msg) {
549 spin_unlock_irqrestore(&master->queue_lock, flags);
550 return;
551 }
552 /* Extract head of queue */
553 master->cur_msg =
554 list_entry(master->queue.next, struct spi_message, queue);
555
556 list_del_init(&master->cur_msg->queue);
557 if (master->busy)
558 was_busy = true;
559 else
560 master->busy = true;
561 spin_unlock_irqrestore(&master->queue_lock, flags);
562
563 if (!was_busy) {
564 ret = master->prepare_transfer_hardware(master);
565 if (ret) {
566 dev_err(&master->dev,
567 "failed to prepare transfer hardware\n");
568 return;
569 }
570 }
571
572 ret = master->transfer_one_message(master, master->cur_msg);
573 if (ret) {
574 dev_err(&master->dev,
575 "failed to transfer one message from queue\n");
576 return;
577 }
578}
579
580static int spi_init_queue(struct spi_master *master)
581{
582 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
583
584 INIT_LIST_HEAD(&master->queue);
585 spin_lock_init(&master->queue_lock);
586
587 master->running = false;
588 master->busy = false;
589
590 init_kthread_worker(&master->kworker);
591 master->kworker_task = kthread_run(kthread_worker_fn,
592 &master->kworker,
593 dev_name(&master->dev));
594 if (IS_ERR(master->kworker_task)) {
595 dev_err(&master->dev, "failed to create message pump task\n");
596 return -ENOMEM;
597 }
598 init_kthread_work(&master->pump_messages, spi_pump_messages);
599
600 /*
601 * Master config will indicate if this controller should run the
602 * message pump with high (realtime) priority to reduce the transfer
603 * latency on the bus by minimising the delay between a transfer
604 * request and the scheduling of the message pump thread. Without this
605 * setting the message pump thread will remain at default priority.
606 */
607 if (master->rt) {
608 dev_info(&master->dev,
609 "will run message pump with realtime priority\n");
610 sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
611 }
612
613 return 0;
614}
615
616/**
617 * spi_get_next_queued_message() - called by driver to check for queued
618 * messages
619 * @master: the master to check for queued messages
620 *
621 * If there are more messages in the queue, the next message is returned from
622 * this call.
623 */
624struct spi_message *spi_get_next_queued_message(struct spi_master *master)
625{
626 struct spi_message *next;
627 unsigned long flags;
628
629 /* get a pointer to the next message, if any */
630 spin_lock_irqsave(&master->queue_lock, flags);
631 if (list_empty(&master->queue))
632 next = NULL;
633 else
634 next = list_entry(master->queue.next,
635 struct spi_message, queue);
636 spin_unlock_irqrestore(&master->queue_lock, flags);
637
638 return next;
639}
640EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
641
642/**
643 * spi_finalize_current_message() - the current message is complete
644 * @master: the master to return the message to
645 *
646 * Called by the driver to notify the core that the message in the front of the
647 * queue is complete and can be removed from the queue.
648 */
649void spi_finalize_current_message(struct spi_master *master)
650{
651 struct spi_message *mesg;
652 unsigned long flags;
653
654 spin_lock_irqsave(&master->queue_lock, flags);
655 mesg = master->cur_msg;
656 master->cur_msg = NULL;
657
658 queue_kthread_work(&master->kworker, &master->pump_messages);
659 spin_unlock_irqrestore(&master->queue_lock, flags);
660
661 mesg->state = NULL;
662 if (mesg->complete)
663 mesg->complete(mesg->context);
664}
665EXPORT_SYMBOL_GPL(spi_finalize_current_message);
666
667static int spi_start_queue(struct spi_master *master)
668{
669 unsigned long flags;
670
671 spin_lock_irqsave(&master->queue_lock, flags);
672
673 if (master->running || master->busy) {
674 spin_unlock_irqrestore(&master->queue_lock, flags);
675 return -EBUSY;
676 }
677
678 master->running = true;
679 master->cur_msg = NULL;
680 spin_unlock_irqrestore(&master->queue_lock, flags);
681
682 queue_kthread_work(&master->kworker, &master->pump_messages);
683
684 return 0;
685}
686
687static int spi_stop_queue(struct spi_master *master)
688{
689 unsigned long flags;
690 unsigned limit = 500;
691 int ret = 0;
692
693 spin_lock_irqsave(&master->queue_lock, flags);
694
695 /*
696 * This is a bit lame, but is optimized for the common execution path.
697 * A wait_queue on the master->busy could be used, but then the common
698 * execution path (pump_messages) would be required to call wake_up or
699 * friends on every SPI message. Do this instead.
700 */
701 while ((!list_empty(&master->queue) || master->busy) && limit--) {
702 spin_unlock_irqrestore(&master->queue_lock, flags);
703 msleep(10);
704 spin_lock_irqsave(&master->queue_lock, flags);
705 }
706
707 if (!list_empty(&master->queue) || master->busy)
708 ret = -EBUSY;
709 else
710 master->running = false;
711
712 spin_unlock_irqrestore(&master->queue_lock, flags);
713
714 if (ret) {
715 dev_warn(&master->dev,
716 "could not stop message queue\n");
717 return ret;
718 }
719 return ret;
720}
721
722static int spi_destroy_queue(struct spi_master *master)
723{
724 int ret;
725
726 ret = spi_stop_queue(master);
727
728 /*
729 * flush_kthread_worker will block until all work is done.
730 * If the reason that stop_queue timed out is that the work will never
731 * finish, then it does no good to call flush/stop thread, so
732 * return anyway.
733 */
734 if (ret) {
735 dev_err(&master->dev, "problem destroying queue\n");
736 return ret;
737 }
738
739 flush_kthread_worker(&master->kworker);
740 kthread_stop(master->kworker_task);
741
742 return 0;
743}
744
745/**
746 * spi_queued_transfer - transfer function for queued transfers
747 * @spi: spi device which is requesting transfer
748 * @msg: spi message to be queued onto the driver's message queue
749 */
750static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
751{
752 struct spi_master *master = spi->master;
753 unsigned long flags;
754
755 spin_lock_irqsave(&master->queue_lock, flags);
756
757 if (!master->running) {
758 spin_unlock_irqrestore(&master->queue_lock, flags);
759 return -ESHUTDOWN;
760 }
761 msg->actual_length = 0;
762 msg->status = -EINPROGRESS;
763
764 list_add_tail(&msg->queue, &master->queue);
765 if (master->running && !master->busy)
766 queue_kthread_work(&master->kworker, &master->pump_messages);
767
768 spin_unlock_irqrestore(&master->queue_lock, flags);
769 return 0;
770}
771
772static int spi_master_initialize_queue(struct spi_master *master)
773{
774 int ret;
775
776 master->queued = true;
777 master->transfer = spi_queued_transfer;
778
779 /* Initialize and start queue */
780 ret = spi_init_queue(master);
781 if (ret) {
782 dev_err(&master->dev, "problem initializing queue\n");
783 goto err_init_queue;
784 }
785 ret = spi_start_queue(master);
786 if (ret) {
787 dev_err(&master->dev, "problem starting queue\n");
788 goto err_start_queue;
789 }
790
791 return 0;
792
793err_start_queue:
794err_init_queue:
795 spi_destroy_queue(master);
796 return ret;
797}
798
799/*-------------------------------------------------------------------------*/
800
510static void spi_master_release(struct device *dev) 801static void spi_master_release(struct device *dev)
511{ 802{
512 struct spi_master *master; 803 struct spi_master *master;
@@ -522,6 +813,7 @@ static struct class spi_master_class = {
522}; 813};
523 814
524 815
816
525/** 817/**
526 * spi_alloc_master - allocate SPI master controller 818 * spi_alloc_master - allocate SPI master controller
527 * @dev: the controller, possibly using the platform_bus 819 * @dev: the controller, possibly using the platform_bus
@@ -539,7 +831,8 @@ static struct class spi_master_class = {
539 * 831 *
540 * The caller is responsible for assigning the bus number and initializing 832 * The caller is responsible for assigning the bus number and initializing
541 * the master's methods before calling spi_register_master(); and (after errors 833 * the master's methods before calling spi_register_master(); and (after errors
542 * adding the device) calling spi_master_put() to prevent a memory leak. 834 * adding the device) calling spi_master_put() and kfree() to prevent a memory
835 * leak.
543 */ 836 */
544struct spi_master *spi_alloc_master(struct device *dev, unsigned size) 837struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
545{ 838{
@@ -621,14 +914,23 @@ int spi_register_master(struct spi_master *master)
621 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), 914 dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
622 dynamic ? " (dynamic)" : ""); 915 dynamic ? " (dynamic)" : "");
623 916
917 /* If we're using a queued driver, start the queue */
918 if (master->transfer)
919 dev_info(dev, "master is unqueued, this is deprecated\n");
920 else {
921 status = spi_master_initialize_queue(master);
922 if (status) {
923 device_unregister(&master->dev);
924 goto done;
925 }
926 }
927
624 mutex_lock(&board_lock); 928 mutex_lock(&board_lock);
625 list_add_tail(&master->list, &spi_master_list); 929 list_add_tail(&master->list, &spi_master_list);
626 list_for_each_entry(bi, &board_list, list) 930 list_for_each_entry(bi, &board_list, list)
627 spi_match_master_to_boardinfo(master, &bi->board_info); 931 spi_match_master_to_boardinfo(master, &bi->board_info);
628 mutex_unlock(&board_lock); 932 mutex_unlock(&board_lock);
629 933
630 status = 0;
631
632 /* Register devices from the device tree */ 934 /* Register devices from the device tree */
633 of_register_spi_devices(master); 935 of_register_spi_devices(master);
634done: 936done:
@@ -636,7 +938,6 @@ done:
636} 938}
637EXPORT_SYMBOL_GPL(spi_register_master); 939EXPORT_SYMBOL_GPL(spi_register_master);
638 940
639
640static int __unregister(struct device *dev, void *null) 941static int __unregister(struct device *dev, void *null)
641{ 942{
642 spi_unregister_device(to_spi_device(dev)); 943 spi_unregister_device(to_spi_device(dev));
@@ -657,6 +958,11 @@ void spi_unregister_master(struct spi_master *master)
657{ 958{
658 int dummy; 959 int dummy;
659 960
961 if (master->queued) {
962 if (spi_destroy_queue(master))
963 dev_err(&master->dev, "queue remove failed\n");
964 }
965
660 mutex_lock(&board_lock); 966 mutex_lock(&board_lock);
661 list_del(&master->list); 967 list_del(&master->list);
662 mutex_unlock(&board_lock); 968 mutex_unlock(&board_lock);
@@ -666,6 +972,37 @@ void spi_unregister_master(struct spi_master *master)
666} 972}
667EXPORT_SYMBOL_GPL(spi_unregister_master); 973EXPORT_SYMBOL_GPL(spi_unregister_master);
668 974
975int spi_master_suspend(struct spi_master *master)
976{
977 int ret;
978
979 /* Basically no-ops for non-queued masters */
980 if (!master->queued)
981 return 0;
982
983 ret = spi_stop_queue(master);
984 if (ret)
985 dev_err(&master->dev, "queue stop failed\n");
986
987 return ret;
988}
989EXPORT_SYMBOL_GPL(spi_master_suspend);
990
991int spi_master_resume(struct spi_master *master)
992{
993 int ret;
994
995 if (!master->queued)
996 return 0;
997
998 ret = spi_start_queue(master);
999 if (ret)
1000 dev_err(&master->dev, "queue restart failed\n");
1001
1002 return ret;
1003}
1004EXPORT_SYMBOL_GPL(spi_master_resume);
1005
669static int __spi_master_match(struct device *dev, void *data) 1006static int __spi_master_match(struct device *dev, void *data)
670{ 1007{
671 struct spi_master *m; 1008 struct spi_master *m;
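
The queue infrastructure added to spi.c above is driven entirely through the
three new spi_master hooks. A minimal, hypothetical controller driver using
them could look like the sketch below (all foo_* names are made up and the
register-level I/O is omitted). The key points are that ->transfer is left
unset so spi_register_master() installs the core's queue, and that
->transfer_one_message must call spi_finalize_current_message() when it is
done with the message.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_prepare_hardware(struct spi_master *master)
{
	/* e.g. ungate clocks before the first message of a burst */
	return 0;
}

static int foo_unprepare_hardware(struct spi_master *master)
{
	/* e.g. gate clocks again once the queue has drained */
	return 0;
}

static int foo_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* push xfer->tx_buf / fill xfer->rx_buf here; the actual
		 * FIFO or DMA handling is omitted from this sketch */
		msg->actual_length += xfer->len;
	}

	msg->status = 0;
	/* hand the message back so the core can pump the next one */
	spi_finalize_current_message(master);
	return 0;
}

static int __devinit foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	master->bus_num = pdev->id;
	master->num_chipselect = 1;
	master->mode_bits = SPI_CPOL | SPI_CPHA;
	/* leave master->transfer NULL so the core installs its queue */
	master->prepare_transfer_hardware = foo_prepare_hardware;
	master->transfer_one_message = foo_transfer_one_message;
	master->unprepare_transfer_hardware = foo_unprepare_hardware;

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);
	return ret;
}

static struct platform_driver foo_spi_driver = {
	.driver = {
		.name	= "foo-spi",
		.owner	= THIS_MODULE,
	},
	.probe	= foo_spi_probe,
};
module_platform_driver(foo_spi_driver);

MODULE_LICENSE("GPL");
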
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index 572f637299c9..3672f40f3455 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -241,6 +241,8 @@ struct dma_chan;
241 * @autosuspend_delay: delay in ms following transfer completion before the 241 * @autosuspend_delay: delay in ms following transfer completion before the
242 * runtime power management system suspends the device. A setting of 0 242 * runtime power management system suspends the device. A setting of 0
243 * indicates no delay and the device will be suspended immediately. 243 * indicates no delay and the device will be suspended immediately.
244 * @rt: indicates the controller should run the message pump with realtime
245 * priority to minimise the transfer latency on the bus.
244 */ 246 */
245struct pl022_ssp_controller { 247struct pl022_ssp_controller {
246 u16 bus_id; 248 u16 bus_id;
@@ -250,6 +252,7 @@ struct pl022_ssp_controller {
250 void *dma_rx_param; 252 void *dma_rx_param;
251 void *dma_tx_param; 253 void *dma_tx_param;
252 int autosuspend_delay; 254 int autosuspend_delay;
255 bool rt;
253}; 256};
254 257
255/** 258/**
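
The new rt flag simply propagates into spi_master->rt, where spi_init_queue()
(added above) bumps the message-pump kthread to SCHED_FIFO. A board that wants
this behaviour would set the flag in its pl022 platform data, roughly as in the
sketch below; the structure name and the bus_id/autosuspend values are
illustrative. Enabling rt trades some scheduling fairness for lower transfer
latency, so it is only worth setting on buses with latency-sensitive
peripherals.

#include <linux/amba/pl022.h>

static struct pl022_ssp_controller board_ssp0_plat = {
	.bus_id			= 0,
	.autosuspend_delay	= 100,	/* ms before runtime suspend */
	.rt			= true,	/* realtime-priority message pump */
};
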
diff --git a/include/linux/spi/sh_hspi.h b/include/linux/spi/sh_hspi.h
new file mode 100644
index 000000000000..a1121f872ac1
--- /dev/null
+++ b/include/linux/spi/sh_hspi.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (C) 2011 Kuninori Morimoto
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17#ifndef SH_HSPI_H
18#define SH_HSPI_H
19
20struct sh_hspi_info {
21};
22
23#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 176fce9cc6b1..98679b061b63 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -22,6 +22,7 @@
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/mod_devicetable.h> 23#include <linux/mod_devicetable.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/kthread.h>
25 26
26/* 27/*
27 * INTERFACES between SPI master-side drivers and SPI infrastructure. 28 * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -235,6 +236,27 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
235 * the device whose settings are being modified. 236 * the device whose settings are being modified.
236 * @transfer: adds a message to the controller's transfer queue. 237 * @transfer: adds a message to the controller's transfer queue.
237 * @cleanup: frees controller-specific state 238 * @cleanup: frees controller-specific state
239 * @queued: whether this master is providing an internal message queue
240 * @kworker: thread struct for message pump
241 * @kworker_task: pointer to task for message pump kworker thread
242 * @pump_messages: work struct for scheduling work to the message pump
243 * @queue_lock: spinlock to synchronise access to the message queue
244 * @queue: message queue
245 * @cur_msg: the currently in-flight message
246 * @busy: message pump is busy
247 * @running: message pump is running
248 * @rt: whether this queue is set to run as a realtime task
249 * @prepare_transfer_hardware: a message will soon arrive from the queue
250 * so the subsystem requests the driver to prepare the transfer hardware
251 * by issuing this call
252 * @transfer_one_message: the subsystem calls the driver to transfer a single
253 * message while queuing transfers that arrive in the meantime. When the
254 * driver is finished with this message, it must call
255 * spi_finalize_current_message() so the subsystem can issue the next
256 * transfer
257 * @unprepare_transfer_hardware: there are currently no more messages on the
258 * queue so the subsystem notifies the driver that it may relax the
259 * hardware by issuing this call
238 * 260 *
239 * Each SPI master controller can communicate with one or more @spi_device 261 * Each SPI master controller can communicate with one or more @spi_device
240 * children. These make a small bus, sharing MOSI, MISO and SCK signals 262 * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -318,6 +340,28 @@ struct spi_master {
318 340
319 /* called on release() to free memory provided by spi_master */ 341 /* called on release() to free memory provided by spi_master */
320 void (*cleanup)(struct spi_device *spi); 342 void (*cleanup)(struct spi_device *spi);
343
344 /*
345 * These hooks are for drivers that want to use the generic
346 * master transfer queueing mechanism. If these are used, the
347 * transfer() function above must NOT be specified by the driver.
348 * Over time we expect SPI drivers to be phased over to this API.
349 */
350 bool queued;
351 struct kthread_worker kworker;
352 struct task_struct *kworker_task;
353 struct kthread_work pump_messages;
354 spinlock_t queue_lock;
355 struct list_head queue;
356 struct spi_message *cur_msg;
357 bool busy;
358 bool running;
359 bool rt;
360
361 int (*prepare_transfer_hardware)(struct spi_master *master);
362 int (*transfer_one_message)(struct spi_master *master,
363 struct spi_message *mesg);
364 int (*unprepare_transfer_hardware)(struct spi_master *master);
321}; 365};
322 366
323static inline void *spi_master_get_devdata(struct spi_master *master) 367static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -343,6 +387,13 @@ static inline void spi_master_put(struct spi_master *master)
343 put_device(&master->dev); 387 put_device(&master->dev);
344} 388}
345 389
390/* PM calls that need to be issued by the driver */
391extern int spi_master_suspend(struct spi_master *master);
392extern int spi_master_resume(struct spi_master *master);
393
394/* Calls the driver makes to interact with the message queue */
395extern struct spi_message *spi_get_next_queued_message(struct spi_master *master);
396extern void spi_finalize_current_message(struct spi_master *master);
346 397
347/* the spi driver core manages memory for the spi_master classdev */ 398/* the spi driver core manages memory for the spi_master classdev */
348extern struct spi_master * 399extern struct spi_master *
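
spi_master_suspend() and spi_master_resume() are meant to be wrapped by the
controller driver's own PM callbacks, so the queue is stopped before the
hardware is quiesced and restarted only after it has been reinitialised. A
minimal sketch follows, assuming probe() stored the spi_master as the device's
drvdata (as the drivers in this series do); the foo_* names are hypothetical.

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

static int foo_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	/* stop the message pump and let in-flight work drain first */
	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	/* ... then save controller state / gate clocks (omitted) ... */
	return 0;
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	/* ... restore controller state first (omitted) ... */

	/* then restart the message pump */
	return spi_master_resume(master);
}

static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);
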
@@ -549,7 +600,7 @@ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags
549 + ntrans * sizeof(struct spi_transfer), 600 + ntrans * sizeof(struct spi_transfer),
550 flags); 601 flags);
551 if (m) { 602 if (m) {
552 int i; 603 unsigned i;
553 struct spi_transfer *t = (struct spi_transfer *)(m + 1); 604 struct spi_transfer *t = (struct spi_transfer *)(m + 1);
554 605
555 INIT_LIST_HEAD(&m->transfers); 606 INIT_LIST_HEAD(&m->transfers);