Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig        |    7
-rw-r--r--  drivers/spi/Makefile       |    1
-rw-r--r--  drivers/spi/spi_s3c64xx.c  | 1196
3 files changed, 1204 insertions, 0 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 6fa595e0c989..c712f53cd5c9 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -240,6 +240,13 @@ config SPI_S3C24XX_GPIO
 	  the inbuilt hardware cannot provide the transfer mode, or
 	  where the board is using non hardware connected pins.
 
+config SPI_S3C64XX
+	tristate "Samsung S3C64XX series type SPI"
+	depends on ARCH_S3C64XX && EXPERIMENTAL
+	select S3C64XX_DMA
+	help
+	  SPI driver for Samsung S3C64XX and newer SoCs.
+
 config SPI_SH_MSIOF
 	tristate "SuperH MSIOF SPI controller"
 	depends on SUPERH && HAVE_CLK
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 33faa7fa0ab8..f3d2810ba11c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o
 obj-$(CONFIG_SPI_PPC4xx)		+= spi_ppc4xx.o
 obj-$(CONFIG_SPI_S3C24XX_GPIO)		+= spi_s3c24xx_gpio.o
 obj-$(CONFIG_SPI_S3C24XX)		+= spi_s3c24xx_hw.o
+obj-$(CONFIG_SPI_S3C64XX)		+= spi_s3c64xx.o
 obj-$(CONFIG_SPI_TXX9)			+= spi_txx9.o
 obj-$(CONFIG_SPI_XILINX)		+= xilinx_spi.o
 obj-$(CONFIG_SPI_XILINX_OF)		+= xilinx_spi_of.o
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
new file mode 100644
index 000000000000..88a456dba967
--- /dev/null
+++ b/drivers/spi/spi_s3c64xx.c
@@ -0,0 +1,1196 @@
1/* linux/drivers/spi/spi_s3c64xx.c
2 *
3 * Copyright (C) 2009 Samsung Electronics Ltd.
4 * Jaswinder Singh <jassi.brar@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/workqueue.h>
24#include <linux/delay.h>
25#include <linux/clk.h>
26#include <linux/dma-mapping.h>
27#include <linux/platform_device.h>
28#include <linux/spi/spi.h>
29
30#include <mach/dma.h>
31#include <plat/spi.h>
32
33/* Registers and bit-fields */
34
35#define S3C64XX_SPI_CH_CFG 0x00
36#define S3C64XX_SPI_CLK_CFG 0x04
37#define S3C64XX_SPI_MODE_CFG 0x08
38#define S3C64XX_SPI_SLAVE_SEL 0x0C
39#define S3C64XX_SPI_INT_EN 0x10
40#define S3C64XX_SPI_STATUS 0x14
41#define S3C64XX_SPI_TX_DATA 0x18
42#define S3C64XX_SPI_RX_DATA 0x1C
43#define S3C64XX_SPI_PACKET_CNT 0x20
44#define S3C64XX_SPI_PENDING_CLR 0x24
45#define S3C64XX_SPI_SWAP_CFG 0x28
46#define S3C64XX_SPI_FB_CLK 0x2C
47
48#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
49#define S3C64XX_SPI_CH_SW_RST (1<<5)
50#define S3C64XX_SPI_CH_SLAVE (1<<4)
51#define S3C64XX_SPI_CPOL_L (1<<3)
52#define S3C64XX_SPI_CPHA_B (1<<2)
53#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
54#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
55
56#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
57#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
58#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
59#define S3C64XX_SPI_PSR_MASK 0xff
60
61#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
62#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
63#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
64#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
65#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
66#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
67#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
68#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
69#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
70#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
71#define S3C64XX_SPI_MODE_4BURST (1<<0)
72
73#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
74#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
75
76#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
77
78#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
79 (c)->regs + S3C64XX_SPI_SLAVE_SEL)
80
81#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
82#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
83#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
84#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
85#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
86#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
87#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
88
89#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
90#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
91#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
92#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
93#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
94#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
95
96#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
97
98#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
99#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
100#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
101#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
102#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
103
104#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
105#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
106#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
107#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
108#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
109#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
110#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
111#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
112
113#define S3C64XX_SPI_FBCLK_MSK (3<<0)
114
115#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
116 (((i)->fifo_lvl_mask + 1))) \
117 ? 1 : 0)
118
119#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
120 (((i)->fifo_lvl_mask + 1) << 1)) \
121 ? 1 : 0)
122#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
123#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
124
125#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
126#define S3C64XX_SPI_TRAILCNT_OFF 19
127
128#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
129
130#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
131
132#define SUSPND (1<<0)
133#define SPIBUSY (1<<1)
134#define RXBUSY (1<<2)
135#define TXBUSY (1<<3)
136
137/**
138 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
139 * @clk: Pointer to the spi clock.
140 * @master: Pointer to the SPI Protocol master.
141 * @workqueue: Work queue for the SPI xfer requests.
142 * @cntrlr_info: Platform specific data for the controller this driver manages.
143 * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
144 * @work: Work item that processes the message queue.
145 * @queue: To log SPI xfer requests.
146 * @lock: Controller specific lock.
147 * @state: Set of FLAGS to indicate status.
148 * @rx_dmach: Controller's DMA channel for Rx.
149 * @tx_dmach: Controller's DMA channel for Tx.
150 * @sfr_start: BUS address of SPI controller regs.
151 * @regs: Pointer to ioremap'ed controller registers.
152 * @xfer_completion: To indicate completion of xfer task.
153 * @cur_mode: Stores the active configuration of the controller.
154 * @cur_bpw: Stores the active bits per word settings.
155 * @cur_speed: Stores the active xfer clock speed.
156 */
157struct s3c64xx_spi_driver_data {
158 void __iomem *regs;
159 struct clk *clk;
160 struct platform_device *pdev;
161 struct spi_master *master;
162 struct workqueue_struct *workqueue;
163 struct s3c64xx_spi_cntrlr_info *cntrlr_info;
164 struct spi_device *tgl_spi;
165 struct work_struct work;
166 struct list_head queue;
167 spinlock_t lock;
168 enum dma_ch rx_dmach;
169 enum dma_ch tx_dmach;
170 unsigned long sfr_start;
171 struct completion xfer_completion;
172 unsigned state;
173 unsigned cur_mode, cur_bpw;
174 unsigned cur_speed;
175};
176
177static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
178 .name = "samsung-spi-dma",
179};
180
181static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
182{
183 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
184 void __iomem *regs = sdd->regs;
185 unsigned long loops;
186 u32 val;
187
188 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
189
190 val = readl(regs + S3C64XX_SPI_CH_CFG);
191 val |= S3C64XX_SPI_CH_SW_RST;
192 val &= ~S3C64XX_SPI_CH_HS_EN;
193 writel(val, regs + S3C64XX_SPI_CH_CFG);
194
195 /* Flush TxFIFO*/
196 loops = msecs_to_loops(1);
197 do {
198 val = readl(regs + S3C64XX_SPI_STATUS);
199 } while (TX_FIFO_LVL(val, sci) && loops--);
200
201 /* Flush RxFIFO*/
202 loops = msecs_to_loops(1);
203 do {
204 val = readl(regs + S3C64XX_SPI_STATUS);
205 if (RX_FIFO_LVL(val, sci))
206 readl(regs + S3C64XX_SPI_RX_DATA);
207 else
208 break;
209 } while (loops--);
210
211 val = readl(regs + S3C64XX_SPI_CH_CFG);
212 val &= ~S3C64XX_SPI_CH_SW_RST;
213 writel(val, regs + S3C64XX_SPI_CH_CFG);
214
215 val = readl(regs + S3C64XX_SPI_MODE_CFG);
216 val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
217 writel(val, regs + S3C64XX_SPI_MODE_CFG);
218
219 val = readl(regs + S3C64XX_SPI_CH_CFG);
220 val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
221 writel(val, regs + S3C64XX_SPI_CH_CFG);
222}
223
224static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
225 struct spi_device *spi,
226 struct spi_transfer *xfer, int dma_mode)
227{
228 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
229 void __iomem *regs = sdd->regs;
230 u32 modecfg, chcfg;
231
232 modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
233 modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
234
235 chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
236 chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
237
238 if (dma_mode) {
239 chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
240 } else {
241 /* Always shift in data into the FIFO, even if the xfer is
242 * Tx only; this helps set the PCKT_CNT value so that exactly
243 * the required number of clocks is generated.
244 */
245 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
246 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
247 | S3C64XX_SPI_PACKET_CNT_EN,
248 regs + S3C64XX_SPI_PACKET_CNT);
249 }
250
251 if (xfer->tx_buf != NULL) {
252 sdd->state |= TXBUSY;
253 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
254 if (dma_mode) {
255 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
256 s3c2410_dma_config(sdd->tx_dmach, 1);
257 s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
258 xfer->tx_dma, xfer->len);
259 s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
260 } else {
261 unsigned char *buf = (unsigned char *) xfer->tx_buf;
262 int i = 0;
263 while (i < xfer->len)
264 writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA);
265 }
266 }
267
268 if (xfer->rx_buf != NULL) {
269 sdd->state |= RXBUSY;
270
271 if (sci->high_speed && sdd->cur_speed >= 30000000UL
272 && !(sdd->cur_mode & SPI_CPHA))
273 chcfg |= S3C64XX_SPI_CH_HS_EN;
274
275 if (dma_mode) {
276 modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
277 chcfg |= S3C64XX_SPI_CH_RXCH_ON;
278 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
279 | S3C64XX_SPI_PACKET_CNT_EN,
280 regs + S3C64XX_SPI_PACKET_CNT);
281 s3c2410_dma_config(sdd->rx_dmach, 1);
282 s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
283 xfer->rx_dma, xfer->len);
284 s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
285 }
286 }
287
288 writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
289 writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
290}
291
292static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
293 struct spi_device *spi)
294{
295 struct s3c64xx_spi_csinfo *cs;
296
297 if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
298 if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
299 /* Deselect the last toggled device */
300 cs = sdd->tgl_spi->controller_data;
301 cs->set_level(sdd->tgl_spi->mode & SPI_CS_HIGH ? 0 : 1);
302 }
303 sdd->tgl_spi = NULL;
304 }
305
306 cs = spi->controller_data;
307 cs->set_level(spi->mode & SPI_CS_HIGH ? 1 : 0);
308}
309
310static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
311 struct spi_transfer *xfer, int dma_mode)
312{
313 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
314 void __iomem *regs = sdd->regs;
315 unsigned long val;
316 int ms;
317
318 /* millisecs to xfer 'len' bytes @ 'cur_speed' */
319 ms = xfer->len * 8 * 1000 / sdd->cur_speed;
320 ms += 5; /* some tolerance */
321
322 if (dma_mode) {
323 val = msecs_to_jiffies(ms) + 10;
324 val = wait_for_completion_timeout(&sdd->xfer_completion, val);
325 } else {
326 val = msecs_to_loops(ms);
327 do {
328 val = readl(regs + S3C64XX_SPI_STATUS);
329 } while (RX_FIFO_LVL(val, sci) < xfer->len && --val);
330 }
331
332 if (!val)
333 return -EIO;
334
335 if (dma_mode) {
336 u32 status;
337
338 /*
339 * DmaTx returns after simply writing data into the FIFO, without
340 * waiting for the actual transmission on the bus to finish.
341 * DmaRx returns only after the DMA has read the data out of the
342 * FIFO, which requires the bus transmission to have finished, so
343 * we need not worry if the xfer involved Rx (with or without Tx).
344 */
345 if (xfer->rx_buf == NULL) {
346 val = msecs_to_loops(10);
347 status = readl(regs + S3C64XX_SPI_STATUS);
348 while ((TX_FIFO_LVL(status, sci)
349 || !S3C64XX_SPI_ST_TX_DONE(status, sci))
350 && --val) {
351 cpu_relax();
352 status = readl(regs + S3C64XX_SPI_STATUS);
353 }
354
355 if (!val)
356 return -EIO;
357 }
358 } else {
359 unsigned char *buf;
360 int i;
361
362 /* If it was only Tx */
363 if (xfer->rx_buf == NULL) {
364 sdd->state &= ~TXBUSY;
365 return 0;
366 }
367
368 i = 0;
369 buf = xfer->rx_buf;
370 while (i < xfer->len)
371 buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA);
372
373 sdd->state &= ~RXBUSY;
374 }
375
376 return 0;
377}
378
379static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
380 struct spi_device *spi)
381{
382 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
383
384 if (sdd->tgl_spi == spi)
385 sdd->tgl_spi = NULL;
386
387 cs->set_level(spi->mode & SPI_CS_HIGH ? 0 : 1);
388}
389
390static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
391{
392 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
393 void __iomem *regs = sdd->regs;
394 u32 val;
395
396 /* Disable Clock */
397 val = readl(regs + S3C64XX_SPI_CLK_CFG);
398 val &= ~S3C64XX_SPI_ENCLK_ENABLE;
399 writel(val, regs + S3C64XX_SPI_CLK_CFG);
400
401 /* Set Polarity and Phase */
402 val = readl(regs + S3C64XX_SPI_CH_CFG);
403 val &= ~(S3C64XX_SPI_CH_SLAVE |
404 S3C64XX_SPI_CPOL_L |
405 S3C64XX_SPI_CPHA_B);
406
407 if (sdd->cur_mode & SPI_CPOL)
408 val |= S3C64XX_SPI_CPOL_L;
409
410 if (sdd->cur_mode & SPI_CPHA)
411 val |= S3C64XX_SPI_CPHA_B;
412
413 writel(val, regs + S3C64XX_SPI_CH_CFG);
414
415 /* Set Channel & DMA Mode */
416 val = readl(regs + S3C64XX_SPI_MODE_CFG);
417 val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
418 | S3C64XX_SPI_MODE_CH_TSZ_MASK);
419
420 switch (sdd->cur_bpw) {
421 case 32:
422 val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
423 break;
424 case 16:
425 val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
426 break;
427 default:
428 val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
429 break;
430 }
431 val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */
432
433 writel(val, regs + S3C64XX_SPI_MODE_CFG);
434
435 /* Configure Clock */
436 val = readl(regs + S3C64XX_SPI_CLK_CFG);
437 val &= ~S3C64XX_SPI_PSR_MASK;
438 val |= ((clk_get_rate(sci->src_clk) / sdd->cur_speed / 2 - 1)
439 & S3C64XX_SPI_PSR_MASK);
440 writel(val, regs + S3C64XX_SPI_CLK_CFG);
441
442 /* Enable Clock */
443 val = readl(regs + S3C64XX_SPI_CLK_CFG);
444 val |= S3C64XX_SPI_ENCLK_ENABLE;
445 writel(val, regs + S3C64XX_SPI_CLK_CFG);
446}
447
448void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
449 int size, enum s3c2410_dma_buffresult res)
450{
451 struct s3c64xx_spi_driver_data *sdd = buf_id;
452 unsigned long flags;
453
454 spin_lock_irqsave(&sdd->lock, flags);
455
456 if (res == S3C2410_RES_OK)
457 sdd->state &= ~RXBUSY;
458 else
459 dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
460
461 /* If the Tx path is also done, the whole xfer is complete */
462 if (!(sdd->state & TXBUSY))
463 complete(&sdd->xfer_completion);
464
465 spin_unlock_irqrestore(&sdd->lock, flags);
466}
467
468void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
469 int size, enum s3c2410_dma_buffresult res)
470{
471 struct s3c64xx_spi_driver_data *sdd = buf_id;
472 unsigned long flags;
473
474 spin_lock_irqsave(&sdd->lock, flags);
475
476 if (res == S3C2410_RES_OK)
477 sdd->state &= ~TXBUSY;
478 else
479 dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d\n", size);
480
481 /* If the Rx path is also done, the whole xfer is complete */
482 if (!(sdd->state & RXBUSY))
483 complete(&sdd->xfer_completion);
484
485 spin_unlock_irqrestore(&sdd->lock, flags);
486}
487
488#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
489
490static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
491 struct spi_message *msg)
492{
493 struct device *dev = &sdd->pdev->dev;
494 struct spi_transfer *xfer;
495
496 if (msg->is_dma_mapped)
497 return 0;
498
499 /* First mark all xfer unmapped */
500 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
501 xfer->rx_dma = XFER_DMAADDR_INVALID;
502 xfer->tx_dma = XFER_DMAADDR_INVALID;
503 }
504
505 /* Map until end or first fail */
506 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
507
508 if (xfer->tx_buf != NULL) {
509 xfer->tx_dma = dma_map_single(dev, xfer->tx_buf,
510 xfer->len, DMA_TO_DEVICE);
511 if (dma_mapping_error(dev, xfer->tx_dma)) {
512 dev_err(dev, "dma_map_single Tx failed\n");
513 xfer->tx_dma = XFER_DMAADDR_INVALID;
514 return -ENOMEM;
515 }
516 }
517
518 if (xfer->rx_buf != NULL) {
519 xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
520 xfer->len, DMA_FROM_DEVICE);
521 if (dma_mapping_error(dev, xfer->rx_dma)) {
522 dev_err(dev, "dma_map_single Rx failed\n");
523 dma_unmap_single(dev, xfer->tx_dma,
524 xfer->len, DMA_TO_DEVICE);
525 xfer->tx_dma = XFER_DMAADDR_INVALID;
526 xfer->rx_dma = XFER_DMAADDR_INVALID;
527 return -ENOMEM;
528 }
529 }
530 }
531
532 return 0;
533}
534
535static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
536 struct spi_message *msg)
537{
538 struct device *dev = &sdd->pdev->dev;
539 struct spi_transfer *xfer;
540
541 if (msg->is_dma_mapped)
542 return;
543
544 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
545
546 if (xfer->rx_buf != NULL
547 && xfer->rx_dma != XFER_DMAADDR_INVALID)
548 dma_unmap_single(dev, xfer->rx_dma,
549 xfer->len, DMA_FROM_DEVICE);
550
551 if (xfer->tx_buf != NULL
552 && xfer->tx_dma != XFER_DMAADDR_INVALID)
553 dma_unmap_single(dev, xfer->tx_dma,
554 xfer->len, DMA_TO_DEVICE);
555 }
556}
557
558static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
559 struct spi_message *msg)
560{
561 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
562 struct spi_device *spi = msg->spi;
563 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
564 struct spi_transfer *xfer;
565 int status = 0, cs_toggle = 0;
566 u32 speed;
567 u8 bpw;
568
569 /* If the Master's (controller) state differs from that needed by the Slave */
570 if (sdd->cur_speed != spi->max_speed_hz
571 || sdd->cur_mode != spi->mode
572 || sdd->cur_bpw != spi->bits_per_word) {
573 sdd->cur_bpw = spi->bits_per_word;
574 sdd->cur_speed = spi->max_speed_hz;
575 sdd->cur_mode = spi->mode;
576 s3c64xx_spi_config(sdd);
577 }
578
579 /* Map all the transfers if needed */
580 if (s3c64xx_spi_map_mssg(sdd, msg)) {
581 dev_err(&spi->dev,
582 "Xfer: Unable to map message buffers!\n");
583 status = -ENOMEM;
584 goto out;
585 }
586
587 /* Configure feedback delay */
588 writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
589
590 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
591
592 unsigned long flags;
593 int use_dma;
594
595 INIT_COMPLETION(sdd->xfer_completion);
596
597 /* Only BPW and Speed may change across transfers */
598 bpw = xfer->bits_per_word ? : spi->bits_per_word;
599 speed = xfer->speed_hz ? : spi->max_speed_hz;
600
601 if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
602 sdd->cur_bpw = bpw;
603 sdd->cur_speed = speed;
604 s3c64xx_spi_config(sdd);
605 }
606
607 /* Polling method for xfers not bigger than FIFO capacity */
608 if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
609 use_dma = 0;
610 else
611 use_dma = 1;
612
613 spin_lock_irqsave(&sdd->lock, flags);
614
615 /* Mark as pending only the work that is actually to be done */
616 sdd->state &= ~RXBUSY;
617 sdd->state &= ~TXBUSY;
618
619 enable_datapath(sdd, spi, xfer, use_dma);
620
621 /* Slave Select */
622 enable_cs(sdd, spi);
623
624 /* Start the signals */
625 S3C64XX_SPI_ACT(sdd);
626
627 spin_unlock_irqrestore(&sdd->lock, flags);
628
629 status = wait_for_xfer(sdd, xfer, use_dma);
630
631 /* Quiesce the signals */
632 S3C64XX_SPI_DEACT(sdd);
633
634 if (status) {
635 dev_err(&spi->dev, "I/O Error: "
636 "rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
637 xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
638 (sdd->state & RXBUSY) ? 'f' : 'p',
639 (sdd->state & TXBUSY) ? 'f' : 'p',
640 xfer->len);
641
642 if (use_dma) {
643 if (xfer->tx_buf != NULL
644 && (sdd->state & TXBUSY))
645 s3c2410_dma_ctrl(sdd->tx_dmach,
646 S3C2410_DMAOP_FLUSH);
647 if (xfer->rx_buf != NULL
648 && (sdd->state & RXBUSY))
649 s3c2410_dma_ctrl(sdd->rx_dmach,
650 S3C2410_DMAOP_FLUSH);
651 }
652
653 goto out;
654 }
655
656 if (xfer->delay_usecs)
657 udelay(xfer->delay_usecs);
658
659 if (xfer->cs_change) {
660 /* Hint that the next message is going to be
661 for the same device */
662 if (list_is_last(&xfer->transfer_list,
663 &msg->transfers))
664 cs_toggle = 1;
665 else
666 disable_cs(sdd, spi);
667 }
668
669 msg->actual_length += xfer->len;
670
671 flush_fifo(sdd);
672 }
673
674out:
675 if (!cs_toggle || status)
676 disable_cs(sdd, spi);
677 else
678 sdd->tgl_spi = spi;
679
680 s3c64xx_spi_unmap_mssg(sdd, msg);
681
682 msg->status = status;
683
684 if (msg->complete)
685 msg->complete(msg->context);
686}
687
688static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
689{
690 if (s3c2410_dma_request(sdd->rx_dmach,
691 &s3c64xx_spi_dma_client, NULL) < 0) {
692 dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
693 return 0;
694 }
695 s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
696 s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
697 sdd->sfr_start + S3C64XX_SPI_RX_DATA);
698
699 if (s3c2410_dma_request(sdd->tx_dmach,
700 &s3c64xx_spi_dma_client, NULL) < 0) {
701 dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
702 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
703 return 0;
704 }
705 s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
706 s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
707 sdd->sfr_start + S3C64XX_SPI_TX_DATA);
708
709 return 1;
710}
711
712static void s3c64xx_spi_work(struct work_struct *work)
713{
714 struct s3c64xx_spi_driver_data *sdd = container_of(work,
715 struct s3c64xx_spi_driver_data, work);
716 unsigned long flags;
717
718 /* Acquire DMA channels */
719 while (!acquire_dma(sdd))
720 msleep(10);
721
722 spin_lock_irqsave(&sdd->lock, flags);
723
724 while (!list_empty(&sdd->queue)
725 && !(sdd->state & SUSPND)) {
726
727 struct spi_message *msg;
728
729 msg = container_of(sdd->queue.next, struct spi_message, queue);
730
731 list_del_init(&msg->queue);
732
733 /* Set Xfer busy flag */
734 sdd->state |= SPIBUSY;
735
736 spin_unlock_irqrestore(&sdd->lock, flags);
737
738 handle_msg(sdd, msg);
739
740 spin_lock_irqsave(&sdd->lock, flags);
741
742 sdd->state &= ~SPIBUSY;
743 }
744
745 spin_unlock_irqrestore(&sdd->lock, flags);
746
747 /* Free DMA channels */
748 s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
749 s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
750}
751
752static int s3c64xx_spi_transfer(struct spi_device *spi,
753 struct spi_message *msg)
754{
755 struct s3c64xx_spi_driver_data *sdd;
756 unsigned long flags;
757
758 sdd = spi_master_get_devdata(spi->master);
759
760 spin_lock_irqsave(&sdd->lock, flags);
761
762 if (sdd->state & SUSPND) {
763 spin_unlock_irqrestore(&sdd->lock, flags);
764 return -ESHUTDOWN;
765 }
766
767 msg->status = -EINPROGRESS;
768 msg->actual_length = 0;
769
770 list_add_tail(&msg->queue, &sdd->queue);
771
772 queue_work(sdd->workqueue, &sdd->work);
773
774 spin_unlock_irqrestore(&sdd->lock, flags);
775
776 return 0;
777}
778
779/*
780 * Here we only check the validity of requested configuration
781 * and save the configuration in a local data-structure.
782 * The controller is actually configured only just before we
783 * get a message to transfer.
784 */
785static int s3c64xx_spi_setup(struct spi_device *spi)
786{
787 struct s3c64xx_spi_csinfo *cs = spi->controller_data;
788 struct s3c64xx_spi_driver_data *sdd;
789 struct s3c64xx_spi_cntrlr_info *sci;
790 struct spi_message *msg;
791 u32 psr, speed;
792 unsigned long flags;
793 int err = 0;
794
795 if (cs == NULL || cs->set_level == NULL) {
796 dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
797 return -ENODEV;
798 }
799
800 sdd = spi_master_get_devdata(spi->master);
801 sci = sdd->cntrlr_info;
802
803 spin_lock_irqsave(&sdd->lock, flags);
804
805 list_for_each_entry(msg, &sdd->queue, queue) {
806 /* Is some message already queued for this device? */
807 if (msg->spi == spi) {
808 dev_err(&spi->dev,
809 "setup: attempt while mssg in queue!\n");
810 spin_unlock_irqrestore(&sdd->lock, flags);
811 return -EBUSY;
812 }
813 }
814
815 if (sdd->state & SUSPND) {
816 spin_unlock_irqrestore(&sdd->lock, flags);
817 dev_err(&spi->dev,
818 "setup: SPI-%d not active!\n", spi->master->bus_num);
819 return -ESHUTDOWN;
820 }
821
822 spin_unlock_irqrestore(&sdd->lock, flags);
823
824 if (spi->bits_per_word != 8
825 && spi->bits_per_word != 16
826 && spi->bits_per_word != 32) {
827 dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n",
828 spi->bits_per_word);
829 err = -EINVAL;
830 goto setup_exit;
831 }
832
833 /* Check if we can provide the requested rate */
834 speed = clk_get_rate(sci->src_clk) / 2 / (0 + 1); /* Max possible */
835
836 if (spi->max_speed_hz > speed)
837 spi->max_speed_hz = speed;
838
839 psr = clk_get_rate(sci->src_clk) / 2 / spi->max_speed_hz - 1;
840 psr &= S3C64XX_SPI_PSR_MASK;
841 if (psr == S3C64XX_SPI_PSR_MASK)
842 psr--;
843
844 speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
845 if (spi->max_speed_hz < speed) {
846 if (psr+1 < S3C64XX_SPI_PSR_MASK) {
847 psr++;
848 } else {
849 err = -EINVAL;
850 goto setup_exit;
851 }
852 }
853
854 speed = clk_get_rate(sci->src_clk) / 2 / (psr + 1);
855 if (spi->max_speed_hz >= speed)
856 spi->max_speed_hz = speed;
857 else
858 err = -EINVAL;
859
860setup_exit:
861
862 /* setup() returns with device de-selected */
863 disable_cs(sdd, spi);
864
865 return err;
866}
867
868static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
869{
870 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
871 void __iomem *regs = sdd->regs;
872 unsigned int val;
873
874 sdd->cur_speed = 0;
875
876 S3C64XX_SPI_DEACT(sdd);
877
878 /* Disable Interrupts - we use Polling if not DMA mode */
879 writel(0, regs + S3C64XX_SPI_INT_EN);
880
881 writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
882 regs + S3C64XX_SPI_CLK_CFG);
883 writel(0, regs + S3C64XX_SPI_MODE_CFG);
884 writel(0, regs + S3C64XX_SPI_PACKET_CNT);
885
886 /* Clear any irq pending bits */
887 writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
888 regs + S3C64XX_SPI_PENDING_CLR);
889
890 writel(0, regs + S3C64XX_SPI_SWAP_CFG);
891
892 val = readl(regs + S3C64XX_SPI_MODE_CFG);
893 val &= ~S3C64XX_SPI_MODE_4BURST;
894 val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
895 val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
896 writel(val, regs + S3C64XX_SPI_MODE_CFG);
897
898 flush_fifo(sdd);
899}
900
901static int __init s3c64xx_spi_probe(struct platform_device *pdev)
902{
903 struct resource *mem_res, *dmatx_res, *dmarx_res;
904 struct s3c64xx_spi_driver_data *sdd;
905 struct s3c64xx_spi_cntrlr_info *sci;
906 struct spi_master *master;
907 int ret;
908
909 if (pdev->id < 0) {
910 dev_err(&pdev->dev,
911 "Invalid platform device id-%d\n", pdev->id);
912 return -ENODEV;
913 }
914
915 if (pdev->dev.platform_data == NULL) {
916 dev_err(&pdev->dev, "platform_data missing!\n");
917 return -ENODEV;
918 }
919
920 /* Check for availability of necessary resource */
921
922 dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
923 if (dmatx_res == NULL) {
924 dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
925 return -ENXIO;
926 }
927
928 dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
929 if (dmarx_res == NULL) {
930 dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
931 return -ENXIO;
932 }
933
934 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
935 if (mem_res == NULL) {
936 dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
937 return -ENXIO;
938 }
939
940 master = spi_alloc_master(&pdev->dev,
941 sizeof(struct s3c64xx_spi_driver_data));
942 if (master == NULL) {
943 dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
944 return -ENOMEM;
945 }
946
947 sci = pdev->dev.platform_data;
948
949 platform_set_drvdata(pdev, master);
950
951 sdd = spi_master_get_devdata(master);
952 sdd->master = master;
953 sdd->cntrlr_info = sci;
954 sdd->pdev = pdev;
955 sdd->sfr_start = mem_res->start;
956 sdd->tx_dmach = dmatx_res->start;
957 sdd->rx_dmach = dmarx_res->start;
958
959 sdd->cur_bpw = 8;
960
961 master->bus_num = pdev->id;
962 master->setup = s3c64xx_spi_setup;
963 master->transfer = s3c64xx_spi_transfer;
964 master->num_chipselect = sci->num_cs;
965 master->dma_alignment = 8;
966 /* the spi->mode bits understood by this driver: */
967 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
968
969 if (request_mem_region(mem_res->start,
970 resource_size(mem_res), pdev->name) == NULL) {
971 dev_err(&pdev->dev, "Req mem region failed\n");
972 ret = -ENXIO;
973 goto err0;
974 }
975
976 sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
977 if (sdd->regs == NULL) {
978 dev_err(&pdev->dev, "Unable to remap IO\n");
979 ret = -ENXIO;
980 goto err1;
981 }
982
983 if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
984 dev_err(&pdev->dev, "Unable to config gpio\n");
985 ret = -EBUSY;
986 goto err2;
987 }
988
989 /* Setup clocks */
990 sdd->clk = clk_get(&pdev->dev, "spi");
991 if (IS_ERR(sdd->clk)) {
992 dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
993 ret = PTR_ERR(sdd->clk);
994 goto err3;
995 }
996
997 if (clk_enable(sdd->clk)) {
998 dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
999 ret = -EBUSY;
1000 goto err4;
1001 }
1002
1003 if (sci->src_clk_nr == S3C64XX_SPI_SRCCLK_PCLK)
1004 sci->src_clk = sdd->clk;
1005 else
1006 sci->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
1007 if (IS_ERR(sci->src_clk)) {
1008 dev_err(&pdev->dev,
1009 "Unable to acquire clock '%s'\n", sci->src_clk_name);
1010 ret = PTR_ERR(sci->src_clk);
1011 goto err5;
1012 }
1013
1014 if (sci->src_clk != sdd->clk && clk_enable(sci->src_clk)) {
1015 dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
1016 sci->src_clk_name);
1017 ret = -EBUSY;
1018 goto err6;
1019 }
1020
1021 sdd->workqueue = create_singlethread_workqueue(
1022 dev_name(master->dev.parent));
1023 if (sdd->workqueue == NULL) {
1024 dev_err(&pdev->dev, "Unable to create workqueue\n");
1025 ret = -ENOMEM;
1026 goto err7;
1027 }
1028
1029 /* Set up Default Mode */
1030 s3c64xx_spi_hwinit(sdd, pdev->id);
1031
1032 spin_lock_init(&sdd->lock);
1033 init_completion(&sdd->xfer_completion);
1034 INIT_WORK(&sdd->work, s3c64xx_spi_work);
1035 INIT_LIST_HEAD(&sdd->queue);
1036
1037 if (spi_register_master(master)) {
1038 dev_err(&pdev->dev, "cannot register SPI master\n");
1039 ret = -EBUSY;
1040 goto err8;
1041 }
1042
1043 dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
1044 "with %d Slaves attached\n",
1045 pdev->id, master->num_chipselect);
1046 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]"
1047 "\tDMA=[Rx-%d, Tx-%d]\n",
1048 mem_res->start, mem_res->end,
1049 sdd->rx_dmach, sdd->tx_dmach);
1050
1051 return 0;
1052
1053err8:
1054 destroy_workqueue(sdd->workqueue);
1055err7:
1056 if (sci->src_clk != sdd->clk)
1057 clk_disable(sci->src_clk);
1058err6:
1059 if (sci->src_clk != sdd->clk)
1060 clk_put(sci->src_clk);
1061err5:
1062 clk_disable(sdd->clk);
1063err4:
1064 clk_put(sdd->clk);
1065err3:
1066err2:
1067 iounmap((void *) sdd->regs);
1068err1:
1069 release_mem_region(mem_res->start, resource_size(mem_res));
1070err0:
1071 platform_set_drvdata(pdev, NULL);
1072 spi_master_put(master);
1073
1074 return ret;
1075}
1076
1077static int s3c64xx_spi_remove(struct platform_device *pdev)
1078{
1079 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1080 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1081 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1082 struct resource *mem_res;
1083 unsigned long flags;
1084
1085 spin_lock_irqsave(&sdd->lock, flags);
1086 sdd->state |= SUSPND;
1087 spin_unlock_irqrestore(&sdd->lock, flags);
1088
1089 while (sdd->state & SPIBUSY)
1090 msleep(10);
1091
1092 spi_unregister_master(master);
1093
1094 destroy_workqueue(sdd->workqueue);
1095
1096 if (sci->src_clk != sdd->clk)
1097 clk_disable(sci->src_clk);
1098
1099 if (sci->src_clk != sdd->clk)
1100 clk_put(sci->src_clk);
1101
1102 clk_disable(sdd->clk);
1103 clk_put(sdd->clk);
1104
1105 iounmap((void *) sdd->regs);
1106
1107 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1108 release_mem_region(mem_res->start, resource_size(mem_res));
1109
1110 platform_set_drvdata(pdev, NULL);
1111 spi_master_put(master);
1112
1113 return 0;
1114}
1115
1116#ifdef CONFIG_PM
1117static int s3c64xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1118{
1119 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1120 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1121 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1122 struct s3c64xx_spi_csinfo *cs;
1123 unsigned long flags;
1124
1125 spin_lock_irqsave(&sdd->lock, flags);
1126 sdd->state |= SUSPND;
1127 spin_unlock_irqrestore(&sdd->lock, flags);
1128
1129 while (sdd->state & SPIBUSY)
1130 msleep(10);
1131
1132 /* Disable the clock */
1133 if (sci->src_clk != sdd->clk)
1134 clk_disable(sci->src_clk);
1135
1136 clk_disable(sdd->clk);
1137
1138 sdd->cur_speed = 0; /* Output Clock is stopped */
1139
1140 return 0;
1141}
1142
1143static int s3c64xx_spi_resume(struct platform_device *pdev)
1144{
1145 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
1146 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
1147 struct s3c64xx_spi_cntrlr_info *sci = sdd->cntrlr_info;
1148 unsigned long flags;
1149
1150 sci->cfg_gpio(pdev);
1151
1152 /* Enable the clock */
1153 if (sci->src_clk != sdd->clk)
1154 clk_enable(sci->src_clk);
1155
1156 clk_enable(sdd->clk);
1157
1158 s3c64xx_spi_hwinit(sdd, pdev->id);
1159
1160 spin_lock_irqsave(&sdd->lock, flags);
1161 sdd->state &= ~SUSPND;
1162 spin_unlock_irqrestore(&sdd->lock, flags);
1163
1164 return 0;
1165}
1166#else
1167#define s3c64xx_spi_suspend NULL
1168#define s3c64xx_spi_resume NULL
1169#endif /* CONFIG_PM */
1170
1171static struct platform_driver s3c64xx_spi_driver = {
1172 .driver = {
1173 .name = "s3c64xx-spi",
1174 .owner = THIS_MODULE,
1175 },
1176 .remove = s3c64xx_spi_remove,
1177 .suspend = s3c64xx_spi_suspend,
1178 .resume = s3c64xx_spi_resume,
1179};
1180MODULE_ALIAS("platform:s3c64xx-spi");
1181
1182static int __init s3c64xx_spi_init(void)
1183{
1184 return platform_driver_probe(&s3c64xx_spi_driver, s3c64xx_spi_probe);
1185}
1186module_init(s3c64xx_spi_init);
1187
1188static void __exit s3c64xx_spi_exit(void)
1189{
1190 platform_driver_unregister(&s3c64xx_spi_driver);
1191}
1192module_exit(s3c64xx_spi_exit);
1193
1194MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1195MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
1196MODULE_LICENSE("GPL");
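
Editor's note: below is a minimal, hypothetical sketch of the board-side glue this driver expects; it is not part of the patch. Probe reads platform data (struct s3c64xx_spi_cntrlr_info from <plat/spi.h>, supplying num_cs, src_clk_nr/src_clk_name, cfg_gpio, fifo_lvl_mask and rx_lvl_offset), while setup()/enable_cs() require each slave's controller_data to be a struct s3c64xx_spi_csinfo providing fb_delay and a set_level() chip-select hook. Everything named example_*, the GPIO number, fb_delay and the speed are placeholders chosen for illustration.

/* Hypothetical board code (not from this patch): registers one SPI
 * slave on bus 0 with the controller_data hooks spi_s3c64xx.c uses.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
#include <plat/spi.h>

static void example_cs_set_level(int high)
{
	gpio_set_value(S3C64XX_GPC(3), high);	/* placeholder CS pad */
}

static struct s3c64xx_spi_csinfo example_csinfo = {
	.fb_delay	= 0x2,			/* feedback clock delay */
	.set_level	= example_cs_set_level,	/* drives the CS line */
};

static struct spi_board_info example_spi_devs[] __initdata = {
	{
		.modalias		= "spidev",
		.bus_num		= 0,		/* matches pdev->id */
		.chip_select		= 0,
		.max_speed_hz		= 10000000,
		.mode			= SPI_MODE_0,
		.controller_data	= &example_csinfo,
	},
};

static int __init example_spi_devs_init(void)
{
	return spi_register_board_info(example_spi_devs,
				       ARRAY_SIZE(example_spi_devs));
}
arch_initcall(example_spi_devs_init);

With that in place, transfers that fit within half the FIFO are polled and larger ones go through the S3C DMA API, so beyond the IORESOURCE_DMA and IORESOURCE_MEM entries on the platform device no further board support is needed.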