Diffstat (limited to 'drivers')
-rw-r--r--  drivers/spi/Kconfig   |    8
-rw-r--r--  drivers/spi/Makefile  |    1
-rw-r--r--  drivers/spi/spi_imx.c | 1769
3 files changed, 1778 insertions, 0 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 2a2f44d1367d..b217a65453f5 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -75,6 +75,13 @@ config SPI_BUTTERFLY
 	  inexpensive battery powered microcontroller evaluation board.
 	  This same cable can be used to flash new firmware.
 
+config SPI_IMX
+	tristate "Freescale iMX SPI controller"
+	depends on SPI_MASTER && ARCH_IMX && EXPERIMENTAL
+	help
+	  This enables using the Freescale iMX SPI controller in master
+	  mode.
+
 config SPI_MPC83xx
 	tristate "Freescale MPC83xx SPI controller"
 	depends on SPI_MASTER && PPC_83xx && EXPERIMENTAL
@@ -94,6 +101,7 @@ config SPI_OMAP_UWIRE
 	help
 	  This hooks up to the MicroWire controller on OMAP1 chips.
 
+
 config SPI_PXA2XX
 	tristate "PXA2xx SSP SPI master"
 	depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f1a3b96b2a63..e01104d1ebf8 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_MASTER)		+= spi.o
 # SPI master controller drivers (bus)
 obj-$(CONFIG_SPI_BITBANG)		+= spi_bitbang.o
 obj-$(CONFIG_SPI_BUTTERFLY)		+= spi_butterfly.o
+obj-$(CONFIG_SPI_IMX)			+= spi_imx.o
 obj-$(CONFIG_SPI_PXA2XX)		+= pxa2xx_spi.o
 obj-$(CONFIG_SPI_OMAP_UWIRE)		+= omap_uwire.o
 obj-$(CONFIG_SPI_MPC83xx)		+= spi_mpc83xx.o
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c
new file mode 100644
index 000000000000..6ccf8a12a21d
--- /dev/null
+++ b/drivers/spi/spi_imx.c
@@ -0,0 +1,1769 @@
/*
 * drivers/spi/spi_imx.c
 *
 * Copyright (C) 2006 SWAPP
 *	Andrea Paterniani <a.paterniani@swapp-eng.it>
 *
 * Initial version inspired by:
 *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hardware.h>
#include <asm/delay.h>

#include <asm/arch/hardware.h>
#include <asm/arch/imx-dma.h>
#include <asm/arch/spi_imx.h>

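/*
 * A minimal board-file usage sketch (an illustration, not part of this
 * patch): the controller is registered as a platform device named
 * "imx-spi" whose platform data is a struct spi_imx_master, e.g.
 *
 *	static struct spi_imx_master imx_spi_1_info = {
 *		.num_chipselect	= 2,
 *		.enable_dma	= 1,
 *	};
 *
 * The variable name and field values above are hypothetical; only the
 * struct fields come from this driver's use of its platform data.
 */
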
/*-------------------------------------------------------------------------*/
/* SPI Registers offsets from peripheral base address */
#define SPI_RXDATA		(0x00)
#define SPI_TXDATA		(0x04)
#define SPI_CONTROL		(0x08)
#define SPI_INT_STATUS		(0x0C)
#define SPI_TEST		(0x10)
#define SPI_PERIOD		(0x14)
#define SPI_DMA			(0x18)
#define SPI_RESET		(0x1C)

/* SPI Control Register Bit Fields & Masks */
#define SPI_CONTROL_BITCOUNT_MASK	(0xF)		/* Bit Count Mask */
#define SPI_CONTROL_BITCOUNT(n)		(((n) - 1) & SPI_CONTROL_BITCOUNT_MASK)
#define SPI_CONTROL_POL			(0x1 << 4)	/* Clock Polarity Mask */
#define SPI_CONTROL_POL_ACT_HIGH	(0x0 << 4)	/* Active high pol. (0=idle) */
#define SPI_CONTROL_POL_ACT_LOW		(0x1 << 4)	/* Active low pol. (1=idle) */
#define SPI_CONTROL_PHA			(0x1 << 5)	/* Clock Phase Mask */
#define SPI_CONTROL_PHA_0		(0x0 << 5)	/* Clock Phase 0 */
#define SPI_CONTROL_PHA_1		(0x1 << 5)	/* Clock Phase 1 */
#define SPI_CONTROL_SSCTL		(0x1 << 6)	/* /SS Waveform Select Mask */
#define SPI_CONTROL_SSCTL_0		(0x0 << 6)	/* Master: /SS stays low between SPI burst
							   Slave: RXFIFO advanced by BIT_COUNT */
#define SPI_CONTROL_SSCTL_1		(0x1 << 6)	/* Master: /SS insert pulse between SPI burst
							   Slave: RXFIFO advanced by /SS rising edge */
#define SPI_CONTROL_SSPOL		(0x1 << 7)	/* /SS Polarity Select Mask */
#define SPI_CONTROL_SSPOL_ACT_LOW	(0x0 << 7)	/* /SS Active low */
#define SPI_CONTROL_SSPOL_ACT_HIGH	(0x1 << 7)	/* /SS Active high */
#define SPI_CONTROL_XCH			(0x1 << 8)	/* Exchange */
#define SPI_CONTROL_SPIEN		(0x1 << 9)	/* SPI Module Enable */
#define SPI_CONTROL_MODE		(0x1 << 10)	/* SPI Mode Select Mask */
#define SPI_CONTROL_MODE_SLAVE		(0x0 << 10)	/* SPI Mode Slave */
#define SPI_CONTROL_MODE_MASTER		(0x1 << 10)	/* SPI Mode Master */
#define SPI_CONTROL_DRCTL		(0x3 << 11)	/* /SPI_RDY Control Mask */
#define SPI_CONTROL_DRCTL_0		(0x0 << 11)	/* Ignore /SPI_RDY */
#define SPI_CONTROL_DRCTL_1		(0x1 << 11)	/* /SPI_RDY falling edge triggers input */
#define SPI_CONTROL_DRCTL_2		(0x2 << 11)	/* /SPI_RDY active low level triggers input */
#define SPI_CONTROL_DATARATE		(0x7 << 13)	/* Data Rate Mask */
#define SPI_PERCLK2_DIV_MIN		(0)		/* PERCLK2:4 */
#define SPI_PERCLK2_DIV_MAX		(7)		/* PERCLK2:512 */
#define SPI_CONTROL_DATARATE_MIN	(SPI_PERCLK2_DIV_MAX << 13)
#define SPI_CONTROL_DATARATE_MAX	(SPI_PERCLK2_DIV_MIN << 13)
#define SPI_CONTROL_DATARATE_BAD	(SPI_CONTROL_DATARATE_MIN + 1)
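/*
 * The 3-bit DATARATE field selects a power-of-two divider of PERCLK2:
 * a field value n gives SCLK = PERCLK2 / (4 << n), i.e. n = 0 divides
 * by 4 and n = 7 divides by 512 (see spi_speed_hz() below).  As a
 * worked example, assuming a purely illustrative 96 MHz PERCLK2,
 * n = 0 yields 24 MHz and n = 7 yields 187.5 kHz.
 */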

/* SPI Interrupt/Status Register Bit Fields & Masks */
#define SPI_STATUS_TE	(0x1 << 0)	/* TXFIFO Empty Status */
#define SPI_STATUS_TH	(0x1 << 1)	/* TXFIFO Half Status */
#define SPI_STATUS_TF	(0x1 << 2)	/* TXFIFO Full Status */
#define SPI_STATUS_RR	(0x1 << 3)	/* RXFIFO Data Ready Status */
#define SPI_STATUS_RH	(0x1 << 4)	/* RXFIFO Half Status */
#define SPI_STATUS_RF	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_STATUS_RO	(0x1 << 6)	/* RXFIFO Overflow */
#define SPI_STATUS_BO	(0x1 << 7)	/* Bit Count Overflow */
#define SPI_STATUS	(0xFF)		/* SPI Status Mask */
#define SPI_INTEN_TE	(0x1 << 8)	/* TXFIFO Empty Interrupt Enable */
#define SPI_INTEN_TH	(0x1 << 9)	/* TXFIFO Half Interrupt Enable */
#define SPI_INTEN_TF	(0x1 << 10)	/* TXFIFO Full Interrupt Enable */
#define SPI_INTEN_RE	(0x1 << 11)	/* RXFIFO Data Ready Interrupt Enable */
#define SPI_INTEN_RH	(0x1 << 12)	/* RXFIFO Half Interrupt Enable */
#define SPI_INTEN_RF	(0x1 << 13)	/* RXFIFO Full Interrupt Enable */
#define SPI_INTEN_RO	(0x1 << 14)	/* RXFIFO Overflow Interrupt Enable */
#define SPI_INTEN_BO	(0x1 << 15)	/* Bit Count Overflow Interrupt Enable */
#define SPI_INTEN	(0xFF << 8)	/* SPI Interrupt Enable Mask */

/* SPI Test Register Bit Fields & Masks */
#define SPI_TEST_TXCNT		(0xF << 0)	/* TXFIFO Counter */
#define SPI_TEST_RXCNT_LSB	(4)		/* RXFIFO Counter LSB */
#define SPI_TEST_RXCNT		(0xF << 4)	/* RXFIFO Counter */
#define SPI_TEST_SSTATUS	(0xF << 8)	/* State Machine Status */
#define SPI_TEST_LBC		(0x1 << 14)	/* Loop Back Control */

/* SPI Period Register Bit Fields & Masks */
#define SPI_PERIOD_WAIT		(0x7FFF << 0)	/* Wait Between Transactions */
#define SPI_PERIOD_MAX_WAIT	(0x7FFF)	/* Max Wait Between Transactions */
#define SPI_PERIOD_CSRC		(0x1 << 15)	/* Period Clock Source Mask */
#define SPI_PERIOD_CSRC_BCLK	(0x0 << 15)	/* Period Clock Source is Bit Clock */
#define SPI_PERIOD_CSRC_32768	(0x1 << 15)	/* Period Clock Source is 32.768 kHz Clock */
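/*
 * Example period register encoding: setup() below programs
 * SPI_PERIOD_CSRC_BCLK | (bclk_wait & SPI_PERIOD_WAIT), so a bclk_wait
 * of 8 inserts eight bit-clock cycles of idle time between bursts.
 */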

/* SPI DMA Register Bit Fields & Masks */
#define SPI_DMA_RHDMA	(0xF << 4)	/* RXFIFO Half Status */
#define SPI_DMA_RFDMA	(0x1 << 5)	/* RXFIFO Full Status */
#define SPI_DMA_TEDMA	(0x1 << 6)	/* TXFIFO Empty Status */
#define SPI_DMA_THDMA	(0x1 << 7)	/* TXFIFO Half Status */
#define SPI_DMA_RHDEN	(0x1 << 12)	/* RXFIFO Half DMA Request Enable */
#define SPI_DMA_RFDEN	(0x1 << 13)	/* RXFIFO Full DMA Request Enable */
#define SPI_DMA_TEDEN	(0x1 << 14)	/* TXFIFO Empty DMA Request Enable */
#define SPI_DMA_THDEN	(0x1 << 15)	/* TXFIFO Half DMA Request Enable */

/* SPI Soft Reset Register Bit Fields & Masks */
#define SPI_RESET_START	(0x1)		/* Start */

/* Default SPI configuration values */
#define SPI_DEFAULT_CONTROL		\
(					\
	SPI_CONTROL_BITCOUNT(16) |	\
	SPI_CONTROL_POL_ACT_HIGH |	\
	SPI_CONTROL_PHA_0 |		\
	SPI_CONTROL_SPIEN |		\
	SPI_CONTROL_SSCTL_1 |		\
	SPI_CONTROL_MODE_MASTER |	\
	SPI_CONTROL_DRCTL_0 |		\
	SPI_CONTROL_DATARATE_MIN	\
)
#define SPI_DEFAULT_ENABLE_LOOPBACK	(0)
#define SPI_DEFAULT_ENABLE_DMA		(0)
#define SPI_DEFAULT_PERIOD_WAIT		(8)
/*-------------------------------------------------------------------------*/


/*-------------------------------------------------------------------------*/
/* TX/RX SPI FIFO size */
#define SPI_FIFO_DEPTH			(8)
#define SPI_FIFO_BYTE_WIDTH		(2)
#define SPI_FIFO_OVERFLOW_MARGIN	(2)

/* DMA burst length for half full/empty request trigger */
#define SPI_DMA_BLR	(SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2)
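/*
 * With an 8-entry, 2-byte-wide FIFO this works out to 8 * 2 / 2 = 8
 * bytes per burst, i.e. each half-FIFO DMA request moves half of the
 * FIFO's 16-byte capacity.
 */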

/* Dummy char output to achieve reads.
   Choosing something different from all zeroes may help pattern recognition
   for oscilloscope analysis, but may break some drivers. */
#define SPI_DUMMY_u8	0
#define SPI_DUMMY_u16	((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8)
#define SPI_DUMMY_u32	((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16)

/**
 * Macro to change a u32 field:
 * @r : register to edit
 * @m : bit mask
 * @v : new value for the field correctly bit-aligned
 */
#define u32_EDIT(r, m, v)	r = (r & ~(m)) | (v)
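/*
 * For example (usage taken from pump_transfers() below):
 *
 *	u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);
 *
 * clears the DATARATE field of 'control' and ORs in the new,
 * already-shifted field value 'tmp'.
 */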

/* Message state */
#define START_STATE	((void*)0)
#define RUNNING_STATE	((void*)1)
#define DONE_STATE	((void*)2)
#define ERROR_STATE	((void*)-1)

/* Queue state */
#define QUEUE_RUNNING	(0)
#define QUEUE_STOPPED	(1)

#define IS_DMA_ALIGNED(x)	(((u32)(x) & 0x03) == 0)
/*-------------------------------------------------------------------------*/


/*-------------------------------------------------------------------------*/
/* Driver data structs */

/* Context */
struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SPI framework hookup */
	struct spi_master *master;

	/* IMX hookup */
	struct spi_imx_master *master_info;

	/* Memory resources and SPI regs virtual address */
	struct resource *ioarea;
	void __iomem *regs;

	/* SPI RX_DATA physical address */
	dma_addr_t rd_data_phys;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct work;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message, transfer and state */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;

	/* Rd / Wr buffer pointers */
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;

	u8 rd_only;
	u8 n_bytes;
	int cs_change;

	/* Function pointers */
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);

	/* DMA setup */
	int rx_channel;
	int tx_channel;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	int rx_dma_needs_unmap;
	int tx_dma_needs_unmap;
	size_t tx_map_len;
	u32 dummy_dma_buf ____cacheline_aligned;
};

/* Runtime state */
struct chip_data {
	u32 control;
	u32 period;
	u32 test;

	u8 enable_dma:1;
	u8 bits_per_word;
	u8 n_bytes;
	u32 max_speed_hz;

	void (*cs_control)(u32 command);
};
/*-------------------------------------------------------------------------*/


static void pump_messages(struct work_struct *work);

static int flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;
	void __iomem *regs = drv_data->regs;
	volatile u32 d;

	dev_dbg(&drv_data->pdev->dev, "flush\n");
	do {
		while (readl(regs + SPI_INT_STATUS) & SPI_STATUS_RR)
			d = readl(regs + SPI_RXDATA);
	} while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) && limit--);

	return limit;
}

static void restore_state(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	struct chip_data *chip = drv_data->cur_chip;

	/* Load chip registers */
	dev_dbg(&drv_data->pdev->dev,
		"restore_state\n"
		"    test    = 0x%08X\n"
		"    control = 0x%08X\n",
		chip->test,
		chip->control);
	writel(chip->test, regs + SPI_TEST);
	writel(chip->period, regs + SPI_PERIOD);
	writel(0, regs + SPI_INT_STATUS);
	writel(chip->control, regs + SPI_CONTROL);
}

static void null_cs_control(u32 command)
{
}

static inline u32 data_to_write(struct driver_data *drv_data)
{
	return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes;
}

static inline u32 data_to_read(struct driver_data *drv_data)
{
	return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes;
}

static int write(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *tx = drv_data->tx;
	void *tx_end = drv_data->tx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_writes;
	u32 fifo_avail_space;
	u32 n;
	u16 d;

	/* Compute how many fifo writes to do */
	remaining_writes = (u32)(tx_end - tx) / n_bytes;
	fifo_avail_space = SPI_FIFO_DEPTH -
				(readl(regs + SPI_TEST) & SPI_TEST_TXCNT);
	if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN))
		/* Leave some TXFIFO margin to avoid an RXFIFO overflow */
		fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN;
	n = min(remaining_writes, fifo_avail_space);

	dev_dbg(&drv_data->pdev->dev,
		"write type %s\n"
		"    remaining writes = %d\n"
		"    fifo avail space = %d\n"
		"    fifo writes      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_writes,
		fifo_avail_space,
		n);

	if (n > 0) {
		/* Fill SPI TXFIFO */
		if (drv_data->rd_only) {
			tx += n * n_bytes;
			while (n--)
				writel(SPI_DUMMY_u16, regs + SPI_TXDATA);
		} else {
			if (n_bytes == 1) {
				while (n--) {
					d = *(u8*)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 1;
				}
			} else {
				while (n--) {
					d = *(u16*)tx;
					writel(d, regs + SPI_TXDATA);
					tx += 2;
				}
			}
		}

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Update tx pointer */
		drv_data->tx = tx;
	}

	return (tx >= tx_end);
}

static int read(struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;
	void *rx = drv_data->rx;
	void *rx_end = drv_data->rx_end;
	u8 n_bytes = drv_data->n_bytes;
	u32 remaining_reads;
	u32 fifo_rxcnt;
	u32 n;
	u16 d;

	/* Compute how many fifo reads to do */
	remaining_reads = (u32)(rx_end - rx) / n_bytes;
	fifo_rxcnt = (readl(regs + SPI_TEST) & SPI_TEST_RXCNT) >>
			SPI_TEST_RXCNT_LSB;
	n = min(remaining_reads, fifo_rxcnt);

	dev_dbg(&drv_data->pdev->dev,
		"read type %s\n"
		"    remaining reads = %d\n"
		"    fifo rx count   = %d\n"
		"    fifo reads      = %d\n",
		(n_bytes == 1) ? "u8" : "u16",
		remaining_reads,
		fifo_rxcnt,
		n);

	if (n > 0) {
		/* Read SPI RXFIFO */
		if (n_bytes == 1) {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u8*)rx) = d;
				rx += 1;
			}
		} else {
			while (n--) {
				d = readl(regs + SPI_RXDATA);
				*((u16*)rx) = d;
				rx += 2;
			}
		}

		/* Update rx pointer */
		drv_data->rx = rx;
	}

	return (rx >= rx_end);
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	}

	return DONE_STATE;
}

static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg;
	struct device *dev;
	void *buf;

	drv_data->rx_dma_needs_unmap = 0;
	drv_data->tx_dma_needs_unmap = 0;

	if (!drv_data->master_info->enable_dma ||
		!drv_data->cur_chip->enable_dma)
			return -1;

	msg = drv_data->cur_msg;
	dev = &msg->spi->dev;
	if (msg->is_dma_mapped) {
		if (drv_data->tx_dma)
			/* The caller provided at least dma and cpu virtual
			   addresses for the write; pump_transfers() will
			   consider the transfer as write only if the cpu rx
			   virtual address is NULL */
			return 0;

		if (drv_data->rx_dma) {
			/* The caller provided dma and cpu virtual addresses
			   to perform a read only transfer -->
			   use drv_data->dummy_dma_buf for dummy writes to
			   achieve reads */
			buf = &drv_data->dummy_dma_buf;
			drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
			drv_data->tx_dma = dma_map_single(dev,
							buf,
							drv_data->tx_map_len,
							DMA_TO_DEVICE);
			if (dma_mapping_error(drv_data->tx_dma))
				return -1;

			drv_data->tx_dma_needs_unmap = 1;

			/* Flag the transfer as rd_only for pump_transfers()
			   DMA regs programming (should be redundant) */
			drv_data->tx = NULL;

			return 0;
		}
	}

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return -1;

	/* NULL rx means write-only transfer and no mapping is needed
	   since rx DMA will not be used */
	if (drv_data->rx) {
		buf = drv_data->rx;
		drv_data->rx_dma = dma_map_single(
					dev,
					buf,
					drv_data->len,
					DMA_FROM_DEVICE);
		if (dma_mapping_error(drv_data->rx_dma))
			return -1;
		drv_data->rx_dma_needs_unmap = 1;
	}

	if (drv_data->tx == NULL) {
		/* Read only message --> use drv_data->dummy_dma_buf for dummy
		   writes to achieve reads */
		buf = &drv_data->dummy_dma_buf;
		drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf);
	} else {
		buf = drv_data->tx;
		drv_data->tx_map_len = drv_data->len;
	}
	drv_data->tx_dma = dma_map_single(dev,
					buf,
					drv_data->tx_map_len,
					DMA_TO_DEVICE);
	if (dma_mapping_error(drv_data->tx_dma)) {
		if (drv_data->rx_dma) {
			dma_unmap_single(dev,
					drv_data->rx_dma,
					drv_data->len,
					DMA_FROM_DEVICE);
			drv_data->rx_dma_needs_unmap = 0;
		}
		return -1;
	}
	drv_data->tx_dma_needs_unmap = 1;

	return 0;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (drv_data->rx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->rx_dma,
				drv_data->len,
				DMA_FROM_DEVICE);
		drv_data->rx_dma_needs_unmap = 0;
	}
	if (drv_data->tx_dma_needs_unmap) {
		dma_unmap_single(dev,
				drv_data->tx_dma,
				drv_data->tx_map_len,
				DMA_TO_DEVICE);
		drv_data->tx_dma_needs_unmap = 0;
	}
}

/* Caller already set message->status (dma is already blocked) */
static void giveback(struct spi_message *message, struct driver_data *drv_data)
{
	void __iomem *regs = drv_data->regs;

	/* Bring SPI to sleep; restore_state() and pump_transfers()
	   will do new setup */
	writel(0, regs + SPI_INT_STATUS);
	writel(0, regs + SPI_DMA);

	drv_data->cs_control(SPI_CS_DEASSERT);

	message->state = NULL;
	if (message->complete)
		message->complete(message->context);

	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	queue_work(drv_data->workqueue, &drv_data->work);
}

static void dma_err_handler(int channel, void *data, int errcode)
{
	struct driver_data *drv_data = data;
	struct spi_message *msg = drv_data->cur_msg;

	dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n");

	/* Disable both rx and tx dma channels */
	imx_dma_disable(drv_data->rx_channel);
	imx_dma_disable(drv_data->tx_channel);

	if (flush(drv_data) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_err_handler - flush failed\n");

	unmap_dma_buffers(drv_data);

	msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_tx_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;

	dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n");

	imx_dma_disable(channel);

	/* Now wait for TX FIFO empty */
	writel(readl(drv_data->regs + SPI_INT_STATUS) | SPI_INTEN_TE,
		drv_data->regs + SPI_INT_STATUS);
}

static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 status;
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	unsigned long limit;

	status = readl(regs + SPI_INT_STATUS);

	if ((status & SPI_INTEN_RO) && (status & SPI_STATUS_RO)) {
		writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);

		imx_dma_disable(drv_data->rx_channel);
		unmap_dma_buffers(drv_data);

		if (flush(drv_data) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_transfer - flush failed\n");

		dev_warn(&drv_data->pdev->dev,
			"dma_transfer - fifo overrun\n");

		msg->state = ERROR_STATE;
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	if (status & SPI_STATUS_TE) {
		writel(status & ~SPI_INTEN_TE, regs + SPI_INT_STATUS);

		if (drv_data->rx) {
			/* Wait for the end of transfer before reading
			   trailing data */
			limit = loops_per_jiffy << 1;
			while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) &&
					limit--);

			if (limit == 0)
				dev_err(&drv_data->pdev->dev,
					"dma_transfer - end of tx failed\n");
			else
				dev_dbg(&drv_data->pdev->dev,
					"dma_transfer - end of tx\n");

			imx_dma_disable(drv_data->rx_channel);
			unmap_dma_buffers(drv_data);

			/* Calculate the number of trailing bytes and
			   read them */
			dev_dbg(&drv_data->pdev->dev,
				"dma_transfer - test = 0x%08X\n",
				readl(regs + SPI_TEST));
			drv_data->rx = drv_data->rx_end -
					((readl(regs + SPI_TEST) &
					SPI_TEST_RXCNT) >>
					SPI_TEST_RXCNT_LSB)*drv_data->n_bytes;
			read(drv_data);
		} else {
			/* Write only transfer */
			unmap_dma_buffers(drv_data);

			if (flush(drv_data) == 0)
				dev_err(&drv_data->pdev->dev,
					"dma_transfer - flush failed\n");
		}

		/* End of transfer, update total bytes transferred */
		msg->actual_length += drv_data->len;

		/* Release chip select if requested, transfer delays are
		   handled in pump_transfers() */
		if (drv_data->cs_change)
			drv_data->cs_control(SPI_CS_DEASSERT);

		/* Move to next transfer */
		msg->state = next_transfer(drv_data);

		/* Schedule transfer tasklet */
		tasklet_schedule(&drv_data->pump_transfers);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}

static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status;
	irqreturn_t handled = IRQ_NONE;

	status = readl(regs + SPI_INT_STATUS);

	while (status & SPI_STATUS_TH) {
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_wronly_transfer - status = 0x%08X\n", status);

		/* Pump data */
		if (write(drv_data)) {
			writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
				regs + SPI_INT_STATUS);

			dev_dbg(&drv_data->pdev->dev,
				"interrupt_wronly_transfer - end of tx\n");

			if (flush(drv_data) == 0)
				dev_err(&drv_data->pdev->dev,
					"interrupt_wronly_transfer - "
					"flush failed\n");

			/* End of transfer, update total bytes transferred */
			msg->actual_length += drv_data->len;

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Move to next transfer */
			msg->state = next_transfer(drv_data);

			/* Schedule transfer tasklet */
			tasklet_schedule(&drv_data->pump_transfers);

			return IRQ_HANDLED;
		}

		status = readl(regs + SPI_INT_STATUS);

		/* We did something */
		handled = IRQ_HANDLED;
	}

	return handled;
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	void __iomem *regs = drv_data->regs;
	u32 status;
	irqreturn_t handled = IRQ_NONE;
	unsigned long limit;

	status = readl(regs + SPI_INT_STATUS);

	while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
		dev_dbg(&drv_data->pdev->dev,
			"interrupt_transfer - status = 0x%08X\n", status);

		if (status & SPI_STATUS_RO) {
			writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
				regs + SPI_INT_STATUS);

			dev_warn(&drv_data->pdev->dev,
				"interrupt_transfer - fifo overrun\n"
				"    data not yet written = %d\n"
				"    data not yet read    = %d\n",
				data_to_write(drv_data),
				data_to_read(drv_data));

			if (flush(drv_data) == 0)
				dev_err(&drv_data->pdev->dev,
					"interrupt_transfer - flush failed\n");

			msg->state = ERROR_STATE;
			tasklet_schedule(&drv_data->pump_transfers);

			return IRQ_HANDLED;
		}

		/* Pump data */
		read(drv_data);
		if (write(drv_data)) {
			writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
				regs + SPI_INT_STATUS);

			dev_dbg(&drv_data->pdev->dev,
				"interrupt_transfer - end of tx\n");

			/* Read trailing bytes */
			limit = loops_per_jiffy << 1;
			while ((read(drv_data) == 0) && limit--);

			if (limit == 0)
				dev_err(&drv_data->pdev->dev,
					"interrupt_transfer - "
					"trailing byte read failed\n");
			else
				dev_dbg(&drv_data->pdev->dev,
					"interrupt_transfer - end of rx\n");

			/* End of transfer, update total bytes transferred */
			msg->actual_length += drv_data->len;

			/* Release chip select if requested, transfer delays
			   are handled in pump_transfers */
			if (drv_data->cs_change)
				drv_data->cs_control(SPI_CS_DEASSERT);

			/* Move to next transfer */
			msg->state = next_transfer(drv_data);

			/* Schedule transfer tasklet */
			tasklet_schedule(&drv_data->pump_transfers);

			return IRQ_HANDLED;
		}

		status = readl(regs + SPI_INT_STATUS);

		/* We did something */
		handled = IRQ_HANDLED;
	}

	return handled;
}

static irqreturn_t spi_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = (struct driver_data *)dev_id;

	if (!drv_data->cur_msg) {
		dev_err(&drv_data->pdev->dev,
			"spi_int - bad message state\n");
		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static inline u32 spi_speed_hz(u32 data_rate)
{
	return imx_get_perclk2() / (4 << ((data_rate) >> 13));
}

static u32 spi_data_rate(u32 speed_hz)
{
	u32 div;
	u32 quantized_hz = imx_get_perclk2() >> 2;

	for (div = SPI_PERCLK2_DIV_MIN;
		div <= SPI_PERCLK2_DIV_MAX;
		div++, quantized_hz >>= 1) {
			if (quantized_hz <= speed_hz)
				/* Max available speed <= required speed */
				return div << 13;
	}
	return SPI_CONTROL_DATARATE_BAD;
}
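/*
 * Worked example (assuming a purely illustrative 96 MHz PERCLK2):
 * quantized_hz starts at 96 / 4 = 24 MHz and halves on each pass, so
 * for a requested speed of 10 MHz the loop tests 24, 12 and 6 MHz and
 * returns div = 2, i.e. the fastest available rate (6 MHz) that does
 * not exceed the request.  Requests below PERCLK2/512 (187.5 kHz here)
 * cannot be satisfied and yield SPI_CONTROL_DATARATE_BAD.
 */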

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message;
	struct spi_transfer *transfer, *previous;
	struct chip_data *chip;
	void __iomem *regs;
	u32 tmp, control;

	dev_dbg(&drv_data->pdev->dev, "pump_transfer\n");

	message = drv_data->cur_msg;

	/* Handle abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(message, drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(message, drv_data);
		return;
	}

	chip = drv_data->cur_chip;

	/* Delay if requested at end of transfer */
	transfer = drv_data->cur_transfer;
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	} else {
		/* START_STATE */
		message->state = RUNNING_STATE;
		drv_data->cs_control = chip->cs_control;
	}

	transfer = drv_data->cur_transfer;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len;
	drv_data->cs_change = transfer->cs_change;
	drv_data->rd_only = (drv_data->tx == NULL);

	regs = drv_data->regs;
	control = readl(regs + SPI_CONTROL);

	/* Bits per word setup */
	tmp = transfer->bits_per_word;
	if (tmp == 0) {
		/* Use device setup */
		tmp = chip->bits_per_word;
		drv_data->n_bytes = chip->n_bytes;
	} else
		/* Use per-transfer setup */
		drv_data->n_bytes = (tmp <= 8) ? 1 : 2;
	u32_EDIT(control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);

	/* Speed setup (surely valid because already checked in transfer()) */
	tmp = transfer->speed_hz;
	if (tmp == 0)
		tmp = chip->max_speed_hz;
	tmp = spi_data_rate(tmp);
	u32_EDIT(control, SPI_CONTROL_DATARATE, tmp);

	writel(control, regs + SPI_CONTROL);

	/* Assert device chip-select */
	drv_data->cs_control(SPI_CS_ASSERT);

	/* DMA can only access the SPI FIFOs 16 bits at a time; hence, if
	   bits_per_word is less than or equal to 8, PIO transfers are
	   performed.  Moreover, DMA only pays off for transfers longer
	   than the FIFOs' byte size. */
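	/* With SPI_FIFO_DEPTH = 8 and SPI_FIFO_BYTE_WIDTH = 2, the DMA
	   path below is therefore taken only for 16-bit transfers longer
	   than 16 bytes (and only when the buffers can be DMA-mapped). */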
	if ((drv_data->n_bytes == 2) &&
		(drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) &&
		(map_dma_buffers(drv_data) == 0)) {
		dev_dbg(&drv_data->pdev->dev,
			"pump dma transfer\n"
			"    tx     = %p\n"
			"    tx_dma = %08X\n"
			"    rx     = %p\n"
			"    rx_dma = %08X\n"
			"    len    = %d\n",
			drv_data->tx,
			(unsigned int)drv_data->tx_dma,
			drv_data->rx,
			(unsigned int)drv_data->rx_dma,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Trigger transfer */
		writel(readl(regs + SPI_CONTROL) | SPI_CONTROL_XCH,
			regs + SPI_CONTROL);

		/* Setup tx DMA */
		if (drv_data->tx)
			/* Linear source address */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_LINEAR |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;
		else
			/* Read only transfer -> fixed source address for
			   dummy write to achieve read */
			CCR(drv_data->tx_channel) =
				CCR_DMOD_FIFO |
				CCR_SMOD_FIFO |
				CCR_SSIZ_32 | CCR_DSIZ_16 |
				CCR_REN;

		imx_dma_setup_single(
			drv_data->tx_channel,
			drv_data->tx_dma,
			drv_data->len,
			drv_data->rd_data_phys + 4,
			DMA_MODE_WRITE);

		if (drv_data->rx) {
			/* Setup rx DMA for linear destination address */
			CCR(drv_data->rx_channel) =
				CCR_DMOD_LINEAR |
				CCR_SMOD_FIFO |
				CCR_DSIZ_32 | CCR_SSIZ_16 |
				CCR_REN;
			imx_dma_setup_single(
				drv_data->rx_channel,
				drv_data->rx_dma,
				drv_data->len,
				drv_data->rd_data_phys,
				DMA_MODE_READ);
			imx_dma_enable(drv_data->rx_channel);

			/* Enable SPI interrupt */
			writel(SPI_INTEN_RO, regs + SPI_INT_STATUS);

			/* Set SPI to request DMA service on both
			   Rx and Tx half fifo watermark */
			writel(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs + SPI_DMA);
		} else
			/* Write only access -> set SPI to request DMA
			   service on Tx half fifo watermark */
			writel(SPI_DMA_THDEN, regs + SPI_DMA);

		imx_dma_enable(drv_data->tx_channel);
	} else {
		dev_dbg(&drv_data->pdev->dev,
			"pump pio transfer\n"
			"    tx  = %p\n"
			"    rx  = %p\n"
			"    len = %d\n",
			drv_data->tx,
			drv_data->rx,
			drv_data->len);

		/* Ensure we have the correct interrupt handler */
		if (drv_data->rx)
			drv_data->transfer_handler = interrupt_transfer;
		else
			drv_data->transfer_handler = interrupt_wronly_transfer;

		/* Enable SPI interrupt */
		if (drv_data->rx)
			writel(SPI_INTEN_TH | SPI_INTEN_RO,
				regs + SPI_INT_STATUS);
		else
			writel(SPI_INTEN_TH, regs + SPI_INT_STATUS);
	}
}

static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
		container_of(work, struct driver_data, work);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);
	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* Setup the SPI using the per chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
	restore_state(drv_data);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);
}

static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	u32 min_speed_hz, max_speed_hz, tmp;
	struct spi_transfer *trans;
	unsigned long flags;

	msg->actual_length = 0;

	/* Per transfer setup check */
	min_speed_hz = spi_speed_hz(SPI_CONTROL_DATARATE_MIN);
	max_speed_hz = spi->max_speed_hz;
	list_for_each_entry(trans, &msg->transfers, transfer_list) {
		tmp = trans->bits_per_word;
		if (tmp > 16) {
			dev_err(&drv_data->pdev->dev,
				"message rejected : "
				"invalid transfer bits_per_word (%d bits)\n",
				tmp);
			goto msg_rejected;
		}
		tmp = trans->speed_hz;
		if (tmp) {
			if (tmp < min_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"device min speed (%d Hz) exceeds "
					"required transfer speed (%d Hz)\n",
					min_speed_hz,
					tmp);
				goto msg_rejected;
			} else if (tmp > max_speed_hz) {
				dev_err(&drv_data->pdev->dev,
					"message rejected : "
					"transfer speed (%d Hz) exceeds "
					"device max speed (%d Hz)\n",
					tmp,
					max_speed_hz);
				goto msg_rejected;
			}
		}
	}

	/* Message accepted */
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	spin_lock_irqsave(&drv_data->lock, flags);
	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	list_add_tail(&msg->queue, &drv_data->queue);
	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->work);

	spin_unlock_irqrestore(&drv_data->lock, flags);
	return 0;

msg_rejected:
	/* Message rejected and not queued */
	msg->status = -EINVAL;
	msg->state = ERROR_STATE;
	if (msg->complete)
		msg->complete(msg->context);
	return -EINVAL;
}

/* On first setup, bad values must free the chip_data memory since they
   will cause spi_new_device() to fail.  Bad values set later by a
   protocol driver are simply not applied and the calling driver is
   notified of the error. */
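/*
 * Illustrative only (names are hypothetical): a protocol driver's board
 * entry can pass a struct spi_imx_chip as spi_board_info.controller_data
 * to override the defaults applied below, e.g.
 *
 *	static struct spi_imx_chip my_chip_info = {
 *		.enable_loopback = 0,
 *		.enable_dma	 = 1,
 *		.ins_ss_pulse	 = 1,
 *		.bclk_wait	 = SPI_DEFAULT_PERIOD_WAIT,
 *		.cs_control	 = my_cs_control,
 *	};
 *
 * The field names match the ones consumed by setup() below.
 */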
static int setup(struct spi_device *spi)
{
	struct spi_imx_chip *chip_info;
	struct chip_data *chip;
	int first_setup = 0;
	u32 tmp;
	int status = 0;

	/* Get controller data */
	chip_info = spi->controller_data;

	/* Get controller_state */
	chip = spi_get_ctldata(spi);
	if (chip == NULL) {
		first_setup = 1;

		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip) {
			dev_err(&spi->dev,
				"setup - cannot allocate controller state");
			return -ENOMEM;
		}
		chip->control = SPI_DEFAULT_CONTROL;

		if (chip_info == NULL) {
			/* spi_board_info.controller_data was not supplied */
			chip_info = kzalloc(sizeof(struct spi_imx_chip),
						GFP_KERNEL);
			if (!chip_info) {
				dev_err(&spi->dev,
					"setup - "
					"cannot allocate controller data");
				status = -ENOMEM;
				goto err_first_setup;
			}
			/* Set controller data default value */
			chip_info->enable_loopback =
						SPI_DEFAULT_ENABLE_LOOPBACK;
			chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA;
			chip_info->ins_ss_pulse = 1;
			chip_info->bclk_wait = SPI_DEFAULT_PERIOD_WAIT;
			chip_info->cs_control = null_cs_control;
		}
	}

	/* Now set controller state based on controller data */

	if (first_setup) {
		/* SPI loopback */
		if (chip_info->enable_loopback)
			chip->test = SPI_TEST_LBC;
		else
			chip->test = 0;

		/* SPI dma driven */
		chip->enable_dma = chip_info->enable_dma;

		/* SPI /SS pulse between spi burst */
		if (chip_info->ins_ss_pulse)
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_1);
		else
			u32_EDIT(chip->control,
				SPI_CONTROL_SSCTL, SPI_CONTROL_SSCTL_0);

		/* SPI bclk waits between each bits_per_word spi burst */
		if (chip_info->bclk_wait > SPI_PERIOD_MAX_WAIT) {
			dev_err(&spi->dev,
				"setup - "
				"bclk_wait exceeds max allowed (%d)\n",
				SPI_PERIOD_MAX_WAIT);
			goto err_first_setup;
		}
		chip->period = SPI_PERIOD_CSRC_BCLK |
				(chip_info->bclk_wait & SPI_PERIOD_WAIT);
	}

	/* SPI mode */
	tmp = spi->mode;
	if (tmp & SPI_LSB_FIRST) {
		status = -EINVAL;
		if (first_setup) {
			dev_err(&spi->dev,
				"setup - "
				"HW doesn't support LSB first transfer\n");
			goto err_first_setup;
		} else {
			dev_err(&spi->dev,
				"setup - "
				"HW doesn't support LSB first transfer, "
				"default to MSB first\n");
			spi->mode &= ~SPI_LSB_FIRST;
		}
	}
	if (tmp & SPI_CS_HIGH) {
		u32_EDIT(chip->control,
			SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_ACT_HIGH);
	}
	switch (tmp & SPI_MODE_3) {
	case SPI_MODE_0:
		tmp = 0;
		break;
	case SPI_MODE_1:
		tmp = SPI_CONTROL_PHA_1;
		break;
	case SPI_MODE_2:
		tmp = SPI_CONTROL_POL_ACT_LOW;
		break;
	default:
		/* SPI_MODE_3 */
		tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_ACT_LOW;
		break;
	}
	u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp);

	/* SPI word width */
	tmp = spi->bits_per_word;
	if (tmp == 0) {
		tmp = 8;
		spi->bits_per_word = 8;
	} else if (tmp > 16) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"invalid bits_per_word (%d)\n",
			tmp);
		if (first_setup)
			goto err_first_setup;
		else {
			/* Undo setup using chip as backup copy */
			tmp = chip->bits_per_word;
			spi->bits_per_word = tmp;
		}
	}
	chip->bits_per_word = tmp;
	u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT_MASK, tmp - 1);
	chip->n_bytes = (tmp <= 8) ? 1 : 2;

	/* SPI datarate */
	tmp = spi_data_rate(spi->max_speed_hz);
	if (tmp == SPI_CONTROL_DATARATE_BAD) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"setup - "
			"HW min speed (%d Hz) exceeds required "
			"max speed (%d Hz)\n",
			spi_speed_hz(SPI_CONTROL_DATARATE_MIN),
			spi->max_speed_hz);
		if (first_setup)
			goto err_first_setup;
		else
			/* Undo setup using chip as backup copy */
			spi->max_speed_hz = chip->max_speed_hz;
	} else {
		u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp);
		/* Actual rounded max_speed_hz */
		tmp = spi_speed_hz(tmp);
		spi->max_speed_hz = tmp;
		chip->max_speed_hz = tmp;
	}

	/* SPI chip-select management */
	if (chip_info->cs_control)
		chip->cs_control = chip_info->cs_control;
	else
		chip->cs_control = null_cs_control;

	/* Save controller_state */
	spi_set_ctldata(spi, chip);

	/* Summary */
	dev_dbg(&spi->dev,
		"setup succeeded\n"
		"    loopback enable   = %s\n"
		"    dma enable        = %s\n"
		"    insert /ss pulse  = %s\n"
		"    period wait       = %d\n"
		"    mode              = %d\n"
		"    bits per word     = %d\n"
		"    min speed         = %d Hz\n"
		"    rounded max speed = %d Hz\n",
		chip->test & SPI_TEST_LBC ? "Yes" : "No",
		chip->enable_dma ? "Yes" : "No",
		chip->control & SPI_CONTROL_SSCTL ? "Yes" : "No",
		chip->period & SPI_PERIOD_WAIT,
		spi->mode,
		spi->bits_per_word,
		spi_speed_hz(SPI_CONTROL_DATARATE_MIN),
		spi->max_speed_hz);

	return status;

err_first_setup:
	kfree(chip);
	return status;
}

static void cleanup(const struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi);
	kfree(chip);
}

static int init_queue(struct driver_data *drv_data)
{
	INIT_LIST_HEAD(&drv_data->queue);
	spin_lock_init(&drv_data->lock);

	drv_data->run = QUEUE_STOPPED;
	drv_data->busy = 0;

	tasklet_init(&drv_data->pump_transfers,
			pump_transfers, (unsigned long)drv_data);

	INIT_WORK(&drv_data->work, pump_messages);
	drv_data->workqueue = create_singlethread_workqueue(
				drv_data->master->cdev.dev->bus_id);
	if (drv_data->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int start_queue(struct driver_data *drv_data)
{
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -EBUSY;
	}

	drv_data->run = QUEUE_RUNNING;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	drv_data->cur_chip = NULL;
	spin_unlock_irqrestore(&drv_data->lock, flags);

	queue_work(drv_data->workqueue, &drv_data->work);

	return 0;
}

static int stop_queue(struct driver_data *drv_data)
{
	unsigned long flags;
	unsigned limit = 500;
	int status = 0;

	spin_lock_irqsave(&drv_data->lock, flags);

	/* This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the drv_data->busy could be used, but then the
	 * common execution path (pump_messages) would be required to call
	 * wake_up or friends on every SPI message.  Do this instead */
	drv_data->run = QUEUE_STOPPED;
	while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		msleep(10);
		spin_lock_irqsave(&drv_data->lock, flags);
	}

	if (!list_empty(&drv_data->queue) || drv_data->busy)
		status = -EBUSY;

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return status;
}

static int destroy_queue(struct driver_data *drv_data)
{
	int status;

	status = stop_queue(drv_data);
	if (status != 0)
		return status;

	if (drv_data->workqueue)
		destroy_workqueue(drv_data->workqueue);

	return 0;
}

static int spi_imx_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct spi_imx_master *platform_info;
	struct spi_master *master;
	struct driver_data *drv_data = NULL;
	struct resource *res;
	int irq, status = 0;

	platform_info = dev->platform_data;
	if (platform_info == NULL) {
		dev_err(&pdev->dev, "probe - no platform data supplied\n");
		status = -ENODEV;
		goto err_no_pdata;
	}

	/* Allocate master with space for drv_data */
	master = spi_alloc_master(dev, sizeof(struct driver_data));
	if (!master) {
		dev_err(&pdev->dev, "probe - cannot alloc spi_master\n");
		status = -ENOMEM;
		goto err_no_mem;
	}
	drv_data = spi_master_get_devdata(master);
	drv_data->master = master;
	drv_data->master_info = platform_info;
	drv_data->pdev = pdev;

	master->bus_num = pdev->id;
	master->num_chipselect = platform_info->num_chipselect;
	master->cleanup = cleanup;
	master->setup = setup;
	master->transfer = transfer;

	drv_data->dummy_dma_buf = SPI_DUMMY_u32;

	/* Find and map resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "probe - MEM resources not defined\n");
		status = -ENODEV;
		goto err_no_iores;
	}
	drv_data->ioarea = request_mem_region(res->start,
						res->end - res->start + 1,
						pdev->name);
	if (drv_data->ioarea == NULL) {
		dev_err(&pdev->dev, "probe - cannot reserve region\n");
		status = -ENXIO;
		goto err_no_iores;
	}
	drv_data->regs = ioremap(res->start, res->end - res->start + 1);
	if (drv_data->regs == NULL) {
		dev_err(&pdev->dev, "probe - cannot map IO\n");
		status = -ENXIO;
		goto err_no_iomap;
	}
	drv_data->rd_data_phys = (dma_addr_t)res->start;

	/* Attach to IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "probe - IRQ resource not defined\n");
		status = -ENODEV;
		goto err_no_irqres;
	}
	status = request_irq(irq, spi_int, IRQF_DISABLED, dev->bus_id, drv_data);
	if (status < 0) {
		dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irqres;
	}

	/* Setup DMA if requested */
	drv_data->tx_channel = -1;
	drv_data->rx_channel = -1;
	if (platform_info->enable_dma) {
		/* Get rx DMA channel */
		status = imx_dma_request_by_prio(&drv_data->rx_channel,
						"spi_imx_rx", DMA_PRIO_HIGH);
		if (status < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting rx channel\n",
				status);
			goto err_no_rxdma;
		} else
			imx_dma_setup_handlers(drv_data->rx_channel, NULL,
						dma_err_handler, drv_data);

		/* Get tx DMA channel */
		status = imx_dma_request_by_prio(&drv_data->tx_channel,
						"spi_imx_tx", DMA_PRIO_MEDIUM);
		if (status < 0) {
			dev_err(dev,
				"probe - problem (%d) requesting tx channel\n",
				status);
			imx_dma_free(drv_data->rx_channel);
			goto err_no_txdma;
		} else
			imx_dma_setup_handlers(drv_data->tx_channel,
						dma_tx_handler, dma_err_handler,
						drv_data);

		/* Set request source and burst length for allocated channels */
		switch (drv_data->pdev->id) {
		case 1:
			/* Using SPI1 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T;
			break;
		case 2:
			/* Using SPI2 */
			RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R;
			RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T;
			break;
		default:
			dev_err(dev, "probe - bad SPI Id\n");
			imx_dma_free(drv_data->rx_channel);
			imx_dma_free(drv_data->tx_channel);
			status = -ENODEV;
			goto err_no_devid;
		}
		BLR(drv_data->rx_channel) = SPI_DMA_BLR;
		BLR(drv_data->tx_channel) = SPI_DMA_BLR;
	}

	/* Load default SPI configuration */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);
	writel(SPI_DEFAULT_CONTROL, drv_data->regs + SPI_CONTROL);

	/* Initialize and start queue */
	status = init_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem initializing queue\n");
		goto err_init_queue;
	}
	status = start_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem starting queue\n");
		goto err_start_queue;
	}

	/* Register with the SPI framework */
	platform_set_drvdata(pdev, drv_data);
	status = spi_register_master(master);
	if (status != 0) {
		dev_err(&pdev->dev, "probe - problem registering spi master\n");
		goto err_spi_register;
	}

	dev_dbg(dev, "probe succeeded\n");
	return 0;

err_init_queue:
err_start_queue:
err_spi_register:
	destroy_queue(drv_data);

err_no_rxdma:
err_no_txdma:
err_no_devid:
	free_irq(irq, drv_data);

err_no_irqres:
	iounmap(drv_data->regs);

err_no_iomap:
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

err_no_iores:
	spi_master_put(master);

err_no_pdata:
err_no_mem:
	return status;
}

static int __devexit spi_imx_remove(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int irq;
	int status = 0;

	if (!drv_data)
		return 0;

	tasklet_kill(&drv_data->pump_transfers);

	/* Remove the queue */
	status = destroy_queue(drv_data);
	if (status != 0) {
		dev_err(&pdev->dev, "queue remove failed (%d)\n", status);
		return status;
	}

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	/* Release DMA */
	if (drv_data->master_info->enable_dma) {
		RSSR(drv_data->rx_channel) = 0;
		RSSR(drv_data->tx_channel) = 0;
		imx_dma_free(drv_data->tx_channel);
		imx_dma_free(drv_data->rx_channel);
	}

	/* Release IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, drv_data);

	/* Release map resources */
	iounmap(drv_data->regs);
	release_resource(drv_data->ioarea);
	kfree(drv_data->ioarea);

	/* Disconnect from the SPI framework */
	spi_unregister_master(drv_data->master);
	spi_master_put(drv_data->master);

	/* Prevent double remove */
	platform_set_drvdata(pdev, NULL);

	dev_dbg(&pdev->dev, "remove succeeded\n");

	return 0;
}

static void spi_imx_shutdown(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);

	/* Reset SPI */
	writel(SPI_RESET_START, drv_data->regs + SPI_RESET);
	writel(0, drv_data->regs + SPI_RESET);

	dev_dbg(&pdev->dev, "shutdown succeeded\n");
}

#ifdef CONFIG_PM
static int suspend_devices(struct device *dev, void *pm_message)
{
	pm_message_t *state = pm_message;

	if (dev->power.power_state.event != state->event) {
		dev_warn(dev, "pm state does not match request\n");
		return -1;
	}

	return 0;
}

static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	status = stop_queue(drv_data);
	if (status != 0) {
		dev_warn(&pdev->dev, "suspend cannot stop queue\n");
		return status;
	}

	dev_dbg(&pdev->dev, "suspended\n");

	return 0;
}

static int spi_imx_resume(struct platform_device *pdev)
{
	struct driver_data *drv_data = platform_get_drvdata(pdev);
	int status = 0;

	/* Start the queue running */
	status = start_queue(drv_data);
	if (status != 0)
		dev_err(&pdev->dev, "problem starting queue (%d)\n", status);
	else
		dev_dbg(&pdev->dev, "resumed\n");

	return status;
}
#else
#define spi_imx_suspend NULL
#define spi_imx_resume NULL
#endif /* CONFIG_PM */

static struct platform_driver driver = {
	.driver = {
		.name = "imx-spi",
		.bus = &platform_bus_type,
		.owner = THIS_MODULE,
	},
	.probe = spi_imx_probe,
	.remove = __devexit_p(spi_imx_remove),
	.shutdown = spi_imx_shutdown,
	.suspend = spi_imx_suspend,
	.resume = spi_imx_resume,
};

static int __init spi_imx_init(void)
{
	return platform_driver_register(&driver);
}
module_init(spi_imx_init);

static void __exit spi_imx_exit(void)
{
	platform_driver_unregister(&driver);
}
module_exit(spi_imx_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("iMX SPI Controller Driver");
MODULE_LICENSE("GPL");