author	Laxman Dewangan <ldewangan@nvidia.com>	2012-10-30 03:04:05 -0400
committer	Mark Brown <broonie@opensource.wolfsonmicro.com>	2012-10-30 14:38:35 -0400
commit	dc4dc36056392c0b0b1ca9e81bebff964b9297e0 (patch)
tree	adbe20e39ba98a2ae893548511b656048686cd85 /drivers/spi
parent	8f0d8163b50e01f398b14bcd4dc039ac5ab18d64 (diff)
spi: tegra: add spi driver for SLINK controller
Tegra20/Tegra30 support the SPI interface through their SLINK controller. Add an SPI driver for the SLINK controller.

Signed-off-by: Laxman Dewangan <ldewangan@nvidia.com>
Reviewed-by: Stephen Warren <swarren@nvidia.com>
Signed-off-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Diffstat (limited to 'drivers/spi')
-rw-r--r--	drivers/spi/Kconfig	6
-rw-r--r--	drivers/spi/Makefile	2
-rw-r--r--	drivers/spi/spi-tegra20-slink.c	1359
3 files changed, 1366 insertions, 1 deletion
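For context, not part of the patch: boards that do not boot from device tree can pass the same two knobs that the driver's DT parsing fills in from "nvidia,dma-request-selector" and "spi-max-frequency". A minimal, hypothetical board-file sketch (field names taken from the probe path below, numeric values purely illustrative) might look like:

	#include <linux/platform_device.h>
	#include <linux/spi/spi-tegra.h>

	/* Hypothetical platform data for one SLINK instance. */
	static struct tegra_spi_platform_data example_slink_pdata = {
		.dma_req_sel		= 15,		/* APB DMA request line, board specific */
		.spi_max_frequency	= 25000000,	/* driver falls back to 25 MHz when 0 */
	};

The structure would be attached through the platform device's dev.platform_data before registration; when it is absent and an of_node is present, tegra_slink_parse_dt() builds it from the device tree instead.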
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 1acae359cabe..25290d9780b2 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -385,6 +385,12 @@ config SPI_MXS
 	help
 	  SPI driver for Freescale MXS devices.
 
+config SPI_TEGRA20_SLINK
+	tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
+	depends on ARCH_TEGRA && TEGRA20_APB_DMA
+	help
+	  SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
+
 config SPI_TI_SSP
 	tristate "TI Sequencer Serial Port - SPI Support"
 	depends on MFD_TI_SSP
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index c48df47e4b0f..f87c0f142e5a 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -60,10 +60,10 @@ obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
 obj-$(CONFIG_SPI_SH_SCI)		+= spi-sh-sci.o
 obj-$(CONFIG_SPI_SIRF)			+= spi-sirf.o
 obj-$(CONFIG_SPI_STMP3XXX)		+= spi-stmp.o
+obj-$(CONFIG_SPI_TEGRA20_SLINK)	+= spi-tegra20-slink.o
 obj-$(CONFIG_SPI_TI_SSP)		+= spi-ti-ssp.o
 obj-$(CONFIG_SPI_TLE62X0)		+= spi-tle62x0.o
 obj-$(CONFIG_SPI_TOPCLIFF_PCH)		+= spi-topcliff-pch.o
 obj-$(CONFIG_SPI_TXX9)			+= spi-txx9.o
 obj-$(CONFIG_SPI_XCOMM)		+= spi-xcomm.o
 obj-$(CONFIG_SPI_XILINX)		+= spi-xilinx.o
-
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
new file mode 100644
index 000000000000..b8985be81d96
--- /dev/null
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -0,0 +1,1359 @@
1/*
2 * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
20#include <linux/completion.h>
21#include <linux/delay.h>
22#include <linux/dmaengine.h>
23#include <linux/dma-mapping.h>
24#include <linux/dmapool.h>
25#include <linux/err.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/kernel.h>
30#include <linux/kthread.h>
31#include <linux/module.h>
32#include <linux/platform_device.h>
33#include <linux/pm_runtime.h>
34#include <linux/of.h>
35#include <linux/of_device.h>
36#include <linux/spi/spi.h>
37#include <linux/spi/spi-tegra.h>
38#include <mach/clk.h>
39
40#define SLINK_COMMAND 0x000
41#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
42#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
43#define SLINK_BOTH_EN (1 << 10)
44#define SLINK_CS_SW (1 << 11)
45#define SLINK_CS_VALUE (1 << 12)
46#define SLINK_CS_POLARITY (1 << 13)
47#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
48#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
49#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
50#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
51#define SLINK_IDLE_SDA_MASK (3 << 16)
52#define SLINK_CS_POLARITY1 (1 << 20)
53#define SLINK_CK_SDA (1 << 21)
54#define SLINK_CS_POLARITY2 (1 << 22)
55#define SLINK_CS_POLARITY3 (1 << 23)
56#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
57#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
58#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
59#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
60#define SLINK_IDLE_SCLK_MASK (3 << 24)
61#define SLINK_M_S (1 << 28)
62#define SLINK_WAIT (1 << 29)
63#define SLINK_GO (1 << 30)
64#define SLINK_ENB (1 << 31)
65
66#define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)
67
68#define SLINK_COMMAND2 0x004
69#define SLINK_LSBFE (1 << 0)
70#define SLINK_SSOE (1 << 1)
71#define SLINK_SPIE (1 << 4)
72#define SLINK_BIDIROE (1 << 6)
73#define SLINK_MODFEN (1 << 7)
74#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
75#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
76#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
77#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
78#define SLINK_FIFO_REFILLS_0 (0 << 22)
79#define SLINK_FIFO_REFILLS_1 (1 << 22)
80#define SLINK_FIFO_REFILLS_2 (2 << 22)
81#define SLINK_FIFO_REFILLS_3 (3 << 22)
82#define SLINK_FIFO_REFILLS_MASK (3 << 22)
83#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
84#define SLINK_SPC0 (1 << 29)
85#define SLINK_TXEN (1 << 30)
86#define SLINK_RXEN (1 << 31)
87
88#define SLINK_STATUS 0x008
89#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
90#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
91#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
92#define SLINK_MODF (1 << 16)
93#define SLINK_RX_UNF (1 << 18)
94#define SLINK_TX_OVF (1 << 19)
95#define SLINK_TX_FULL (1 << 20)
96#define SLINK_TX_EMPTY (1 << 21)
97#define SLINK_RX_FULL (1 << 22)
98#define SLINK_RX_EMPTY (1 << 23)
99#define SLINK_TX_UNF (1 << 24)
100#define SLINK_RX_OVF (1 << 25)
101#define SLINK_TX_FLUSH (1 << 26)
102#define SLINK_RX_FLUSH (1 << 27)
103#define SLINK_SCLK (1 << 28)
104#define SLINK_ERR (1 << 29)
105#define SLINK_RDY (1 << 30)
106#define SLINK_BSY (1 << 31)
107#define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \
108 SLINK_TX_UNF | SLINK_RX_OVF)
109
110#define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY)
111
112#define SLINK_MAS_DATA 0x010
113#define SLINK_SLAVE_DATA 0x014
114
115#define SLINK_DMA_CTL 0x018
116#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
117#define SLINK_TX_TRIG_1 (0 << 16)
118#define SLINK_TX_TRIG_4 (1 << 16)
119#define SLINK_TX_TRIG_8 (2 << 16)
120#define SLINK_TX_TRIG_16 (3 << 16)
121#define SLINK_TX_TRIG_MASK (3 << 16)
122#define SLINK_RX_TRIG_1 (0 << 18)
123#define SLINK_RX_TRIG_4 (1 << 18)
124#define SLINK_RX_TRIG_8 (2 << 18)
125#define SLINK_RX_TRIG_16 (3 << 18)
126#define SLINK_RX_TRIG_MASK (3 << 18)
127#define SLINK_PACKED (1 << 20)
128#define SLINK_PACK_SIZE_4 (0 << 21)
129#define SLINK_PACK_SIZE_8 (1 << 21)
130#define SLINK_PACK_SIZE_16 (2 << 21)
131#define SLINK_PACK_SIZE_32 (3 << 21)
132#define SLINK_PACK_SIZE_MASK (3 << 21)
133#define SLINK_IE_TXC (1 << 26)
134#define SLINK_IE_RXC (1 << 27)
135#define SLINK_DMA_EN (1 << 31)
136
137#define SLINK_STATUS2 0x01c
138#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
139#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
140#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
141
142#define SLINK_TX_FIFO 0x100
143#define SLINK_RX_FIFO 0x180
144
145#define DATA_DIR_TX (1 << 0)
146#define DATA_DIR_RX (1 << 1)
147
148#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
149
150#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
151#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
152#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
153
154#define SLINK_STATUS2_RESET \
155 (TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)
156
157#define MAX_CHIP_SELECT 4
158#define SLINK_FIFO_DEPTH 32
159
160struct tegra_slink_chip_data {
161 bool cs_hold_time;
162};
163
164struct tegra_slink_data {
165 struct device *dev;
166 struct spi_master *master;
167 const struct tegra_slink_chip_data *chip_data;
168 spinlock_t lock;
169
170 struct clk *clk;
171 void __iomem *base;
172 phys_addr_t phys;
173 unsigned irq;
174 int dma_req_sel;
175 u32 spi_max_frequency;
176 u32 cur_speed;
177
178 struct spi_device *cur_spi;
179 unsigned cur_pos;
180 unsigned cur_len;
181 unsigned words_per_32bit;
182 unsigned bytes_per_word;
183 unsigned curr_dma_words;
184 unsigned cur_direction;
185
186 unsigned cur_rx_pos;
187 unsigned cur_tx_pos;
188
189 unsigned dma_buf_size;
190 unsigned max_buf_size;
191 bool is_curr_dma_xfer;
192 bool is_hw_based_cs;
193
194 struct completion rx_dma_complete;
195 struct completion tx_dma_complete;
196
197 u32 tx_status;
198 u32 rx_status;
199 u32 status_reg;
200 bool is_packed;
201 unsigned long packed_size;
202
203 u32 command_reg;
204 u32 command2_reg;
205 u32 dma_control_reg;
206 u32 def_command_reg;
207 u32 def_command2_reg;
208
209 struct completion xfer_completion;
210 struct spi_transfer *curr_xfer;
211 struct dma_chan *rx_dma_chan;
212 u32 *rx_dma_buf;
213 dma_addr_t rx_dma_phys;
214 struct dma_async_tx_descriptor *rx_dma_desc;
215
216 struct dma_chan *tx_dma_chan;
217 u32 *tx_dma_buf;
218 dma_addr_t tx_dma_phys;
219 struct dma_async_tx_descriptor *tx_dma_desc;
220};
221
222static int tegra_slink_runtime_suspend(struct device *dev);
223static int tegra_slink_runtime_resume(struct device *dev);
224
225static inline unsigned long tegra_slink_readl(struct tegra_slink_data *tspi,
226 unsigned long reg)
227{
228 return readl(tspi->base + reg);
229}
230
231static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
232 unsigned long val, unsigned long reg)
233{
234 writel(val, tspi->base + reg);
235
236 /* Read back a register to make sure that the register writes have completed */
237 if (reg != SLINK_TX_FIFO)
238 readl(tspi->base + SLINK_MAS_DATA);
239}
240
241static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
242{
243 unsigned long val;
244 unsigned long val_write = 0;
245
246 val = tegra_slink_readl(tspi, SLINK_STATUS);
247
248 /* Write 1 to clear status register */
249 val_write = SLINK_RDY | SLINK_FIFO_ERROR;
250 tegra_slink_writel(tspi, val_write, SLINK_STATUS);
251}
252
253static unsigned long tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
254 struct spi_transfer *t)
255{
256 unsigned long val;
257
258 switch (tspi->bytes_per_word) {
259 case 0:
260 val = SLINK_PACK_SIZE_4;
261 break;
262 case 1:
263 val = SLINK_PACK_SIZE_8;
264 break;
265 case 2:
266 val = SLINK_PACK_SIZE_16;
267 break;
268 case 4:
269 val = SLINK_PACK_SIZE_32;
270 break;
271 default:
272 val = 0;
273 }
274 return val;
275}
276
277static unsigned tegra_slink_calculate_curr_xfer_param(
278 struct spi_device *spi, struct tegra_slink_data *tspi,
279 struct spi_transfer *t)
280{
281 unsigned remain_len = t->len - tspi->cur_pos;
282 unsigned max_word;
283 unsigned bits_per_word;
284 unsigned max_len;
285 unsigned total_fifo_words;
286
287 bits_per_word = t->bits_per_word ? t->bits_per_word :
288 spi->bits_per_word;
289 tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;
290
291 if (bits_per_word == 8 || bits_per_word == 16) {
292 tspi->is_packed = 1;
293 tspi->words_per_32bit = 32/bits_per_word;
294 } else {
295 tspi->is_packed = 0;
296 tspi->words_per_32bit = 1;
297 }
298 tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
299
300 if (tspi->is_packed) {
301 max_len = min(remain_len, tspi->max_buf_size);
302 tspi->curr_dma_words = max_len/tspi->bytes_per_word;
303 total_fifo_words = max_len/4;
304 } else {
305 max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
306 max_word = min(max_word, tspi->max_buf_size/4);
307 tspi->curr_dma_words = max_word;
308 total_fifo_words = max_word;
309 }
310 return total_fifo_words;
311}
312
313static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
314 struct tegra_slink_data *tspi, struct spi_transfer *t)
315{
316 unsigned nbytes;
317 unsigned tx_empty_count;
318 unsigned long fifo_status;
319 unsigned max_n_32bit;
320 unsigned i, count;
321 unsigned long x;
322 unsigned int written_words;
323 unsigned fifo_words_left;
324 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
325
326 fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
327 tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
328
329 if (tspi->is_packed) {
330 fifo_words_left = tx_empty_count * tspi->words_per_32bit;
331 written_words = min(fifo_words_left, tspi->curr_dma_words);
332 nbytes = written_words * tspi->bytes_per_word;
333 max_n_32bit = DIV_ROUND_UP(nbytes, 4);
334 for (count = 0; count < max_n_32bit; count++) {
335 x = 0;
336 for (i = 0; (i < 4) && nbytes; i++, nbytes--)
337 x |= (*tx_buf++) << (i*8);
338 tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
339 }
340 } else {
341 max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
342 written_words = max_n_32bit;
343 nbytes = written_words * tspi->bytes_per_word;
344 for (count = 0; count < max_n_32bit; count++) {
345 x = 0;
346 for (i = 0; nbytes && (i < tspi->bytes_per_word);
347 i++, nbytes--)
348 x |= ((*tx_buf++) << i*8);
349 tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
350 }
351 }
352 tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
353 return written_words;
354}
355
356static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
357 struct tegra_slink_data *tspi, struct spi_transfer *t)
358{
359 unsigned rx_full_count;
360 unsigned long fifo_status;
361 unsigned i, count;
362 unsigned long x;
363 unsigned int read_words = 0;
364 unsigned len;
365 u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
366
367 fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
368 rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
369 if (tspi->is_packed) {
370 len = tspi->curr_dma_words * tspi->bytes_per_word;
371 for (count = 0; count < rx_full_count; count++) {
372 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
373 for (i = 0; len && (i < 4); i++, len--)
374 *rx_buf++ = (x >> i*8) & 0xFF;
375 }
376 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
377 read_words += tspi->curr_dma_words;
378 } else {
379 unsigned int bits_per_word;
380
381 bits_per_word = t->bits_per_word ? t->bits_per_word :
382 tspi->cur_spi->bits_per_word;
383 for (count = 0; count < rx_full_count; count++) {
384 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
385 for (i = 0; (i < tspi->bytes_per_word); i++)
386 *rx_buf++ = (x >> (i*8)) & 0xFF;
387 }
388 tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
389 read_words += rx_full_count;
390 }
391 return read_words;
392}
393
394static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
395 struct tegra_slink_data *tspi, struct spi_transfer *t)
396{
397 unsigned len;
398
399 /* Make the DMA buffer accessible to the CPU */
400 dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
401 tspi->dma_buf_size, DMA_TO_DEVICE);
402
403 if (tspi->is_packed) {
404 len = tspi->curr_dma_words * tspi->bytes_per_word;
405 memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
406 } else {
407 unsigned int i;
408 unsigned int count;
409 u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
410 unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
411 unsigned int x;
412
413 for (count = 0; count < tspi->curr_dma_words; count++) {
414 x = 0;
415 for (i = 0; consume && (i < tspi->bytes_per_word);
416 i++, consume--)
417 x |= ((*tx_buf++) << i * 8);
418 tspi->tx_dma_buf[count] = x;
419 }
420 }
421 tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
422
423 /* Hand the DMA buffer back to the DMA engine */
424 dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
425 tspi->dma_buf_size, DMA_TO_DEVICE);
426}
427
428static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
429 struct tegra_slink_data *tspi, struct spi_transfer *t)
430{
431 unsigned len;
432
433 /* Make the DMA buffer accessible to the CPU */
434 dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
435 tspi->dma_buf_size, DMA_FROM_DEVICE);
436
437 if (tspi->is_packed) {
438 len = tspi->curr_dma_words * tspi->bytes_per_word;
439 memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
440 } else {
441 unsigned int i;
442 unsigned int count;
443 unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
444 unsigned int x;
445 unsigned int rx_mask, bits_per_word;
446
447 bits_per_word = t->bits_per_word ? t->bits_per_word :
448 tspi->cur_spi->bits_per_word;
449 rx_mask = (1 << bits_per_word) - 1;
450 for (count = 0; count < tspi->curr_dma_words; count++) {
451 x = tspi->rx_dma_buf[count];
452 x &= rx_mask;
453 for (i = 0; (i < tspi->bytes_per_word); i++)
454 *rx_buf++ = (x >> (i*8)) & 0xFF;
455 }
456 }
457 tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
458
459 /* Hand the DMA buffer back to the DMA engine */
460 dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
461 tspi->dma_buf_size, DMA_FROM_DEVICE);
462}
463
464static void tegra_slink_dma_complete(void *args)
465{
466 struct completion *dma_complete = args;
467
468 complete(dma_complete);
469}
470
471static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
472{
473 INIT_COMPLETION(tspi->tx_dma_complete);
474 tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
475 tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
476 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
477 if (!tspi->tx_dma_desc) {
478 dev_err(tspi->dev, "Not able to get desc for Tx\n");
479 return -EIO;
480 }
481
482 tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
483 tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
484
485 dmaengine_submit(tspi->tx_dma_desc);
486 dma_async_issue_pending(tspi->tx_dma_chan);
487 return 0;
488}
489
490static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
491{
492 INIT_COMPLETION(tspi->rx_dma_complete);
493 tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
494 tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
495 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
496 if (!tspi->rx_dma_desc) {
497 dev_err(tspi->dev, "Not able to get desc for Rx\n");
498 return -EIO;
499 }
500
501 tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
502 tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
503
504 dmaengine_submit(tspi->rx_dma_desc);
505 dma_async_issue_pending(tspi->rx_dma_chan);
506 return 0;
507}
508
509static int tegra_slink_start_dma_based_transfer(
510 struct tegra_slink_data *tspi, struct spi_transfer *t)
511{
512 unsigned long val;
513 unsigned long test_val;
514 unsigned int len;
515 int ret = 0;
516 unsigned long status;
517
518 /* Make sure that the RX and TX FIFOs are empty */
519 status = tegra_slink_readl(tspi, SLINK_STATUS);
520 if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
521 dev_err(tspi->dev,
522 "Rx/Tx fifo are not empty status 0x%08lx\n", status);
523 return -EIO;
524 }
525
526 val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
527 val |= tspi->packed_size;
528 if (tspi->is_packed)
529 len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
530 4) * 4;
531 else
532 len = tspi->curr_dma_words * 4;
533
534 /* Set attention level based on length of transfer */
535 if (len & 0xF)
536 val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
537 else if (((len) >> 4) & 0x1)
538 val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
539 else
540 val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
541
542 if (tspi->cur_direction & DATA_DIR_TX)
543 val |= SLINK_IE_TXC;
544
545 if (tspi->cur_direction & DATA_DIR_RX)
546 val |= SLINK_IE_RXC;
547
548 tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
549 tspi->dma_control_reg = val;
550
551 if (tspi->cur_direction & DATA_DIR_TX) {
552 tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
553 wmb();
554 ret = tegra_slink_start_tx_dma(tspi, len);
555 if (ret < 0) {
556 dev_err(tspi->dev,
557 "Starting tx dma failed, err %d\n", ret);
558 return ret;
559 }
560
561 /* Wait for the TX FIFO to fill before starting SLINK */
562 test_val = tegra_slink_readl(tspi, SLINK_STATUS);
563 while (!(test_val & SLINK_TX_FULL))
564 test_val = tegra_slink_readl(tspi, SLINK_STATUS);
565 }
566
567 if (tspi->cur_direction & DATA_DIR_RX) {
568 /* Hand the DMA buffer back to the DMA engine */
569 dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
570 tspi->dma_buf_size, DMA_FROM_DEVICE);
571
572 ret = tegra_slink_start_rx_dma(tspi, len);
573 if (ret < 0) {
574 dev_err(tspi->dev,
575 "Starting rx dma failed, err %d\n", ret);
576 if (tspi->cur_direction & DATA_DIR_TX)
577 dmaengine_terminate_all(tspi->tx_dma_chan);
578 return ret;
579 }
580 }
581 tspi->is_curr_dma_xfer = true;
582 if (tspi->is_packed) {
583 val |= SLINK_PACKED;
584 tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
585 /* HW needs a small delay after setting packed mode */
586 udelay(1);
587 }
588 tspi->dma_control_reg = val;
589
590 val |= SLINK_DMA_EN;
591 tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
592 return ret;
593}
594
595static int tegra_slink_start_cpu_based_transfer(
596 struct tegra_slink_data *tspi, struct spi_transfer *t)
597{
598 unsigned long val;
599 unsigned cur_words;
600
601 val = tspi->packed_size;
602 if (tspi->cur_direction & DATA_DIR_TX)
603 val |= SLINK_IE_TXC;
604
605 if (tspi->cur_direction & DATA_DIR_RX)
606 val |= SLINK_IE_RXC;
607
608 tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
609 tspi->dma_control_reg = val;
610
611 if (tspi->cur_direction & DATA_DIR_TX)
612 cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
613 else
614 cur_words = tspi->curr_dma_words;
615 val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
616 tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
617 tspi->dma_control_reg = val;
618
619 tspi->is_curr_dma_xfer = false;
620 if (tspi->is_packed) {
621 val |= SLINK_PACKED;
622 tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
623 udelay(1);
624 wmb();
625 }
626 tspi->dma_control_reg = val;
627 val |= SLINK_DMA_EN;
628 tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
629 return 0;
630}
631
632static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
633 bool dma_to_memory)
634{
635 struct dma_chan *dma_chan;
636 u32 *dma_buf;
637 dma_addr_t dma_phys;
638 int ret;
639 struct dma_slave_config dma_sconfig;
640 dma_cap_mask_t mask;
641
642 dma_cap_zero(mask);
643 dma_cap_set(DMA_SLAVE, mask);
644 dma_chan = dma_request_channel(mask, NULL, NULL);
645 if (!dma_chan) {
646 dev_err(tspi->dev,
647 "Dma channel is not available, will try later\n");
648 return -EPROBE_DEFER;
649 }
650
651 dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
652 &dma_phys, GFP_KERNEL);
653 if (!dma_buf) {
654 dev_err(tspi->dev, "Not able to allocate the DMA buffer\n");
655 dma_release_channel(dma_chan);
656 return -ENOMEM;
657 }
658
659 dma_sconfig.slave_id = tspi->dma_req_sel;
660 if (dma_to_memory) {
661 dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
662 dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
663 dma_sconfig.src_maxburst = 0;
664 } else {
665 dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
666 dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
667 dma_sconfig.dst_maxburst = 0;
668 }
669
670 ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
671 if (ret)
672 goto scrub;
673 if (dma_to_memory) {
674 tspi->rx_dma_chan = dma_chan;
675 tspi->rx_dma_buf = dma_buf;
676 tspi->rx_dma_phys = dma_phys;
677 } else {
678 tspi->tx_dma_chan = dma_chan;
679 tspi->tx_dma_buf = dma_buf;
680 tspi->tx_dma_phys = dma_phys;
681 }
682 return 0;
683
684scrub:
685 dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
686 dma_release_channel(dma_chan);
687 return ret;
688}
689
690static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
691 bool dma_to_memory)
692{
693 u32 *dma_buf;
694 dma_addr_t dma_phys;
695 struct dma_chan *dma_chan;
696
697 if (dma_to_memory) {
698 dma_buf = tspi->rx_dma_buf;
699 dma_chan = tspi->rx_dma_chan;
700 dma_phys = tspi->rx_dma_phys;
701 tspi->rx_dma_chan = NULL;
702 tspi->rx_dma_buf = NULL;
703 } else {
704 dma_buf = tspi->tx_dma_buf;
705 dma_chan = tspi->tx_dma_chan;
706 dma_phys = tspi->tx_dma_phys;
707 tspi->tx_dma_buf = NULL;
708 tspi->tx_dma_chan = NULL;
709 }
710 if (!dma_chan)
711 return;
712
713 dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
714 dma_release_channel(dma_chan);
715}
716
717static int tegra_slink_start_transfer_one(struct spi_device *spi,
718 struct spi_transfer *t, bool is_first_of_msg,
719 bool is_single_xfer)
720{
721 struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
722 u32 speed;
723 u8 bits_per_word;
724 unsigned total_fifo_words;
725 int ret;
726 struct tegra_spi_device_controller_data *cdata = spi->controller_data;
727 unsigned long command;
728 unsigned long command2;
729
730 bits_per_word = t->bits_per_word ? t->bits_per_word :
731 spi->bits_per_word;
732 speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
733 if (!speed)
734 speed = tspi->spi_max_frequency;
735 if (speed != tspi->cur_speed) {
736 clk_set_rate(tspi->clk, speed * 4);
737 tspi->cur_speed = speed;
738 }
739
740 tspi->cur_spi = spi;
741 tspi->cur_pos = 0;
742 tspi->cur_rx_pos = 0;
743 tspi->cur_tx_pos = 0;
744 tspi->curr_xfer = t;
745 total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
746
747 if (is_first_of_msg) {
748 tegra_slink_clear_status(tspi);
749
750 command = tspi->def_command_reg;
751 command |= SLINK_BIT_LENGTH(bits_per_word - 1);
752
753 command2 = tspi->def_command2_reg;
754 command2 |= SLINK_SS_EN_CS(spi->chip_select);
755
756 /* possibly use the hw based chip select */
757 tspi->is_hw_based_cs = false;
758 if (cdata && cdata->is_hw_based_cs && is_single_xfer &&
759 ((tspi->curr_dma_words * tspi->bytes_per_word) ==
760 (t->len - tspi->cur_pos))) {
761 int setup_count;
762 int sts2;
763
764 setup_count = cdata->cs_setup_clk_count >> 1;
765 setup_count = max(setup_count, 3);
766 command2 |= SLINK_SS_SETUP(setup_count);
767 if (tspi->chip_data->cs_hold_time) {
768 int hold_count;
769
770 hold_count = cdata->cs_hold_clk_count;
771 hold_count = max(hold_count, 0xF);
772 sts2 = tegra_slink_readl(tspi, SLINK_STATUS2);
773 sts2 &= ~SLINK_SS_HOLD_TIME(0xF);
774 sts2 |= SLINK_SS_HOLD_TIME(hold_count);
775 tegra_slink_writel(tspi, sts2, SLINK_STATUS2);
776 }
777 tspi->is_hw_based_cs = true;
778 }
779
780 if (tspi->is_hw_based_cs)
781 command &= ~SLINK_CS_SW;
782 else
783 command |= SLINK_CS_SW | SLINK_CS_VALUE;
784
785 command &= ~SLINK_MODES;
786 if (spi->mode & SPI_CPHA)
787 command |= SLINK_CK_SDA;
788
789 if (spi->mode & SPI_CPOL)
790 command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
791 else
792 command |= SLINK_IDLE_SCLK_DRIVE_LOW;
793 } else {
794 command = tspi->command_reg;
795 command &= ~SLINK_BIT_LENGTH(~0);
796 command |= SLINK_BIT_LENGTH(bits_per_word - 1);
797
798 command2 = tspi->command2_reg;
799 command2 &= ~(SLINK_RXEN | SLINK_TXEN);
800 }
801
802 tegra_slink_writel(tspi, command, SLINK_COMMAND);
803 tspi->command_reg = command;
804
805 tspi->cur_direction = 0;
806 if (t->rx_buf) {
807 command2 |= SLINK_RXEN;
808 tspi->cur_direction |= DATA_DIR_RX;
809 }
810 if (t->tx_buf) {
811 command2 |= SLINK_TXEN;
812 tspi->cur_direction |= DATA_DIR_TX;
813 }
814 tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
815 tspi->command2_reg = command2;
816
817 if (total_fifo_words > SLINK_FIFO_DEPTH)
818 ret = tegra_slink_start_dma_based_transfer(tspi, t);
819 else
820 ret = tegra_slink_start_cpu_based_transfer(tspi, t);
821 return ret;
822}
823
824static int tegra_slink_setup(struct spi_device *spi)
825{
826 struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
827 unsigned long val;
828 unsigned long flags;
829 int ret;
830 unsigned int cs_pol_bit[MAX_CHIP_SELECT] = {
831 SLINK_CS_POLARITY,
832 SLINK_CS_POLARITY1,
833 SLINK_CS_POLARITY2,
834 SLINK_CS_POLARITY3,
835 };
836
837 dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
838 spi->bits_per_word,
839 spi->mode & SPI_CPOL ? "" : "~",
840 spi->mode & SPI_CPHA ? "" : "~",
841 spi->max_speed_hz);
842
843 BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
844
845 ret = pm_runtime_get_sync(tspi->dev);
846 if (ret < 0) {
847 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
848 return ret;
849 }
850
851 spin_lock_irqsave(&tspi->lock, flags);
852 val = tspi->def_command_reg;
853 if (spi->mode & SPI_CS_HIGH)
854 val |= cs_pol_bit[spi->chip_select];
855 else
856 val &= ~cs_pol_bit[spi->chip_select];
857 tspi->def_command_reg = val;
858 tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
859 spin_unlock_irqrestore(&tspi->lock, flags);
860
861 pm_runtime_put(tspi->dev);
862 return 0;
863}
864
865static int tegra_slink_prepare_transfer(struct spi_master *master)
866{
867 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
868
869 return pm_runtime_get_sync(tspi->dev);
870}
871
872static int tegra_slink_unprepare_transfer(struct spi_master *master)
873{
874 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
875
876 pm_runtime_put(tspi->dev);
877 return 0;
878}
879
880static int tegra_slink_transfer_one_message(struct spi_master *master,
881 struct spi_message *msg)
882{
883 bool is_first_msg = true;
884 int single_xfer;
885 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
886 struct spi_transfer *xfer;
887 struct spi_device *spi = msg->spi;
888 int ret;
889
890 msg->status = 0;
891 msg->actual_length = 0;
892 single_xfer = list_is_singular(&msg->transfers);
893 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
894 INIT_COMPLETION(tspi->xfer_completion);
895 ret = tegra_slink_start_transfer_one(spi, xfer,
896 is_first_msg, single_xfer);
897 if (ret < 0) {
898 dev_err(tspi->dev,
899 "spi can not start transfer, err %d\n", ret);
900 goto exit;
901 }
902 is_first_msg = false;
903 ret = wait_for_completion_timeout(&tspi->xfer_completion,
904 SLINK_DMA_TIMEOUT);
905 if (WARN_ON(ret == 0)) {
906 dev_err(tspi->dev,
907 "spi trasfer timeout, err %d\n", ret);
908 ret = -EIO;
909 goto exit;
910 }
911
912 if (tspi->tx_status || tspi->rx_status) {
913 dev_err(tspi->dev, "Error in Transfer\n");
914 ret = -EIO;
915 goto exit;
916 }
917 msg->actual_length += xfer->len;
918 if (xfer->cs_change && xfer->delay_usecs) {
919 tegra_slink_writel(tspi, tspi->def_command_reg,
920 SLINK_COMMAND);
921 udelay(xfer->delay_usecs);
922 }
923 }
924 ret = 0;
925exit:
926 tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
927 tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
928 msg->status = ret;
929 spi_finalize_current_message(master);
930 return ret;
931}
932
933static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
934{
935 struct spi_transfer *t = tspi->curr_xfer;
936 unsigned long flags;
937
938 spin_lock_irqsave(&tspi->lock, flags);
939 if (tspi->tx_status || tspi->rx_status ||
940 (tspi->status_reg & SLINK_BSY)) {
941 dev_err(tspi->dev,
942 "CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
943 dev_err(tspi->dev,
944 "CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
945 tspi->command2_reg, tspi->dma_control_reg);
946 tegra_periph_reset_assert(tspi->clk);
947 udelay(2);
948 tegra_periph_reset_deassert(tspi->clk);
949 complete(&tspi->xfer_completion);
950 goto exit;
951 }
952
953 if (tspi->cur_direction & DATA_DIR_RX)
954 tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
955
956 if (tspi->cur_direction & DATA_DIR_TX)
957 tspi->cur_pos = tspi->cur_tx_pos;
958 else
959 tspi->cur_pos = tspi->cur_rx_pos;
960
961 if (tspi->cur_pos == t->len) {
962 complete(&tspi->xfer_completion);
963 goto exit;
964 }
965
966 tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
967 tegra_slink_start_cpu_based_transfer(tspi, t);
968exit:
969 spin_unlock_irqrestore(&tspi->lock, flags);
970 return IRQ_HANDLED;
971}
972
973static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
974{
975 struct spi_transfer *t = tspi->curr_xfer;
976 long wait_status;
977 int err = 0;
978 unsigned total_fifo_words;
979 unsigned long flags;
980
981 /* Abort the DMAs if any error occurred */
982 if (tspi->cur_direction & DATA_DIR_TX) {
983 if (tspi->tx_status) {
984 dmaengine_terminate_all(tspi->tx_dma_chan);
985 err += 1;
986 } else {
987 wait_status = wait_for_completion_interruptible_timeout(
988 &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
989 if (wait_status <= 0) {
990 dmaengine_terminate_all(tspi->tx_dma_chan);
991 dev_err(tspi->dev, "TxDma Xfer failed\n");
992 err += 1;
993 }
994 }
995 }
996
997 if (tspi->cur_direction & DATA_DIR_RX) {
998 if (tspi->rx_status) {
999 dmaengine_terminate_all(tspi->rx_dma_chan);
1000 err += 2;
1001 } else {
1002 wait_status = wait_for_completion_interruptible_timeout(
1003 &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
1004 if (wait_status <= 0) {
1005 dmaengine_terminate_all(tspi->rx_dma_chan);
1006 dev_err(tspi->dev, "RxDma Xfer failed\n");
1007 err += 2;
1008 }
1009 }
1010 }
1011
1012 spin_lock_irqsave(&tspi->lock, flags);
1013 if (err) {
1014 dev_err(tspi->dev,
1015 "DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
1016 dev_err(tspi->dev,
1017 "DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
1018 tspi->command2_reg, tspi->dma_control_reg);
1019 tegra_periph_reset_assert(tspi->clk);
1020 udelay(2);
1021 tegra_periph_reset_deassert(tspi->clk);
1022 complete(&tspi->xfer_completion);
1023 spin_unlock_irqrestore(&tspi->lock, flags);
1024 return IRQ_HANDLED;
1025 }
1026
1027 if (tspi->cur_direction & DATA_DIR_RX)
1028 tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
1029
1030 if (tspi->cur_direction & DATA_DIR_TX)
1031 tspi->cur_pos = tspi->cur_tx_pos;
1032 else
1033 tspi->cur_pos = tspi->cur_rx_pos;
1034
1035 if (tspi->cur_pos == t->len) {
1036 complete(&tspi->xfer_completion);
1037 goto exit;
1038 }
1039
1040 /* Continue transfer in current message */
1041 total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
1042 tspi, t);
1043 if (total_fifo_words > SLINK_FIFO_DEPTH)
1044 err = tegra_slink_start_dma_based_transfer(tspi, t);
1045 else
1046 err = tegra_slink_start_cpu_based_transfer(tspi, t);
1047
1048exit:
1049 spin_unlock_irqrestore(&tspi->lock, flags);
1050 return IRQ_HANDLED;
1051}
1052
1053static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
1054{
1055 struct tegra_slink_data *tspi = context_data;
1056
1057 if (!tspi->is_curr_dma_xfer)
1058 return handle_cpu_based_xfer(tspi);
1059 return handle_dma_based_xfer(tspi);
1060}
1061
1062static irqreturn_t tegra_slink_isr(int irq, void *context_data)
1063{
1064 struct tegra_slink_data *tspi = context_data;
1065
1066 tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
1067 if (tspi->cur_direction & DATA_DIR_TX)
1068 tspi->tx_status = tspi->status_reg &
1069 (SLINK_TX_OVF | SLINK_TX_UNF);
1070
1071 if (tspi->cur_direction & DATA_DIR_RX)
1072 tspi->rx_status = tspi->status_reg &
1073 (SLINK_RX_OVF | SLINK_RX_UNF);
1074 tegra_slink_clear_status(tspi);
1075
1076 return IRQ_WAKE_THREAD;
1077}
1078
1079static struct tegra_spi_platform_data *tegra_slink_parse_dt(
1080 struct platform_device *pdev)
1081{
1082 struct tegra_spi_platform_data *pdata;
1083 const unsigned int *prop;
1084 struct device_node *np = pdev->dev.of_node;
1085 u32 of_dma[2];
1086
1087 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1088 if (!pdata) {
1089 dev_err(&pdev->dev, "Memory alloc for pdata failed\n");
1090 return NULL;
1091 }
1092
1093 if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
1094 of_dma, 2) >= 0)
1095 pdata->dma_req_sel = of_dma[1];
1096
1097 prop = of_get_property(np, "spi-max-frequency", NULL);
1098 if (prop)
1099 pdata->spi_max_frequency = be32_to_cpup(prop);
1100
1101 return pdata;
1102}
1103
1104const struct tegra_slink_chip_data tegra30_spi_cdata = {
1105 .cs_hold_time = true,
1106};
1107
1108const struct tegra_slink_chip_data tegra20_spi_cdata = {
1109 .cs_hold_time = false,
1110};
1111
1112static struct of_device_id tegra_slink_of_match[] __devinitconst = {
1113 { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
1114 { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
1115 {}
1116};
1117MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
1118
1119static int __devinit tegra_slink_probe(struct platform_device *pdev)
1120{
1121 struct spi_master *master;
1122 struct tegra_slink_data *tspi;
1123 struct resource *r;
1124 struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
1125 int ret, spi_irq;
1126 const struct tegra_slink_chip_data *cdata = NULL;
1127 const struct of_device_id *match;
1128
1129 match = of_match_device(of_match_ptr(tegra_slink_of_match), &pdev->dev);
1130 if (!match) {
1131 dev_err(&pdev->dev, "Error: No device match found\n");
1132 return -ENODEV;
1133 }
1134 cdata = match->data;
1135 if (!pdata && pdev->dev.of_node)
1136 pdata = tegra_slink_parse_dt(pdev);
1137
1138 if (!pdata) {
1139 dev_err(&pdev->dev, "No platform data, exiting\n");
1140 return -ENODEV;
1141 }
1142
1143 if (!pdata->spi_max_frequency)
1144 pdata->spi_max_frequency = 25000000; /* 25MHz */
1145
1146 master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
1147 if (!master) {
1148 dev_err(&pdev->dev, "master allocation failed\n");
1149 return -ENOMEM;
1150 }
1151
1152 /* the spi->mode bits understood by this driver: */
1153 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
1154 master->setup = tegra_slink_setup;
1155 master->prepare_transfer_hardware = tegra_slink_prepare_transfer;
1156 master->transfer_one_message = tegra_slink_transfer_one_message;
1157 master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;
1158 master->num_chipselect = MAX_CHIP_SELECT;
1159 master->bus_num = -1;
1160
1161 dev_set_drvdata(&pdev->dev, master);
1162 tspi = spi_master_get_devdata(master);
1163 tspi->master = master;
1164 tspi->dma_req_sel = pdata->dma_req_sel;
1165 tspi->dev = &pdev->dev;
1166 tspi->chip_data = cdata;
1167 spin_lock_init(&tspi->lock);
1168
1169 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1170 if (!r) {
1171 dev_err(&pdev->dev, "No IO memory resource\n");
1172 ret = -ENODEV;
1173 goto exit_free_master;
1174 }
1175 tspi->phys = r->start;
1176 tspi->base = devm_request_and_ioremap(&pdev->dev, r);
1177 if (!tspi->base) {
1178 dev_err(&pdev->dev,
1179 "Cannot request memregion/iomap dma address\n");
1180 ret = -EADDRNOTAVAIL;
1181 goto exit_free_master;
1182 }
1183
1184 spi_irq = platform_get_irq(pdev, 0);
1185 tspi->irq = spi_irq;
1186 ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
1187 tegra_slink_isr_thread, IRQF_ONESHOT,
1188 dev_name(&pdev->dev), tspi);
1189 if (ret < 0) {
1190 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1191 tspi->irq);
1192 goto exit_free_master;
1193 }
1194
1195 tspi->clk = devm_clk_get(&pdev->dev, "slink");
1196 if (IS_ERR(tspi->clk)) {
1197 dev_err(&pdev->dev, "can not get clock\n");
1198 ret = PTR_ERR(tspi->clk);
1199 goto exit_free_irq;
1200 }
1201
1202 tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
1203 tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
1204 tspi->spi_max_frequency = pdata->spi_max_frequency;
1205
1206 if (pdata->dma_req_sel) {
1207 ret = tegra_slink_init_dma_param(tspi, true);
1208 if (ret < 0) {
1209 dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret);
1210 goto exit_free_irq;
1211 }
1212
1213 ret = tegra_slink_init_dma_param(tspi, false);
1214 if (ret < 0) {
1215 dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret);
1216 goto exit_rx_dma_free;
1217 }
1218 tspi->max_buf_size = tspi->dma_buf_size;
1219 init_completion(&tspi->tx_dma_complete);
1220 init_completion(&tspi->rx_dma_complete);
1221 }
1222
1223 init_completion(&tspi->xfer_completion);
1224
1225 pm_runtime_enable(&pdev->dev);
1226 if (!pm_runtime_enabled(&pdev->dev)) {
1227 ret = tegra_slink_runtime_resume(&pdev->dev);
1228 if (ret)
1229 goto exit_pm_disable;
1230 }
1231
1232 ret = pm_runtime_get_sync(&pdev->dev);
1233 if (ret < 0) {
1234 dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
1235 goto exit_pm_disable;
1236 }
1237 tspi->def_command_reg = SLINK_M_S;
1238 tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
1239 tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
1240 tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
1241 pm_runtime_put(&pdev->dev);
1242
1243 master->dev.of_node = pdev->dev.of_node;
1244 ret = spi_register_master(master);
1245 if (ret < 0) {
1246 dev_err(&pdev->dev, "can not register to master err %d\n", ret);
1247 goto exit_pm_disable;
1248 }
1249 return ret;
1250
1251exit_pm_disable:
1252 pm_runtime_disable(&pdev->dev);
1253 if (!pm_runtime_status_suspended(&pdev->dev))
1254 tegra_slink_runtime_suspend(&pdev->dev);
1255 tegra_slink_deinit_dma_param(tspi, false);
1256exit_rx_dma_free:
1257 tegra_slink_deinit_dma_param(tspi, true);
1258exit_free_irq:
1259 free_irq(spi_irq, tspi);
1260exit_free_master:
1261 spi_master_put(master);
1262 return ret;
1263}
1264
1265static int __devexit tegra_slink_remove(struct platform_device *pdev)
1266{
1267 struct spi_master *master = dev_get_drvdata(&pdev->dev);
1268 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
1269
1270 free_irq(tspi->irq, tspi);
1271 spi_unregister_master(master);
1272
1273 if (tspi->tx_dma_chan)
1274 tegra_slink_deinit_dma_param(tspi, false);
1275
1276 if (tspi->rx_dma_chan)
1277 tegra_slink_deinit_dma_param(tspi, true);
1278
1279 pm_runtime_disable(&pdev->dev);
1280 if (!pm_runtime_status_suspended(&pdev->dev))
1281 tegra_slink_runtime_suspend(&pdev->dev);
1282
1283 return 0;
1284}
1285
1286#ifdef CONFIG_PM_SLEEP
1287static int tegra_slink_suspend(struct device *dev)
1288{
1289 struct spi_master *master = dev_get_drvdata(dev);
1290
1291 return spi_master_suspend(master);
1292}
1293
1294static int tegra_slink_resume(struct device *dev)
1295{
1296 struct spi_master *master = dev_get_drvdata(dev);
1297 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
1298 int ret;
1299
1300 ret = pm_runtime_get_sync(dev);
1301 if (ret < 0) {
1302 dev_err(dev, "pm runtime failed, e = %d\n", ret);
1303 return ret;
1304 }
1305 tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
1306 tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
1307 pm_runtime_put(dev);
1308
1309 return spi_master_resume(master);
1310}
1311#endif
1312
1313static int tegra_slink_runtime_suspend(struct device *dev)
1314{
1315 struct spi_master *master = dev_get_drvdata(dev);
1316 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
1317
1318 /* Flush all writes queued in the PPSB by reading back */
1319 tegra_slink_readl(tspi, SLINK_MAS_DATA);
1320
1321 clk_disable_unprepare(tspi->clk);
1322 return 0;
1323}
1324
1325static int tegra_slink_runtime_resume(struct device *dev)
1326{
1327 struct spi_master *master = dev_get_drvdata(dev);
1328 struct tegra_slink_data *tspi = spi_master_get_devdata(master);
1329 int ret;
1330
1331 ret = clk_prepare_enable(tspi->clk);
1332 if (ret < 0) {
1333 dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
1334 return ret;
1335 }
1336 return 0;
1337}
1338
1339static const struct dev_pm_ops slink_pm_ops = {
1340 SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
1341 tegra_slink_runtime_resume, NULL)
1342 SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
1343};
1344static struct platform_driver tegra_slink_driver = {
1345 .driver = {
1346 .name = "spi-tegra-slink",
1347 .owner = THIS_MODULE,
1348 .pm = &slink_pm_ops,
1349 .of_match_table = of_match_ptr(tegra_slink_of_match),
1350 },
1351 .probe = tegra_slink_probe,
1352 .remove = __devexit_p(tegra_slink_remove),
1353};
1354module_platform_driver(tegra_slink_driver);
1355
1356MODULE_ALIAS("platform:spi-tegra-slink");
1357MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
1358MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
1359MODULE_LICENSE("GPL v2");
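
For context, not part of the patch: the driver registers with the generic SPI core, so client drivers never touch the SLINK registers directly. A minimal, hypothetical client transfer against this controller might look like the sketch below; the spi_device pointer and all names are illustrative, and the stack buffers are acceptable here only because this driver always bounces data through its own coherent DMA buffers.

	#include <linux/spi/spi.h>

	/* Hypothetical client-side helper: one full-duplex 4-byte transfer. */
	static int example_slink_client_xfer(struct spi_device *spi)
	{
		u8 tx[4] = { 0xde, 0xad, 0xbe, 0xef };
		u8 rx[4] = { 0 };
		struct spi_transfer t = {
			.tx_buf		= tx,
			.rx_buf		= rx,
			.len		= sizeof(tx),
			.bits_per_word	= 8,		/* 8 or 16 bpw selects the packed-mode path */
			.speed_hz	= 1000000,	/* capped by spi-max-frequency (default 25 MHz) */
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);

		/*
		 * The SPI core queues the message and calls
		 * tegra_slink_transfer_one_message(); transfers that fit in the
		 * 32-word FIFO run over PIO, larger ones use the APB DMA channels.
		 */
		return spi_sync(spi, &m);
	}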