path: root/drivers/spi/spi-tegra.c
author		Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
commit		fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree		a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/spi/spi-tegra.c
parent		8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files. (HEAD, master)
Diffstat (limited to 'drivers/spi/spi-tegra.c')
-rw-r--r--	drivers/spi/spi-tegra.c	1666
1 file changed, 1666 insertions, 0 deletions
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
new file mode 100644
index 00000000000..6810f611c55
--- /dev/null
+++ b/drivers/spi/spi-tegra.c
@@ -0,0 +1,1666 @@
/*
 * Driver for the NVIDIA Tegra SPI (SLINK) controller.
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * Author:
 *	Erik Gilling <konkers@android.com>
 *
 * Copyright (C) 2010-2011 NVIDIA Corporation
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/* #define DEBUG		1 */
/* #define VERBOSE_DEBUG	1 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/pm_runtime.h>

#include <linux/spi/spi.h>
#include <linux/spi-tegra.h>

#include <mach/dma.h>
#include <mach/clk.h>

#define SLINK_COMMAND			0x000
#define SLINK_BIT_LENGTH(x)		(((x) & 0x1f) << 0)
#define SLINK_WORD_SIZE(x)		(((x) & 0x1f) << 5)
#define SLINK_BOTH_EN			(1 << 10)
#define SLINK_CS_SW			(1 << 11)
#define SLINK_CS_VALUE			(1 << 12)
#define SLINK_CS_POLARITY		(1 << 13)
#define SLINK_IDLE_SDA_DRIVE_LOW	(0 << 16)
#define SLINK_IDLE_SDA_DRIVE_HIGH	(1 << 16)
#define SLINK_IDLE_SDA_PULL_LOW		(2 << 16)
#define SLINK_IDLE_SDA_PULL_HIGH	(3 << 16)
#define SLINK_IDLE_SDA_MASK		(3 << 16)
#define SLINK_CS_POLARITY1		(1 << 20)
#define SLINK_CK_SDA			(1 << 21)
#define SLINK_CS_POLARITY2		(1 << 22)
#define SLINK_CS_POLARITY3		(1 << 23)
#define SLINK_IDLE_SCLK_DRIVE_LOW	(0 << 24)
#define SLINK_IDLE_SCLK_DRIVE_HIGH	(1 << 24)
#define SLINK_IDLE_SCLK_PULL_LOW	(2 << 24)
#define SLINK_IDLE_SCLK_PULL_HIGH	(3 << 24)
#define SLINK_IDLE_SCLK_MASK		(3 << 24)
#define SLINK_M_S			(1 << 28)
#define SLINK_WAIT			(1 << 29)
#define SLINK_GO			(1 << 30)
#define SLINK_ENB			(1 << 31)

#define SLINK_COMMAND2			0x004
#define SLINK_LSBFE			(1 << 0)
#define SLINK_SSOE			(1 << 1)
#define SLINK_SPIE			(1 << 4)
#define SLINK_BIDIROE			(1 << 6)
#define SLINK_MODFEN			(1 << 7)
#define SLINK_INT_SIZE(x)		(((x) & 0x1f) << 8)
#define SLINK_CS_ACTIVE_BETWEEN		(1 << 17)
#define SLINK_SS_EN_CS(x)		(((x) & 0x3) << 18)
#define SLINK_SS_SETUP(x)		(((x) & 0x3) << 20)
#define SLINK_FIFO_REFILLS_0		(0 << 22)
#define SLINK_FIFO_REFILLS_1		(1 << 22)
#define SLINK_FIFO_REFILLS_2		(2 << 22)
#define SLINK_FIFO_REFILLS_3		(3 << 22)
#define SLINK_FIFO_REFILLS_MASK		(3 << 22)
#define SLINK_WAIT_PACK_INT(x)		(((x) & 0x7) << 26)
#define SLINK_SPC0			(1 << 29)
#define SLINK_TXEN			(1 << 30)
#define SLINK_RXEN			(1 << 31)

#define SLINK_STATUS			0x008
#define SLINK_COUNT(val)		(((val) >> 0) & 0x1f)
#define SLINK_WORD(val)			(((val) >> 5) & 0x1f)
#define SLINK_BLK_CNT(val)		(((val) >> 0) & 0xffff)
#define SLINK_MODF			(1 << 16)
#define SLINK_RX_UNF			(1 << 18)
#define SLINK_TX_OVF			(1 << 19)
#define SLINK_TX_FULL			(1 << 20)
#define SLINK_TX_EMPTY			(1 << 21)
#define SLINK_RX_FULL			(1 << 22)
#define SLINK_RX_EMPTY			(1 << 23)
#define SLINK_TX_UNF			(1 << 24)
#define SLINK_RX_OVF			(1 << 25)
#define SLINK_TX_FLUSH			(1 << 26)
#define SLINK_RX_FLUSH			(1 << 27)
#define SLINK_SCLK			(1 << 28)
#define SLINK_ERR			(1 << 29)
#define SLINK_RDY			(1 << 30)
#define SLINK_BSY			(1 << 31)

#define SLINK_MAS_DATA			0x010
#define SLINK_SLAVE_DATA		0x014

#define SLINK_DMA_CTL			0x018
#define SLINK_DMA_BLOCK_SIZE(x)		(((x) & 0xffff) << 0)
#define SLINK_TX_TRIG_1			(0 << 16)
#define SLINK_TX_TRIG_4			(1 << 16)
#define SLINK_TX_TRIG_8			(2 << 16)
#define SLINK_TX_TRIG_16		(3 << 16)
#define SLINK_TX_TRIG_MASK		(3 << 16)
#define SLINK_RX_TRIG_1			(0 << 18)
#define SLINK_RX_TRIG_4			(1 << 18)
#define SLINK_RX_TRIG_8			(2 << 18)
#define SLINK_RX_TRIG_16		(3 << 18)
#define SLINK_RX_TRIG_MASK		(3 << 18)
#define SLINK_PACKED			(1 << 20)
#define SLINK_PACK_SIZE_4		(0 << 21)
#define SLINK_PACK_SIZE_8		(1 << 21)
#define SLINK_PACK_SIZE_16		(2 << 21)
#define SLINK_PACK_SIZE_32		(3 << 21)
#define SLINK_PACK_SIZE_MASK		(3 << 21)
#define SLINK_IE_TXC			(1 << 26)
#define SLINK_IE_RXC			(1 << 27)
#define SLINK_DMA_EN			(1 << 31)

#define SLINK_STATUS2			0x01c
#define SLINK_TX_FIFO_EMPTY_COUNT(val)	(((val) & 0x3f) >> 0)
#define SLINK_RX_FIFO_FULL_COUNT(val)	(((val) & 0x3f0000) >> 16)
#define SLINK_SS_HOLD_TIME(val)		(((val) & 0xF) << 6)

#define SLINK_TX_FIFO			0x100
#define SLINK_RX_FIFO			0x180

#define DATA_DIR_TX			(1 << 0)
#define DATA_DIR_RX			(1 << 1)

#define SPI_FIFO_DEPTH			32
#define SLINK_DMA_TIMEOUT		(msecs_to_jiffies(1000))

static const unsigned long spi_tegra_req_sels[] = {
	TEGRA_DMA_REQ_SEL_SL2B1,
	TEGRA_DMA_REQ_SEL_SL2B2,
	TEGRA_DMA_REQ_SEL_SL2B3,
	TEGRA_DMA_REQ_SEL_SL2B4,
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
	TEGRA_DMA_REQ_SEL_SL2B5,
	TEGRA_DMA_REQ_SEL_SL2B6,
#endif
};

#define DEFAULT_SPI_DMA_BUF_LEN		(16*1024)
#define TX_FIFO_EMPTY_COUNT_MAX		SLINK_TX_FIFO_EMPTY_COUNT(0x20)
#define RX_FIFO_FULL_COUNT_ZERO		SLINK_RX_FIFO_FULL_COUNT(0)

#define SLINK_STATUS2_RESET \
	(TX_FIFO_EMPTY_COUNT_MAX | \
	RX_FIFO_FULL_COUNT_ZERO << 16)

#define MAX_CHIP_SELECT			4
#define SLINK_FIFO_DEPTH		4

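/*
 * Per-controller driver state, kept as the spi_master's driver data.
 * Most fields below are protected by @lock, which is taken from both
 * the transfer work queue and the threaded interrupt handler.
 */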
struct spi_tegra_data {
	struct spi_master	*master;
	struct platform_device	*pdev;
	spinlock_t		lock;
	char			port_name[32];

	struct clk		*clk;
	struct clk		*sclk;
	void __iomem		*base;
	phys_addr_t		phys;
	unsigned		irq;

	u32			cur_speed;

	struct list_head	queue;
	struct spi_transfer	*cur;
	struct spi_device	*cur_spi;
	unsigned		cur_pos;
	unsigned		cur_len;
	unsigned		words_per_32bit;
	unsigned		bytes_per_word;
	unsigned		curr_dma_words;

	unsigned		cur_direction;

	bool			is_dma_allowed;

	struct tegra_dma_req	rx_dma_req;
	struct tegra_dma_channel *rx_dma;
	u32			*rx_buf;
	dma_addr_t		rx_buf_phys;
	unsigned		cur_rx_pos;

	struct tegra_dma_req	tx_dma_req;
	struct tegra_dma_channel *tx_dma;
	u32			*tx_buf;
	dma_addr_t		tx_buf_phys;
	unsigned		cur_tx_pos;

	unsigned		dma_buf_size;
	unsigned		max_buf_size;
	bool			is_curr_dma_xfer;

	bool			is_clkon_always;
	bool			clk_state;
	bool			is_suspended;

	bool			is_hw_based_cs;

	struct completion	rx_dma_complete;
	struct completion	tx_dma_complete;
	bool			is_transfer_in_progress;

	u32			rx_complete;
	u32			tx_complete;
	u32			tx_status;
	u32			rx_status;
	u32			status_reg;
	bool			is_packed;
	unsigned long		packed_size;

	u32			command_reg;
	u32			command2_reg;
	u32			dma_control_reg;
	u32			def_command_reg;
	u32			def_command2_reg;

	struct spi_clk_parent	*parent_clk_list;
	int			parent_clk_count;
	unsigned long		max_rate;
	unsigned long		max_parent_rate;
	int			min_div;
	struct workqueue_struct	*spi_workqueue;
	struct work_struct	spi_transfer_work;
};

static inline unsigned long spi_tegra_readl(struct spi_tegra_data *tspi,
		unsigned long reg)
{
	BUG_ON(!tspi->clk_state);
	return readl(tspi->base + reg);
}

static inline void spi_tegra_writel(struct spi_tegra_data *tspi,
		unsigned long val, unsigned long reg)
{
	BUG_ON(!tspi->clk_state);
	writel(val, tspi->base + reg);
}

static void spi_tegra_clear_status(struct spi_tegra_data *tspi)
{
	unsigned long val;
	unsigned long val_write = 0;

	val = spi_tegra_readl(tspi, SLINK_STATUS);

	val_write = SLINK_RDY;
	if (val & SLINK_TX_OVF)
		val_write |= SLINK_TX_OVF;
	if (val & SLINK_RX_OVF)
		val_write |= SLINK_RX_OVF;
	if (val & SLINK_RX_UNF)
		val_write |= SLINK_RX_UNF;
	if (val & SLINK_TX_UNF)
		val_write |= SLINK_TX_UNF;

	/* The status bits are write-one-to-clear */
	spi_tegra_writel(tspi, val_write, SLINK_STATUS);
}

static unsigned long spi_tegra_get_packed_size(struct spi_tegra_data *tspi,
		struct spi_transfer *t)
{
	unsigned long val;

	switch (tspi->bytes_per_word) {
	case 0:
		val = SLINK_PACK_SIZE_4;
		break;
	case 1:
		val = SLINK_PACK_SIZE_8;
		break;
	case 2:
		val = SLINK_PACK_SIZE_16;
		break;
	case 4:
		val = SLINK_PACK_SIZE_32;
		break;
	default:
		val = 0;
	}
	return val;
}

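/*
 * Work out how the next chunk of the transfer will be moved: sets
 * bytes_per_word, packed vs. unpacked mode (packed is only used for 8-
 * and 16-bit words, which tile exactly into 32-bit FIFO entries) and
 * the number of words that fit in the bounce buffer.  Returns the total
 * number of FIFO words remaining, which the caller compares against the
 * FIFO depth to choose between CPU (PIO) and DMA transfer.
 */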
static unsigned spi_tegra_calculate_curr_xfer_param(
	struct spi_device *spi, struct spi_tegra_data *tspi,
	struct spi_transfer *t)
{
	unsigned remain_len = t->len - tspi->cur_pos;
	unsigned max_word;
	unsigned bits_per_word;
	unsigned max_len;
	unsigned total_fifo_words;

	bits_per_word = t->bits_per_word ? t->bits_per_word :
					spi->bits_per_word;
	tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1;

	if (bits_per_word == 8 || bits_per_word == 16) {
		tspi->is_packed = 1;
		tspi->words_per_32bit = 32/bits_per_word;
	} else {
		tspi->is_packed = 0;
		tspi->words_per_32bit = 1;
	}
	tspi->packed_size = spi_tegra_get_packed_size(tspi, t);

	if (tspi->is_packed) {
		max_len = min(remain_len, tspi->max_buf_size);
		tspi->curr_dma_words = max_len/tspi->bytes_per_word;
		total_fifo_words = remain_len/4;
	} else {
		max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
		max_word = min(max_word, tspi->max_buf_size/4);
		tspi->curr_dma_words = max_word;
		total_fifo_words = remain_len/tspi->bytes_per_word;
	}
	return total_fifo_words;
}

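/*
 * PIO helper: pack as much of the client's Tx buffer as the current
 * FIFO free space allows into 32-bit FIFO writes, and return how many
 * SPI words were consumed.
 */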
static unsigned spi_tegra_fill_tx_fifo_from_client_txbuf(
	struct spi_tegra_data *tspi, struct spi_transfer *t)
{
	unsigned nbytes;
	unsigned tx_empty_count;
	unsigned long fifo_status;
	u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
	unsigned max_n_32bit;
	unsigned i, count;
	unsigned long x;
	unsigned int written_words;

	fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
	tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);

	if (tspi->is_packed) {
		nbytes = tspi->curr_dma_words * tspi->bytes_per_word;
		max_n_32bit = (min(nbytes, tx_empty_count*4) - 1)/4 + 1;
		for (count = 0; count < max_n_32bit; ++count) {
			x = 0;
			for (i = 0; (i < 4) && nbytes; i++, nbytes--)
				x |= (*tx_buf++) << (i*8);
			spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
		}
		written_words = min(max_n_32bit * tspi->words_per_32bit,
					tspi->curr_dma_words);
	} else {
		max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
		nbytes = max_n_32bit * tspi->bytes_per_word;
		for (count = 0; count < max_n_32bit; ++count) {
			x = 0;
			for (i = 0; nbytes && (i < tspi->bytes_per_word);
							++i, nbytes--)
				x |= ((*tx_buf++) << i*8);
			spi_tegra_writel(tspi, x, SLINK_TX_FIFO);
		}
		written_words = max_n_32bit;
	}
	tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
	return written_words;
}

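/*
 * PIO helper: drain the Rx FIFO into the client's Rx buffer.  In
 * unpacked mode each FIFO entry carries a single SPI word, which is
 * masked down to bits_per_word before being copied out byte by byte.
 */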
static unsigned int spi_tegra_read_rx_fifo_to_client_rxbuf(
	struct spi_tegra_data *tspi, struct spi_transfer *t)
{
	unsigned rx_full_count;
	unsigned long fifo_status;
	u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
	unsigned i, count;
	unsigned long x;
	unsigned int read_words = 0;
	unsigned len;

	fifo_status = spi_tegra_readl(tspi, SLINK_STATUS2);
	rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
	dev_dbg(&tspi->pdev->dev, "Rx fifo count %d\n", rx_full_count);
	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		for (count = 0; count < rx_full_count; ++count) {
			x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
			for (i = 0; len && (i < 4); ++i, len--)
				*rx_buf++ = (x >> i*8) & 0xFF;
		}
		tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
		read_words += tspi->curr_dma_words;
	} else {
		unsigned int rx_mask, bits_per_word;

		bits_per_word = t->bits_per_word ? t->bits_per_word :
						tspi->cur_spi->bits_per_word;
		rx_mask = (1 << bits_per_word) - 1;
		for (count = 0; count < rx_full_count; ++count) {
			x = spi_tegra_readl(tspi, SLINK_RX_FIFO);
			x &= rx_mask;
			for (i = 0; (i < tspi->bytes_per_word); ++i)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
		tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
		read_words += rx_full_count;
	}
	return read_words;
}

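/*
 * DMA helpers: the DMA engine works on the driver's coherent bounce
 * buffers rather than on the client buffers directly, so each chunk is
 * staged through tx_buf/rx_buf with explicit dma_sync calls around the
 * CPU accesses.  In unpacked mode every SPI word is widened to one
 * 32-bit bounce-buffer entry to match the FIFO layout.
 */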
static void spi_tegra_copy_client_txbuf_to_spi_txbuf(
		struct spi_tegra_data *tspi, struct spi_transfer *t)
{
	unsigned len;

	/* Make the DMA buffer safe to access by the CPU */
	dma_sync_single_for_cpu(&tspi->pdev->dev, tspi->tx_buf_phys,
				tspi->dma_buf_size, DMA_FROM_DEVICE);
	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(tspi->tx_buf, t->tx_buf + tspi->cur_pos, len);
	} else {
		unsigned int i;
		unsigned int count;
		u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
		unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
		unsigned int x;

		for (count = 0; count < tspi->curr_dma_words; ++count) {
			x = 0;
			for (i = 0; consume && (i < tspi->bytes_per_word);
							++i, consume--)
				x |= ((*tx_buf++) << i*8);
			tspi->tx_buf[count] = x;
		}
	}
	tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
}

static void spi_tegra_copy_spi_rxbuf_to_client_rxbuf(
		struct spi_tegra_data *tspi, struct spi_transfer *t)
{
	unsigned len;

	/* Make the DMA buffer safe to access by the CPU */
	dma_sync_single_for_cpu(&tspi->pdev->dev, tspi->rx_buf_phys,
			tspi->dma_buf_size, DMA_FROM_DEVICE);

	if (tspi->is_packed) {
		len = tspi->curr_dma_words * tspi->bytes_per_word;
		memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_buf, len);
	} else {
		unsigned int i;
		unsigned int count;
		unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
		unsigned int x;
		unsigned int rx_mask, bits_per_word;

		bits_per_word = t->bits_per_word ? t->bits_per_word :
						tspi->cur_spi->bits_per_word;
		rx_mask = (1 << bits_per_word) - 1;
		for (count = 0; count < tspi->curr_dma_words; ++count) {
			x = tspi->rx_buf[count];
			x &= rx_mask;
			for (i = 0; (i < tspi->bytes_per_word); ++i)
				*rx_buf++ = (x >> (i*8)) & 0xFF;
		}
	}
	tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;

	/* Hand the DMA buffer back to the device */
	dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
			tspi->dma_buf_size, DMA_TO_DEVICE);
}

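/*
 * Start a DMA-based chunk: program the block size and FIFO trigger
 * levels (chosen so that the trigger evenly divides the transfer
 * length), enqueue the Tx and/or Rx DMA requests, and finally set
 * SLINK_DMA_EN to let the controller run.
 */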
static int spi_tegra_start_dma_based_transfer(
		struct spi_tegra_data *tspi, struct spi_transfer *t)
{
	unsigned long val;
	unsigned long test_val;
	unsigned int len;
	int ret = 0;

	INIT_COMPLETION(tspi->rx_dma_complete);
	INIT_COMPLETION(tspi->tx_dma_complete);

	/* Make sure that the Rx and Tx FIFOs are empty */
	test_val = spi_tegra_readl(tspi, SLINK_STATUS);
	if (((test_val >> 20) & 0xF) != 0xA)
		dev_err(&tspi->pdev->dev,
			"The Rx and Tx FIFOs are not empty, status 0x%08lx\n",
			test_val);

	val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
	val |= tspi->packed_size;
	if (tspi->is_packed)
		len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
					4) * 4;
	else
		len = tspi->curr_dma_words * 4;

	if (len & 0xF)
		val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
	else if (((len) >> 4) & 0x1)
		val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
	else
		val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;

	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SLINK_IE_TXC;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SLINK_IE_RXC;

	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	if (tspi->cur_direction & DATA_DIR_TX) {
		spi_tegra_copy_client_txbuf_to_spi_txbuf(tspi, t);
		wmb();
		/* Hand the DMA buffer back to the device */
		dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
		tspi->tx_dma_req.size = len;
		ret = tegra_dma_enqueue_req(tspi->tx_dma, &tspi->tx_dma_req);
		if (ret < 0) {
			dev_err(&tspi->pdev->dev,
				"Error starting Tx DMA: %d\n", ret);
			return ret;
		}

		/* Wait for the Tx FIFO to fill before starting the SLINK */
		test_val = spi_tegra_readl(tspi, SLINK_STATUS);
		while (!(test_val & SLINK_TX_FULL))
			test_val = spi_tegra_readl(tspi, SLINK_STATUS);
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		/* Hand the DMA buffer back to the device */
		dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);
		tspi->rx_dma_req.size = len;
		ret = tegra_dma_enqueue_req(tspi->rx_dma, &tspi->rx_dma_req);
		if (ret < 0) {
			dev_err(&tspi->pdev->dev,
				"Error starting Rx DMA: %d\n", ret);
			if (tspi->cur_direction & DATA_DIR_TX)
				tegra_dma_dequeue_req(tspi->tx_dma,
							&tspi->tx_dma_req);
			return ret;
		}
	}
	tspi->is_curr_dma_xfer = true;
	if (tspi->is_packed) {
		val |= SLINK_PACKED;
		spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
		/* Give the hardware a moment after enabling packed mode */
		udelay(1);
		wmb();
	}

	val |= SLINK_DMA_EN;
	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
	return ret;
}

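/*
 * Start an interrupt-driven PIO chunk: pre-fill the Tx FIFO, program
 * the block size, and enable the engine with the completion interrupts
 * set so that the threaded ISR can drain and refill the FIFOs.
 */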
static int spi_tegra_start_cpu_based_transfer(
		struct spi_tegra_data *tspi, struct spi_transfer *t)
{
	unsigned long val;
	unsigned curr_words;

	val = tspi->packed_size;
	if (tspi->cur_direction & DATA_DIR_TX)
		val |= SLINK_IE_TXC;

	if (tspi->cur_direction & DATA_DIR_RX)
		val |= SLINK_IE_RXC;

	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	if (tspi->cur_direction & DATA_DIR_TX)
		curr_words = spi_tegra_fill_tx_fifo_from_client_txbuf(tspi, t);
	else
		curr_words = tspi->curr_dma_words;
	val |= SLINK_DMA_BLOCK_SIZE(curr_words - 1);
	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
	tspi->dma_control_reg = val;

	tspi->is_curr_dma_xfer = false;
	if (tspi->is_packed) {
		val |= SLINK_PACKED;
		spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
		udelay(1);
		wmb();
	}
	val |= SLINK_DMA_EN;
	spi_tegra_writel(tspi, val, SLINK_DMA_CTL);
	return 0;
}

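/*
 * Walk the platform-supplied list of candidate parent clocks and pick
 * the one whose rounded rate comes closest to 4x the requested SPI
 * speed, matching the speed * 4 clock programming used elsewhere in
 * this driver.
 */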
static void set_best_clk_source(struct spi_tegra_data *tspi,
		unsigned long speed)
{
	long new_rate;
	unsigned long err_rate;
	int rate = speed * 4;
	unsigned int fin_err = speed * 4;
	int final_index = -1;
	int count;
	int ret;
	struct clk *pclk;
	unsigned long prate, crate, nrate;
	unsigned long cdiv;

	if (!tspi->parent_clk_count || !tspi->parent_clk_list)
		return;

	/* Make sure the divisor is at least min_div */
	pclk = clk_get_parent(tspi->clk);
	prate = clk_get_rate(pclk);
	crate = clk_get_rate(tspi->clk);
	cdiv = DIV_ROUND_UP(prate, crate);
	if (cdiv < tspi->min_div) {
		nrate = DIV_ROUND_UP(prate, tspi->min_div);
		clk_set_rate(tspi->clk, nrate);
	}

	for (count = 0; count < tspi->parent_clk_count; ++count) {
		if (!tspi->parent_clk_list[count].parent_clk)
			continue;
		ret = clk_set_parent(tspi->clk,
			tspi->parent_clk_list[count].parent_clk);
		if (ret < 0) {
			dev_warn(&tspi->pdev->dev,
				"Error in setting parent clk src %s\n",
				tspi->parent_clk_list[count].name);
			continue;
		}

		new_rate = clk_round_rate(tspi->clk, rate);
		if (new_rate < 0)
			continue;

		err_rate = abs(new_rate - rate);
		if (err_rate < fin_err) {
			final_index = count;
			fin_err = err_rate;
		}
	}

	if (final_index >= 0) {
		dev_info(&tspi->pdev->dev, "Setting clk_src %s\n",
			tspi->parent_clk_list[final_index].name);
		clk_set_parent(tspi->clk,
			tspi->parent_clk_list[final_index].parent_clk);
	}
}

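/*
 * Program the controller for one spi_transfer and start it.  The first
 * transfer of a message takes the clock reference, clears stale status,
 * and configures word length, chip select (software CS by default,
 * hardware CS when the controller data requests it and the whole
 * single-transfer message fits in one chunk) and SPI mode; follow-up
 * transfers only update the word length.
 */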
static void spi_tegra_start_transfer(struct spi_device *spi,
		struct spi_transfer *t, bool is_first_of_msg,
		bool is_single_xfer)
{
	struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
	u32 speed;
	u8 bits_per_word;
	unsigned total_fifo_words;
	int ret;
	struct tegra_spi_device_controller_data *cdata = spi->controller_data;
	unsigned long command;
	unsigned long command2;
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
	unsigned long status2;
#endif
	int cs_setup_count;
	int cs_hold_count;

	unsigned int cs_pol_bit[] = {
		SLINK_CS_POLARITY,
		SLINK_CS_POLARITY1,
		SLINK_CS_POLARITY2,
		SLINK_CS_POLARITY3,
	};

	bits_per_word = t->bits_per_word ? t->bits_per_word :
					spi->bits_per_word;

	speed = t->speed_hz ? t->speed_hz : spi->max_speed_hz;
	if (speed != tspi->cur_speed) {
		set_best_clk_source(tspi, speed);
		clk_set_rate(tspi->clk, speed * 4);
		tspi->cur_speed = speed;
	}

	tspi->cur = t;
	tspi->cur_spi = spi;
	tspi->cur_pos = 0;
	tspi->cur_rx_pos = 0;
	tspi->cur_tx_pos = 0;
	tspi->rx_complete = 0;
	tspi->tx_complete = 0;
	total_fifo_words = spi_tegra_calculate_curr_xfer_param(spi, tspi, t);

	command2 = tspi->def_command2_reg;
	if (is_first_of_msg) {
		if (!tspi->is_clkon_always) {
			if (!tspi->clk_state) {
				pm_runtime_get_sync(&tspi->pdev->dev);
				tspi->clk_state = 1;
			}
		}

		spi_tegra_clear_status(tspi);

		command = tspi->def_command_reg;
		command |= SLINK_BIT_LENGTH(bits_per_word - 1);

		/* possibly use the hw based chip select */
		tspi->is_hw_based_cs = false;
		if (cdata && cdata->is_hw_based_cs && is_single_xfer) {
			if ((tspi->curr_dma_words * tspi->bytes_per_word) ==
						(t->len - tspi->cur_pos)) {
				cs_setup_count = cdata->cs_setup_clk_count >> 1;
				if (cs_setup_count > 3)
					cs_setup_count = 3;
				cs_hold_count = cdata->cs_hold_clk_count;
				if (cs_hold_count > 0xF)
					cs_hold_count = 0xF;
				tspi->is_hw_based_cs = true;

				command &= ~SLINK_CS_SW;
				command2 &= ~SLINK_SS_SETUP(3);
				command2 |= SLINK_SS_SETUP(cs_setup_count);
#ifndef CONFIG_ARCH_TEGRA_2x_SOC
				status2 = spi_tegra_readl(tspi, SLINK_STATUS2);
				status2 &= ~SLINK_SS_HOLD_TIME(0xF);
				status2 |= SLINK_SS_HOLD_TIME(cs_hold_count);
				spi_tegra_writel(tspi, status2, SLINK_STATUS2);
#endif
			}
		}
		if (!tspi->is_hw_based_cs) {
			command |= SLINK_CS_SW;
			command ^= cs_pol_bit[spi->chip_select];
		}

		command &= ~SLINK_IDLE_SCLK_MASK & ~SLINK_CK_SDA;
		if (spi->mode & SPI_CPHA)
			command |= SLINK_CK_SDA;

		if (spi->mode & SPI_CPOL)
			command |= SLINK_IDLE_SCLK_DRIVE_HIGH;
		else
			command |= SLINK_IDLE_SCLK_DRIVE_LOW;
	} else {
		command = tspi->command_reg;
		command &= ~SLINK_BIT_LENGTH(~0);
		command |= SLINK_BIT_LENGTH(bits_per_word - 1);
	}

	spi_tegra_writel(tspi, command, SLINK_COMMAND);
	tspi->command_reg = command;

	dev_dbg(&tspi->pdev->dev, "The def 0x%x and written 0x%lx\n",
				tspi->def_command_reg, command);

	command2 &= ~(SLINK_SS_EN_CS(~0) | SLINK_RXEN | SLINK_TXEN);
	tspi->cur_direction = 0;
	if (t->rx_buf) {
		command2 |= SLINK_RXEN;
		tspi->cur_direction |= DATA_DIR_RX;
	}
	if (t->tx_buf) {
		command2 |= SLINK_TXEN;
		tspi->cur_direction |= DATA_DIR_TX;
	}
	command2 |= SLINK_SS_EN_CS(spi->chip_select);
	spi_tegra_writel(tspi, command2, SLINK_COMMAND2);
	tspi->command2_reg = command2;

	if (total_fifo_words > SPI_FIFO_DEPTH)
		ret = spi_tegra_start_dma_based_transfer(tspi, t);
	else
		ret = spi_tegra_start_cpu_based_transfer(tspi, t);
	WARN_ON(ret < 0);
}

static int spi_tegra_setup(struct spi_device *spi)
{
	struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
	unsigned long cs_bit;
	unsigned long val;
	unsigned long flags;

	dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
		spi->bits_per_word,
		spi->mode & SPI_CPOL ? "" : "~",
		spi->mode & SPI_CPHA ? "" : "~",
		spi->max_speed_hz);

	BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
	switch (spi->chip_select) {
	case 0:
		cs_bit = SLINK_CS_POLARITY;
		break;

	case 1:
		cs_bit = SLINK_CS_POLARITY1;
		break;

	case 2:
		cs_bit = SLINK_CS_POLARITY2;
		break;

	case 3:
		cs_bit = SLINK_CS_POLARITY3;
		break;

	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&tspi->lock, flags);
	val = tspi->def_command_reg;
	if (spi->mode & SPI_CS_HIGH)
		val |= cs_bit;
	else
		val &= ~cs_bit;
	tspi->def_command_reg = val;

	if (!tspi->is_clkon_always && !tspi->clk_state) {
		spin_unlock_irqrestore(&tspi->lock, flags);
		pm_runtime_get_sync(&tspi->pdev->dev);
		spin_lock_irqsave(&tspi->lock, flags);
		tspi->clk_state = 1;
	}
	spi_tegra_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
	if (!tspi->is_clkon_always && tspi->clk_state) {
		tspi->clk_state = 0;
		spin_unlock_irqrestore(&tspi->lock, flags);
		pm_runtime_put_sync(&tspi->pdev->dev);
	} else
		spin_unlock_irqrestore(&tspi->lock, flags);
	return 0;
}

static void tegra_spi_transfer_work(struct work_struct *work)
{
	struct spi_tegra_data *tspi;
	struct spi_device *spi;
	struct spi_message *m;
	struct spi_transfer *t;
	int single_xfer = 0;
	unsigned long flags;

	tspi = container_of(work, struct spi_tegra_data, spi_transfer_work);

	spin_lock_irqsave(&tspi->lock, flags);

	if (tspi->is_transfer_in_progress || tspi->is_suspended) {
		spin_unlock_irqrestore(&tspi->lock, flags);
		return;
	}
	if (list_empty(&tspi->queue)) {
		spin_unlock_irqrestore(&tspi->lock, flags);
		return;
	}

	m = list_first_entry(&tspi->queue, struct spi_message, queue);
	spi = m->state;
	single_xfer = list_is_singular(&m->transfers);
	m->actual_length = 0;
	m->status = 0;
	t = list_first_entry(&m->transfers, struct spi_transfer, transfer_list);
	tspi->is_transfer_in_progress = true;

	spin_unlock_irqrestore(&tspi->lock, flags);
	spi_tegra_start_transfer(spi, t, true, single_xfer);
}

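/*
 * The spi_master ->transfer hook: validate every transfer in the
 * message, then append the message to the controller queue; if the
 * queue was empty, schedule the work item to start the hardware.
 */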
static int spi_tegra_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct spi_tegra_data *tspi = spi_master_get_devdata(spi->master);
	struct spi_transfer *t;
	unsigned long flags;
	int was_empty;
	int bytes_per_word;

	if (list_empty(&m->transfers) || !m->complete)
		return -EINVAL;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		if (t->bits_per_word < 0 || t->bits_per_word > 32)
			return -EINVAL;

		if (t->len == 0)
			return -EINVAL;

		/* Check that the length is a whole number of words */
		if (t->bits_per_word)
			bytes_per_word = (t->bits_per_word + 7)/8;
		else
			bytes_per_word = (spi->bits_per_word + 7)/8;

		if (t->len % bytes_per_word != 0)
			return -EINVAL;

		if (!t->rx_buf && !t->tx_buf)
			return -EINVAL;
	}

	spin_lock_irqsave(&tspi->lock, flags);

	if (WARN_ON(tspi->is_suspended)) {
		spin_unlock_irqrestore(&tspi->lock, flags);
		return -EBUSY;
	}

	m->state = spi;
	was_empty = list_empty(&tspi->queue);
	list_add_tail(&m->queue, &tspi->queue);
	if (was_empty)
		queue_work(tspi->spi_workqueue, &tspi->spi_transfer_work);

	spin_unlock_irqrestore(&tspi->lock, flags);
	return 0;
}

static void spi_tegra_curr_transfer_complete(struct spi_tegra_data *tspi,
	unsigned err, unsigned cur_xfer_size, unsigned long *irq_flags)
{
	struct spi_message *m;
	struct spi_device *spi;
	struct spi_transfer *t;
	int single_xfer = 0;

	/* Check if CS needs to be toggled here */
	if (tspi->cur && tspi->cur->cs_change &&
				tspi->cur->delay_usecs) {
		udelay(tspi->cur->delay_usecs);
	}

	m = list_first_entry(&tspi->queue, struct spi_message, queue);
	if (err)
		m->status = -EIO;
	spi = m->state;

	m->actual_length += cur_xfer_size;

	if (!list_is_last(&tspi->cur->transfer_list, &m->transfers)) {
		tspi->cur = list_first_entry(&tspi->cur->transfer_list,
			struct spi_transfer, transfer_list);
		spin_unlock_irqrestore(&tspi->lock, *irq_flags);
		spi_tegra_start_transfer(spi, tspi->cur, false, 0);
		spin_lock_irqsave(&tspi->lock, *irq_flags);
	} else {
		list_del(&m->queue);
		m->complete(m->context);
		if (!list_empty(&tspi->queue)) {
			if (tspi->is_suspended) {
				spi_tegra_writel(tspi, tspi->def_command_reg,
						SLINK_COMMAND);
				spi_tegra_writel(tspi, tspi->def_command2_reg,
						SLINK_COMMAND2);
				tspi->is_transfer_in_progress = false;
				return;
			}
			m = list_first_entry(&tspi->queue, struct spi_message,
				queue);
			spi = m->state;
			single_xfer = list_is_singular(&m->transfers);
			m->actual_length = 0;
			m->status = 0;

			t = list_first_entry(&m->transfers, struct spi_transfer,
						transfer_list);
			spin_unlock_irqrestore(&tspi->lock, *irq_flags);
			spi_tegra_start_transfer(spi, t, true, single_xfer);
			spin_lock_irqsave(&tspi->lock, *irq_flags);
		} else {
			spi_tegra_writel(tspi, tspi->def_command_reg,
						SLINK_COMMAND);
			spi_tegra_writel(tspi, tspi->def_command2_reg,
						SLINK_COMMAND2);
			if (!tspi->is_clkon_always) {
				if (tspi->clk_state) {
					/* Delay to let the signal state
					 * stabilize */
					spin_unlock_irqrestore(&tspi->lock,
							*irq_flags);
					udelay(10);
					pm_runtime_put_sync(&tspi->pdev->dev);
					spin_lock_irqsave(&tspi->lock,
							*irq_flags);
					tspi->clk_state = 0;
				}
			}
			tspi->is_transfer_in_progress = false;
			/* Check if any new request arrived while the
			 * clock was being disabled */
			queue_work(tspi->spi_workqueue,
					&tspi->spi_transfer_work);
		}
	}
	return;
}

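/*
 * The DMA completion callbacks run in the DMA driver's context; they
 * only signal the completions that the threaded ISR below waits on.
 */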
static void tegra_spi_tx_dma_complete(struct tegra_dma_req *req)
{
	struct spi_tegra_data *tspi = req->dev;
	complete(&tspi->tx_dma_complete);
}

static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
{
	struct spi_tegra_data *tspi = req->dev;
	complete(&tspi->rx_dma_complete);
}

static void handle_cpu_based_xfer(void *context_data)
{
	struct spi_tegra_data *tspi = context_data;
	struct spi_transfer *t = tspi->cur;
	unsigned long flags;

	spin_lock_irqsave(&tspi->lock, flags);
	if (tspi->tx_status || tspi->rx_status ||
				(tspi->status_reg & SLINK_BSY)) {
		dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
					__func__, tspi->status_reg);
		tegra_periph_reset_assert(tspi->clk);
		udelay(2);
		tegra_periph_reset_deassert(tspi->clk);
		WARN_ON(1);
		spi_tegra_curr_transfer_complete(tspi,
			tspi->tx_status || tspi->rx_status, t->len, &flags);
		goto exit;
	}

	dev_vdbg(&tspi->pdev->dev, "Current direction %x\n",
					tspi->cur_direction);
	if (tspi->cur_direction & DATA_DIR_RX)
		spi_tegra_read_rx_fifo_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else if (tspi->cur_direction & DATA_DIR_RX)
		tspi->cur_pos = tspi->cur_rx_pos;
	else
		WARN_ON(1);

	dev_vdbg(&tspi->pdev->dev,
		"current position %d and length of the transfer %d\n",
			tspi->cur_pos, t->len);
	if (tspi->cur_pos == t->len) {
		spi_tegra_curr_transfer_complete(tspi,
			tspi->tx_status || tspi->rx_status, t->len, &flags);
		goto exit;
	}

	/* Start the next chunk of the same transfer */
	spi_tegra_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
	spi_tegra_start_cpu_based_transfer(tspi, t);
exit:
	spin_unlock_irqrestore(&tspi->lock, flags);
	return;
}

static irqreturn_t spi_tegra_isr_thread(int irq, void *context_data)
{
	struct spi_tegra_data *tspi = context_data;
	struct spi_transfer *t = tspi->cur;
	long wait_status;
	int err = 0;
	unsigned total_fifo_words;
	unsigned long flags;

	if (!tspi->is_curr_dma_xfer) {
		handle_cpu_based_xfer(context_data);
		return IRQ_HANDLED;
	}

	/* Abort the DMAs on any error */
	if (tspi->cur_direction & DATA_DIR_TX) {
		if (tspi->tx_status) {
			tegra_dma_dequeue_req(tspi->tx_dma, &tspi->tx_dma_req);
			err += 1;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
			if (wait_status <= 0) {
				tegra_dma_dequeue_req(tspi->tx_dma,
							&tspi->tx_dma_req);
				dev_err(&tspi->pdev->dev,
					"Error in DMA Tx transfer\n");
				err += 1;
			}
		}
	}

	if (tspi->cur_direction & DATA_DIR_RX) {
		if (tspi->rx_status) {
			tegra_dma_dequeue_req(tspi->rx_dma, &tspi->rx_dma_req);
			err += 2;
		} else {
			wait_status = wait_for_completion_interruptible_timeout(
				&tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
			if (wait_status <= 0) {
				tegra_dma_dequeue_req(tspi->rx_dma,
							&tspi->rx_dma_req);
				dev_err(&tspi->pdev->dev,
					"Error in DMA Rx transfer\n");
				err += 2;
			}
		}
	}

	spin_lock_irqsave(&tspi->lock, flags);
	if (err) {
		dev_err(&tspi->pdev->dev, "%s ERROR bit set 0x%x\n",
					__func__, tspi->status_reg);
		tegra_periph_reset_assert(tspi->clk);
		udelay(2);
		tegra_periph_reset_deassert(tspi->clk);
		WARN_ON(1);
		spi_tegra_curr_transfer_complete(tspi, err, t->len, &flags);
		spin_unlock_irqrestore(&tspi->lock, flags);
		return IRQ_HANDLED;
	}

	if (tspi->cur_direction & DATA_DIR_RX)
		spi_tegra_copy_spi_rxbuf_to_client_rxbuf(tspi, t);

	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->cur_pos = tspi->cur_tx_pos;
	else if (tspi->cur_direction & DATA_DIR_RX)
		tspi->cur_pos = tspi->cur_rx_pos;
	else
		WARN_ON(1);

	if (tspi->cur_pos == t->len) {
		spi_tegra_curr_transfer_complete(tspi,
			tspi->tx_status || tspi->rx_status, t->len, &flags);
		spin_unlock_irqrestore(&tspi->lock, flags);
		return IRQ_HANDLED;
	}

	/* Continue the transfer in the current message */
	total_fifo_words = spi_tegra_calculate_curr_xfer_param(tspi->cur_spi,
							tspi, t);
	if (total_fifo_words > SPI_FIFO_DEPTH)
		err = spi_tegra_start_dma_based_transfer(tspi, t);
	else
		err = spi_tegra_start_cpu_based_transfer(tspi, t);

	spin_unlock_irqrestore(&tspi->lock, flags);
	WARN_ON(err < 0);
	return IRQ_HANDLED;
}

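/*
 * Hard interrupt handler: latch the status register, record any Tx/Rx
 * error bits for the direction(s) in use, acknowledge the interrupt,
 * and defer the real work to the threaded handler above.
 */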
static irqreturn_t spi_tegra_isr(int irq, void *context_data)
{
	struct spi_tegra_data *tspi = context_data;

	tspi->status_reg = spi_tegra_readl(tspi, SLINK_STATUS);
	if (tspi->cur_direction & DATA_DIR_TX)
		tspi->tx_status = tspi->status_reg &
					(SLINK_TX_OVF | SLINK_TX_UNF);

	if (tspi->cur_direction & DATA_DIR_RX)
		tspi->rx_status = tspi->status_reg &
					(SLINK_RX_OVF | SLINK_RX_UNF);
	spi_tegra_clear_status(tspi);

	return IRQ_WAKE_THREAD;
}

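/*
 * Probe: map the register window, install the split (hard + threaded)
 * interrupt handler, take the "spi" and "sclk" clocks and, when DMA is
 * allowed, allocate one one-shot DMA channel and one coherent bounce
 * buffer per direction before registering the spi_master and the
 * single-threaded transfer work queue.
 */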
static int __init spi_tegra_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	struct resource *r;
	struct tegra_spi_platform_data *pdata = pdev->dev.platform_data;
	int ret, spi_irq;
	int i;
	char spi_wq_name[20];

	master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
	if (master == NULL) {
		dev_err(&pdev->dev, "master allocation failed\n");
		return -ENOMEM;
	}

	/* the spi->mode bits understood by this driver: */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	if (pdev->id != -1)
		master->bus_num = pdev->id;

	master->setup = spi_tegra_setup;
	master->transfer = spi_tegra_transfer;
	master->num_chipselect = MAX_CHIP_SELECT;

	dev_set_drvdata(&pdev->dev, master);
	tspi = spi_master_get_devdata(master);
	tspi->master = master;
	tspi->pdev = pdev;
	tspi->is_transfer_in_progress = false;
	tspi->is_suspended = false;
	spin_lock_init(&tspi->lock);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL) {
		ret = -ENODEV;
		goto fail_no_mem;
	}

	if (!request_mem_region(r->start, resource_size(r),
				dev_name(&pdev->dev))) {
		ret = -EBUSY;
		goto fail_no_mem;
	}

	tspi->phys = r->start;
	tspi->base = ioremap(r->start, resource_size(r));
	if (!tspi->base) {
		dev_err(&pdev->dev, "can't ioremap iomem\n");
		ret = -ENOMEM;
		goto fail_io_map;
	}

	spi_irq = platform_get_irq(pdev, 0);
	if (unlikely(spi_irq < 0)) {
		dev_err(&pdev->dev, "can't find irq resource\n");
		ret = -ENXIO;
		goto fail_irq_req;
	}
	tspi->irq = spi_irq;

	sprintf(tspi->port_name, "tegra_spi_%d", pdev->id);
	ret = request_threaded_irq(tspi->irq, spi_tegra_isr,
			spi_tegra_isr_thread, IRQF_DISABLED,
			tspi->port_name, tspi);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
					tspi->irq);
		goto fail_irq_req;
	}

	tspi->clk = clk_get(&pdev->dev, "spi");
	if (IS_ERR(tspi->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(tspi->clk);
		goto fail_clk_get;
	}

	tspi->sclk = clk_get(&pdev->dev, "sclk");
	if (IS_ERR(tspi->sclk)) {
		dev_err(&pdev->dev, "cannot get sclock\n");
		ret = PTR_ERR(tspi->sclk);
		goto fail_sclk_get;
	}

	INIT_LIST_HEAD(&tspi->queue);

	if (pdata) {
		tspi->is_clkon_always = pdata->is_clkon_always;
		tspi->is_dma_allowed = pdata->is_dma_based;
		tspi->dma_buf_size = (pdata->max_dma_buffer) ?
				pdata->max_dma_buffer : DEFAULT_SPI_DMA_BUF_LEN;
		tspi->parent_clk_count = pdata->parent_clk_count;
		tspi->parent_clk_list = pdata->parent_clk_list;
		tspi->max_rate = pdata->max_rate;
	} else {
		tspi->is_clkon_always = false;
		tspi->is_dma_allowed = true;
		tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
		tspi->parent_clk_count = 0;
		tspi->parent_clk_list = NULL;
		tspi->max_rate = 0;
	}

	tspi->max_parent_rate = 0;
	tspi->min_div = 0;

	if (tspi->parent_clk_count) {
		tspi->max_parent_rate = tspi->parent_clk_list[0].fixed_clk_rate;
		for (i = 1; i < tspi->parent_clk_count; ++i) {
			tspi->max_parent_rate = max(tspi->max_parent_rate,
				tspi->parent_clk_list[i].fixed_clk_rate);
		}
		if (tspi->max_rate)
			tspi->min_div = DIV_ROUND_UP(tspi->max_parent_rate,
						tspi->max_rate);
	}
	tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;

	if (!tspi->is_dma_allowed)
		goto skip_dma_alloc;

	init_completion(&tspi->tx_dma_complete);
	init_completion(&tspi->rx_dma_complete);

	tspi->rx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
				"spi_rx_%d", pdev->id);
	if (!tspi->rx_dma) {
		dev_err(&pdev->dev, "cannot allocate rx dma channel\n");
		ret = -ENODEV;
		goto fail_rx_dma_alloc;
	}

	tspi->rx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
					 &tspi->rx_buf_phys, GFP_KERNEL);
	if (!tspi->rx_buf) {
		dev_err(&pdev->dev, "cannot allocate rx bounce buffer\n");
		ret = -ENOMEM;
		goto fail_rx_buf_alloc;
	}

	/* Hand the DMA buffer to the device */
	dma_sync_single_for_device(&tspi->pdev->dev, tspi->rx_buf_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	memset(&tspi->rx_dma_req, 0, sizeof(struct tegra_dma_req));
	tspi->rx_dma_req.complete = tegra_spi_rx_dma_complete;
	tspi->rx_dma_req.to_memory = 1;
	tspi->rx_dma_req.dest_addr = tspi->rx_buf_phys;
	tspi->rx_dma_req.virt_addr = tspi->rx_buf;
	tspi->rx_dma_req.dest_bus_width = 32;
	tspi->rx_dma_req.source_addr = tspi->phys + SLINK_RX_FIFO;
	tspi->rx_dma_req.source_bus_width = 32;
	tspi->rx_dma_req.source_wrap = 4;
	tspi->rx_dma_req.dest_wrap = 0;
	tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
	tspi->rx_dma_req.dev = tspi;

	tspi->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT,
				"spi_tx_%d", pdev->id);
	if (!tspi->tx_dma) {
		dev_err(&pdev->dev, "cannot allocate tx dma channel\n");
		ret = -ENODEV;
		goto fail_tx_dma_alloc;
	}

	tspi->tx_buf = dma_alloc_coherent(&pdev->dev, tspi->dma_buf_size,
					 &tspi->tx_buf_phys, GFP_KERNEL);
	if (!tspi->tx_buf) {
		dev_err(&pdev->dev, "cannot allocate tx bounce buffer\n");
		ret = -ENOMEM;
		goto fail_tx_buf_alloc;
	}

	/* Hand the DMA buffer to the device */
	dma_sync_single_for_device(&tspi->pdev->dev, tspi->tx_buf_phys,
				tspi->dma_buf_size, DMA_TO_DEVICE);

	memset(&tspi->tx_dma_req, 0, sizeof(struct tegra_dma_req));
	tspi->tx_dma_req.complete = tegra_spi_tx_dma_complete;
	tspi->tx_dma_req.to_memory = 0;
	tspi->tx_dma_req.dest_addr = tspi->phys + SLINK_TX_FIFO;
	tspi->tx_dma_req.virt_addr = tspi->tx_buf;
	tspi->tx_dma_req.dest_bus_width = 32;
	tspi->tx_dma_req.dest_wrap = 4;
	tspi->tx_dma_req.source_wrap = 0;
	tspi->tx_dma_req.source_addr = tspi->tx_buf_phys;
	tspi->tx_dma_req.source_bus_width = 32;
	tspi->tx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
	tspi->tx_dma_req.dev = tspi;
	tspi->max_buf_size = tspi->dma_buf_size;
	tspi->def_command_reg = SLINK_CS_SW | SLINK_M_S;
	tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;

skip_dma_alloc:
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);
	tspi->clk_state = 1;
	master->dev.of_node = pdev->dev.of_node;
	ret = spi_register_master(master);
	if (!tspi->is_clkon_always) {
		if (tspi->clk_state) {
			pm_runtime_put_sync(&pdev->dev);
			tspi->clk_state = 0;
		}
	}

	if (ret < 0) {
		dev_err(&pdev->dev, "cannot register master, err %d\n", ret);
		goto fail_master_register;
	}

	/* Create the work queue that feeds transfers to the hardware */
	snprintf(spi_wq_name, sizeof(spi_wq_name), "spi_tegra-%d", pdev->id);
	tspi->spi_workqueue = create_singlethread_workqueue(spi_wq_name);
	if (!tspi->spi_workqueue) {
		dev_err(&pdev->dev, "Failed to create work queue\n");
		ret = -ENODEV;
		goto fail_workqueue;
	}

	INIT_WORK(&tspi->spi_transfer_work, tegra_spi_transfer_work);

	return ret;

fail_workqueue:
	spi_unregister_master(master);

fail_master_register:
	if (tspi->tx_buf)
		dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
				tspi->tx_buf, tspi->tx_buf_phys);
fail_tx_buf_alloc:
	if (tspi->tx_dma)
		tegra_dma_free_channel(tspi->tx_dma);
fail_tx_dma_alloc:
	if (tspi->rx_buf)
		dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
			tspi->rx_buf, tspi->rx_buf_phys);
fail_rx_buf_alloc:
	if (tspi->rx_dma)
		tegra_dma_free_channel(tspi->rx_dma);
fail_rx_dma_alloc:
	pm_runtime_disable(&pdev->dev);
	clk_put(tspi->sclk);
fail_sclk_get:
	clk_put(tspi->clk);
fail_clk_get:
	free_irq(tspi->irq, tspi);
fail_irq_req:
	iounmap(tspi->base);
fail_io_map:
	release_mem_region(r->start, resource_size(r));
fail_no_mem:
	spi_master_put(master);
	return ret;
}

static int __devexit spi_tegra_remove(struct platform_device *pdev)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	struct resource *r;

	master = dev_get_drvdata(&pdev->dev);
	tspi = spi_master_get_devdata(master);

	spi_unregister_master(master);
	if (tspi->tx_buf)
		dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
				tspi->tx_buf, tspi->tx_buf_phys);
	if (tspi->tx_dma)
		tegra_dma_free_channel(tspi->tx_dma);
	if (tspi->rx_buf)
		dma_free_coherent(&pdev->dev, tspi->dma_buf_size,
			tspi->rx_buf, tspi->rx_buf_phys);
	if (tspi->rx_dma)
		tegra_dma_free_channel(tspi->rx_dma);

	if (tspi->is_clkon_always) {
		pm_runtime_put_sync(&pdev->dev);
		tspi->clk_state = 0;
	}

	pm_runtime_disable(&pdev->dev);
	clk_put(tspi->sclk);
	clk_put(tspi->clk);
	iounmap(tspi->base);

	destroy_workqueue(tspi->spi_workqueue);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	return 0;
}

#ifdef CONFIG_PM
static int spi_tegra_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	unsigned limit = 50;
	unsigned long flags;

	master = dev_get_drvdata(&pdev->dev);
	tspi = spi_master_get_devdata(master);
	spin_lock_irqsave(&tspi->lock, flags);

	/* Wait for all queued transfers to complete */
	if (!list_empty(&tspi->queue))
		dev_warn(&pdev->dev, "The transfer list is not empty, "
			"waiting up to %d ms for transfers to complete\n",
			limit * 20);

	while (!list_empty(&tspi->queue) && limit--) {
		spin_unlock_irqrestore(&tspi->lock, flags);
		msleep(20);
		spin_lock_irqsave(&tspi->lock, flags);
	}

	/* Then wait only for the current transfer to complete */
	tspi->is_suspended = true;
	if (!list_empty(&tspi->queue)) {
		limit = 50;
		dev_err(&pdev->dev, "Not all transfers have completed, "
			"waiting up to %d ms for the current transfer\n",
			limit * 20);
		while (tspi->is_transfer_in_progress && limit--) {
			spin_unlock_irqrestore(&tspi->lock, flags);
			msleep(20);
			spin_lock_irqsave(&tspi->lock, flags);
		}
	}

	if (tspi->is_transfer_in_progress) {
		dev_err(&pdev->dev,
			"SPI transfer still in progress, aborting suspend\n");
		tspi->is_suspended = false;
		spin_unlock_irqrestore(&tspi->lock, flags);
		return -EBUSY;
	}

	spin_unlock_irqrestore(&tspi->lock, flags);
	if (tspi->is_clkon_always) {
		pm_runtime_put_sync(&pdev->dev);
		tspi->clk_state = 0;
	}
	return 0;
}

static int spi_tegra_resume(struct platform_device *pdev)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	struct spi_message *m;
	struct spi_device *spi;
	struct spi_transfer *t = NULL;
	int single_xfer = 0;
	unsigned long flags;

	master = dev_get_drvdata(&pdev->dev);
	tspi = spi_master_get_devdata(master);

	pm_runtime_get_sync(&pdev->dev);
	tspi->clk_state = 1;
	spi_tegra_writel(tspi, tspi->command_reg, SLINK_COMMAND);
	if (!tspi->is_clkon_always) {
		pm_runtime_put_sync(&pdev->dev);
		tspi->clk_state = 0;
	}
	spin_lock_irqsave(&tspi->lock, flags);

	tspi->cur_speed = 0;
	tspi->is_suspended = false;
	if (!list_empty(&tspi->queue)) {
		m = list_first_entry(&tspi->queue, struct spi_message, queue);
		spi = m->state;
		single_xfer = list_is_singular(&m->transfers);
		m->actual_length = 0;
		m->status = 0;
		t = list_first_entry(&m->transfers, struct spi_transfer,
					transfer_list);
		tspi->is_transfer_in_progress = true;
	}
	spin_unlock_irqrestore(&tspi->lock, flags);
	if (t)
		spi_tegra_start_transfer(spi, t, true, single_xfer);
	return 0;
}
#endif

#if defined(CONFIG_PM_RUNTIME)

static int tegra_spi_runtime_idle(struct device *dev)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	master = dev_get_drvdata(dev);
	tspi = spi_master_get_devdata(master);

	clk_disable(tspi->clk);
	clk_disable(tspi->sclk);
	return 0;
}

static int tegra_spi_runtime_resume(struct device *dev)
{
	struct spi_master *master;
	struct spi_tegra_data *tspi;
	master = dev_get_drvdata(dev);
	tspi = spi_master_get_devdata(master);

	clk_enable(tspi->sclk);
	clk_enable(tspi->clk);
	return 0;
}

static const struct dev_pm_ops tegra_spi_dev_pm_ops = {
	.runtime_idle = tegra_spi_runtime_idle,
	.runtime_resume = tegra_spi_runtime_resume,
};

#endif

MODULE_ALIAS("platform:spi_tegra");

#ifdef CONFIG_OF
static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
	{ .compatible = "nvidia,tegra20-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
#else /* CONFIG_OF */
#define spi_tegra_of_match_table NULL
#endif /* CONFIG_OF */

static struct platform_driver spi_tegra_driver = {
	.driver = {
		.name =		"spi_tegra",
		.owner =	THIS_MODULE,
#if defined(CONFIG_PM_RUNTIME)
		.pm =		&tegra_spi_dev_pm_ops,
#endif
		.of_match_table = spi_tegra_of_match_table,
	},
	.remove =	__devexit_p(spi_tegra_remove),
#ifdef CONFIG_PM
	.suspend =	spi_tegra_suspend,
	.resume =	spi_tegra_resume,
#endif
};

static int __init spi_tegra_init(void)
{
	return platform_driver_probe(&spi_tegra_driver, spi_tegra_probe);
}
subsys_initcall(spi_tegra_init);

static void __exit spi_tegra_exit(void)
{
	platform_driver_unregister(&spi_tegra_driver);
}
module_exit(spi_tegra_exit);

MODULE_LICENSE("GPL");